Unverified commit b2e4211d authored by zyfncg and committed by GitHub

[cherry-pick] Clear extra attrs of some ops in OpMaker (#46150, #46321, #46418, #46451, #46457) (#46553)

* Clear extra attributes of some ops in OpMaker (Part 4) (#46060)

* clear extra attrs of some ops in opmaker

* revert clear use_cudnn for pool

* fix test_operator_desc

* fix Attr interface of OperatorBase

* clear extra attrs of condition op in opmaker (#46150)

* Clear extra attrs of lookup_table_v2 in OpMaker (#46321)

* clear extra attrs of lookup_table_v2 in opmaker

* fix bug

* clear extra attrs of quantize op in opmaker (#46418)

* delete repeated item

* clear extra attrs of distribute op in opmaker (#46451)

* clear extra attrs of sequence_softmax in opmaker (#46457)
Parent a77a6f6b
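The change repeated across this diff: attributes that were registered in the C++ OpMakers with `.AsExtra()` are deleted there and re-declared as plain strings under an `extra : attrs :` key in the op-compat YAML hunks further down. A minimal sketch of reading one such entry, assuming PyYAML is available (the snippet reuses the `conditional_block` entry visible below):

```python
import yaml  # assumption: PyYAML is installed

# An entry in the same format as the YAML hunks at the bottom of this diff.
SNIPPET = """
- op : conditional_block
  backward : conditional_block_grad
  extra :
    attrs : ['str[] skip_eager_deletion_vars = {}']
"""

for entry in yaml.safe_load(SNIPPET):
    # Each attr string encodes "<type> <name> = <default>".
    print(entry["op"], "->", entry.get("extra", {}).get("attrs", []))
# conditional_block -> ['str[] skip_eager_deletion_vars = {}']
```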
@@ -119,11 +119,6 @@ class ConditionalBlockOpProtoMaker : public framework::OpProtoAndCheckerMaker {
"The conditional variable (Cond) is used as scalar "
"condition.")
.SetDefault(false);
AddAttr<std::vector<std::string>>(ConditionalOp::kSkipEagerDeletionVars,
"Vars that would not be deleted when "
"garbage collection strategy enables")
.SetDefault(std::vector<std::string>())
.AsExtra();
AddComment(R"DOC(Conditional block operator
If `is_scalar_condition` is True, the conditional variable (Cond) is a scalar,
@@ -221,11 +221,6 @@ class WhileOpMaker : public framework::OpProtoAndCheckerMaker {
"(bool, default false) Set to true for inference only, false "
"for training. Some layers may run faster when this is true.")
.SetDefault(false);
AddAttr<std::vector<std::string>>(kSkipEagerDeletionVars,
"Vars that would skip eager deletion."
"Users should not set this manually.")
.SetDefault(std::vector<std::string>())
.AsExtra();
AddComment(R"DOC(
)DOC");
}
@@ -432,24 +432,6 @@ class FakeQuantOrWithDequantAbsMaxOpMaker
"the received is %d",
bit_length));
});
AddAttr<int>(
"round_type",
"(int, default 1) The round type of fp32 to int."
"0: rounding to nearest ties to even. Eg: round(1.5)=2, round(2.5)=2"
"1: rounding to nearest ties away from zero. Eg: round(1.5)=2, "
"round(2.5)=3")
.SetDefault(1)
.AddCustomChecker([](const int &round_type) {
PADDLE_ENFORCE_EQ(
round_type == 0 || round_type == 1,
true,
platform::errors::InvalidArgument(
"'round_type' should be 0 or 1, 0 rounding to "
"nearest ties to even and 1 is rounding to nearest "
"ties away from zero.but the received is %d",
round_type));
})
.AsExtra();
AddComment(R"DOC(
This is a Base Op which supports FakeQuantAbsMaxOpMaker and FakeQuantDequantAbsMaxOpMaker.
FakeQuantAbsMaxOp operator is used in the dynamic quantization.
@@ -529,24 +511,6 @@ class FakeChannelWiseQuantizeAbsMaxOpMaker
"the received is %d",
bit_length));
});
AddAttr<int>(
"round_type",
"(int, default 1) The round type of fp32 to int."
"0: rounding to nearest ties to even. Eg: round(1.5)=2, round(2.5)=2"
"1: rounding to nearest ties away from zero. Eg: round(1.5)=2, "
"round(2.5)=3")
.SetDefault(1)
.AddCustomChecker([](const int &round_type) {
PADDLE_ENFORCE_EQ(
round_type == 0 || round_type == 1,
true,
platform::errors::InvalidArgument(
"'round_type' should be 0 or 1, 0 rounding to "
"nearest ties to even and 1 is rounding to nearest "
"ties away from zero.but the received is %d",
round_type));
})
.AsExtra();
AddAttr<bool>("is_test",
"(bool, default false) Set to true for inference only, false "
"for training. Some layers may run faster when this is true.")
@@ -628,24 +592,6 @@ class FakeChannelWiseQuantizeDequantizeAbsMaxOpMaker
"the received is %d",
bit_length));
});
AddAttr<int>(
"round_type",
"(int, default 1) The round type of fp32 to int."
"0: rounding to nearest ties to even. Eg: round(1.5)=2, round(2.5)=2"
"1: rounding to nearest ties away from zero. Eg: round(1.5)=2, "
"round(2.5)=3")
.SetDefault(1)
.AddCustomChecker([](const int &round_type) {
PADDLE_ENFORCE_EQ(
round_type == 0 || round_type == 1,
true,
platform::errors::InvalidArgument(
"'round_type' should be 0 or 1, 0 rounding to "
"nearest ties to even and 1 is rounding to nearest "
"ties away from zero.but the received is %d",
round_type));
})
.AsExtra();
AddComment(R"DOC(
The scale of FakeChannelWiseQuantize operator is a vector.
In detail, each channel of the input X has a scale value.
@@ -715,24 +661,6 @@ class FakeQuantizeRangeAbsMaxOpMaker
"the received is %d",
bit_length));
});
AddAttr<int>(
"round_type",
"(int, default 1) The round type of fp32 to int."
"0: rounding to nearest ties to even. Eg: round(1.5)=2, round(2.5)=2"
"1: rounding to nearest ties away from zero. Eg: round(1.5)=2, "
"round(2.5)=3")
.SetDefault(1)
.AddCustomChecker([](const int &round_type) {
PADDLE_ENFORCE_EQ(
round_type == 0 || round_type == 1,
true,
platform::errors::InvalidArgument(
"'round_type' should be 0 or 1, 0 rounding to "
"nearest ties to even and 1 is rounding to nearest "
"ties away from zero.but the received is %d",
round_type));
})
.AsExtra();
AddAttr<bool>("is_test",
"(bool, default false) Set to true for inference only, false "
"for training. Some layers may run faster when this is true.")
@@ -815,24 +743,6 @@ class FakeQuantOrWithDequantMovingAverageAbsMaxOpMaker
"the received is %d",
bit_length));
});
AddAttr<int>(
"round_type",
"(int, default 1) The round type of fp32 to int."
"0: rounding to nearest ties to even. Eg: round(1.5)=2, round(2.5)=2"
"1: rounding to nearest ties away from zero. Eg: round(1.5)=2, "
"round(2.5)=3")
.SetDefault(1)
.AddCustomChecker([](const int &round_type) {
PADDLE_ENFORCE_EQ(
round_type == 0 || round_type == 1,
true,
platform::errors::InvalidArgument(
"'round_type' should be 0 or 1, 0 rounding to "
"nearest ties to even and 1 is rounding to nearest "
"ties away from zero.but the received is %d",
round_type));
})
.AsExtra();
AddAttr<bool>("is_test",
"(bool, default false) Set to true for inference only, false "
"for training. Some layers may run faster when this is true.")
@@ -84,46 +84,12 @@ class LookupTableV2OpMaker : public framework::OpProtoAndCheckerMaker {
"An input with type int64 "
"contains the ids to be looked up in W.");
AddOutput("Out", "The lookup results, which have the same type as W.");
AddAttr<bool>("is_sparse",
"(boolean, default false) "
"Sparse update.")
.SetDefault(false)
.AsExtra();
AddAttr<bool>("is_distributed",
"(boolean, default false) distributed lookup table.")
.SetDefault(false)
.AsExtra();
AddAttr<int64_t>("padding_idx",
"(int64, default -1) "
"If the value is -1, it makes no effect to lookup. "
"Otherwise the given value indicates padding the output "
"with zeros whenever lookup encounters it in Ids.")
.SetDefault(kNoPadding);
// for parameter prefetch
AddAttr<bool>("remote_prefetch", "").SetDefault(false).AsExtra();
AddAttr<int>("trainer_id", "trainer id from 0 ~ worker_num.")
.SetDefault(0)
.AsExtra();
AddAttr<int>("slot", "slot of id").SetDefault(0).AsExtra();
AddAttr<std::vector<int64_t>>("height_sections",
"Height for each output SelectedRows.")
.SetDefault(std::vector<int64_t>({}))
.AsExtra();
AddAttr<std::vector<std::string>>(
"epmap",
"(string vector, default 127.0.0.1:6164)"
"Server endpoints in the order of input variables for mapping")
.SetDefault({})
.AsExtra();
AddAttr<std::vector<std::string>>(
"table_names",
"(string vector, the split table names that will be fetched from "
"parameter server)"
"in the order of input variables for mapping")
.SetDefault({})
.AsExtra();
AddComment(R"DOC(
Lookup Table V2 Operator.
@@ -207,34 +207,6 @@ class NCEOpMaker : public framework::OpProtoAndCheckerMaker {
// for parameter prefetch
AddAttr<bool>("remote_prefetch", "").SetDefault(false);
AddAttr<int>("trainer_id", "trainer id from 0 ~ worker_num.")
.SetDefault(0)
.AsExtra();
AddAttr<std::vector<int64_t>>("height_sections",
"Height for each output SelectedRows.")
.SetDefault(std::vector<int64_t>({}))
.AsExtra();
AddAttr<std::vector<std::string>>(
"epmap",
"(string vector, default 127.0.0.1:6164)"
"Server endpoints in the order of input variables for mapping")
.SetDefault({})
.AsExtra();
AddAttr<std::vector<std::string>>(
"table_names",
"(string vector, the split table names that will be fetched from "
"parameter server)"
"in the order of input variables for mapping")
.SetDefault({})
.AsExtra();
AddAttr<std::vector<int>>("custom_neg_classes",
"This attribute only be used in unitest. Classes "
"in this list wiil be used as negative classes "
"for every samples. Under normal conditions, "
"user should avoid setting this attribute.")
.SetDefault({})
.AsExtra();
AddAttr<bool>("is_test",
"(bool, default false) Set to true for inference "
"only, false for training.")
@@ -113,11 +113,6 @@ class DistributedPushSparseOpMaker : public framework::OpProtoAndCheckerMaker {
AddAttr<bool>("use_cvm_op", "(boolean, default false) Use cvm op or not.")
.SetDefault(false);
AddAttr<std::vector<int>>("slots",
"[slot_id1, slot_id2] Slots array of Ids.")
.SetDefault({})
.AsExtra();
AddComment(R"DOC(
Lookup Tablel Prefetch Operator.
This operator is used to perform lookup on parameter W,
@@ -134,10 +134,6 @@ class QuantizeLinearOpMaker : public framework::OpProtoAndCheckerMaker {
AddOutput("OutScale", "(Tensor) Current scale")
.AsDispensable()
.AsExtra(); // only qat use
AddAttr<float>("moving_rate",
"(float, default 0.9) moving rate.") // only qat use
.SetDefault(0.9)
.AsExtra();
AddAttr<int>("quant_axis",
"(int, default 0) The axis for quantization. "
"For conv2d, depthwise_conv2d, conv2d_transpose "
@@ -73,14 +73,6 @@ class SequenceSoftmaxOpMaker : public framework::OpProtoAndCheckerMaker {
"(bool, default false) Only used in cudnn kernel, need install cudnn")
.SetDefault(false)
.AsExtra();
AddAttr<std::string>(
"data_format",
"(string, default NCHW) Only used in "
"An optional string from: \"NHWC\", \"NCHW\". "
"Defaults to \"NHWC\". Specify the data format of the output data, "
"the input will be transformed automatically. ")
.SetDefault("AnyLayout")
.AsExtra();
AddComment(R"DOC(
Sequence Softmax Operator.
@@ -59,7 +59,7 @@ ATTR_TYPE_STRING_MAP = {
def parse_attr(attr_str):
result = re.search(
r"(?P<attr_type>[a-z[\]]+)\s+(?P<name>[a-zA-Z0-9_]+)\s*=\s*(?P<default_val>\S+)",
r"(?P<attr_type>[a-zA-Z0-9_[\]]+)\s+(?P<name>[a-zA-Z0-9_]+)\s*=\s*(?P<default_val>\S+)",
attr_str)
return ATTR_TYPE_STRING_MAP[result.group('attr_type')], result.group(
'name'), result.group('default_val')
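Why the character class in `attr_type` grew: the migrated extra-attr strings include types such as `int64_t[]`, whose digits and underscore the old pattern `[a-z[\]]+` cannot match from the start of the token. `re.search` then locks onto the later substring `t[]`, and the subsequent `ATTR_TYPE_STRING_MAP` lookup fails with a KeyError. A self-contained check (the `OLD`/`NEW` names are illustrative, not from the file):

```python
import re

# Old and new attr_type character classes from the hunk above.
OLD = r"(?P<attr_type>[a-z[\]]+)\s+(?P<name>[a-zA-Z0-9_]+)\s*=\s*(?P<default_val>\S+)"
NEW = r"(?P<attr_type>[a-zA-Z0-9_[\]]+)\s+(?P<name>[a-zA-Z0-9_]+)\s*=\s*(?P<default_val>\S+)"

attr_str = "int64_t[] height_sections = {}"
print(re.search(OLD, attr_str).group("attr_type"))  # t[]  (mis-parsed)
print(re.search(NEW, attr_str).group("attr_type"))  # int64_t[]
```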
@@ -93,6 +93,11 @@
extra :
attrs : [bool use_mkldnn = false, bool use_quantizer = false, str mkldnn_data_type = "float32"]
- op : conditional_block
backward : conditional_block_grad
extra :
attrs : ['str[] skip_eager_deletion_vars = {}']
- op : conv2d
backward : conv2d_grad
extra :
@@ -174,6 +179,10 @@
str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f,
int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]
- op : dequantize_linear
extra :
attrs : [float moving_rate = 0.9]
- op : diag (diag_v2)
backward : diag_grad (diag_v2_grad)
inputs :
@@ -199,6 +208,10 @@
outputs :
out : Out
- op : distributed_push_sparse
extra :
attrs : ['int[] slots = {}']
- op : divide (elementwise_div)
backward : divide_grad (elementwise_div)
extra :
@@ -232,6 +245,13 @@
extra :
attrs : [bool use_mkldnn = false]
- op : embedding (lookup_table_v2)
backward : embedding_grad (lookup_table_v2_grad)
extra :
attrs : [bool is_sparse = false, bool is_distributed = false, bool remote_prefetch = false,
int trainer_id = 0, int slot = 0, 'int64_t[] height_sections = {}', 'str[] epmap = {}',
'str[] table_names = {}']
- op : erf
inputs :
x : X
@@ -259,6 +279,34 @@
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- op : fake_channel_wise_quantize_abs_max
extra :
attrs : [int round_type = 1]
- op : fake_channel_wise_quantize_dequantize_abs_max
extra :
attrs : [int round_type = 1]
- op : fake_quantize_abs_max
extra :
attrs : [int round_type = 1]
- op : fake_quantize_dequantize_abs_max
extra :
attrs : [int round_type = 1]
- op : fake_quantize_dequantize_moving_average_abs_max
extra :
attrs : [int round_type = 1]
- op : fake_quantize_moving_average_abs_max
extra :
attrs : [int round_type = 1]
- op : fake_quantize_range_abs_max
extra :
attrs : [int round_type = 1]
- op : fft_c2c
inputs: {x: X}
outputs: {out: Out}
@@ -441,6 +489,12 @@
outputs :
out : Out
- op : nce
backward : nce_grad
extra :
attrs : [int trainer_id = 0, 'int64_t[] height_sections = {}', 'str[] epmap = {}',
'str[] table_names = {}', 'int[] custom_neg_classes = {}']
- op : nearest_interp (nearest_interp_v2)
backward : nearest_interp_grad (nearest_interp_v2_grad)
extra :
@@ -483,6 +537,10 @@
extra :
attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false]
- op : quantize_linear
extra :
attrs : [float moving_rate = 0.9]
- op : reciprocal
backward : reciprocal_grad
extra :
@@ -574,6 +632,11 @@
extra :
attrs : [bool deterministic = false, str rng_name = "", bool force_cpu = false]
- op : sequence_softmax
backward : sequence_softmax_grad
extra :
attrs : [str data_format = "AnyLayout"]
- op : shape
extra :
attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]
@@ -650,11 +713,6 @@
extra :
attrs : [bool use_mkldnn = false]
- op : stack
backward : stack_grad
extra :
attrs : [bool use_mkldnn = false]
- op : subtract (elementwise_sub)
backward : subtract_grad (elementwise_sub_grad)
extra :
@@ -708,3 +766,8 @@
x : X
outputs :
out : Out
- op : while
backward : while_grad
extra :
attrs : ['str[] skip_eager_deletion_vars = {}']