BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit cb28753c (unverified)
Authored on Aug 24, 2021 by 王明冬; committed by GitHub on Aug 24, 2021.
add the extra and quantization for op def, test=develop (#35076)
Parent: b0a1d122
Showing 6 changed files with 117 additions and 48 deletions (+117, −48).
Changed files:
- paddle/fluid/framework/attribute.h (+17, −4)
- paddle/fluid/framework/framework.proto (+4, −0)
- paddle/fluid/framework/ir/op_compat_sensible_pass_tester.cc (+1, −1)
- paddle/fluid/framework/op_proto_maker.cc (+10, −5)
- paddle/fluid/framework/op_proto_maker.h (+11, −1)
- paddle/fluid/operators/conv_op.cc (+74, −37)
paddle/fluid/framework/attribute.h

@@ -322,8 +322,19 @@ class TypedAttrChecker {
   typedef std::function<void(const T&)> ValueChecker;

  public:
-  explicit TypedAttrChecker(const std::string& attr_name)
-      : attr_name_(attr_name) {}
+  explicit TypedAttrChecker(const std::string& attr_name,
+                            proto::OpProto_Attr* attr)
+      : attr_name_(attr_name), attr_(attr) {}
+
+  TypedAttrChecker& AsExtra() {
+    attr_->set_extra(true);
+    return *this;
+  }
+
+  TypedAttrChecker& AsQuant() {
+    attr_->set_quant(true);
+    return *this;
+  }

   TypedAttrChecker& InEnum(const std::unordered_set<T>& range) {
     value_checkers_.push_back(EnumInContainer<T>(range));

@@ -398,6 +409,7 @@ class TypedAttrChecker {

  private:
   std::string attr_name_;
+  proto::OpProto_Attr* attr_;
   std::vector<ValueChecker> value_checkers_;
   std::vector<DefaultValueChecker> default_value_setter_;
 };

@@ -408,8 +420,9 @@ class OpAttrChecker {
  public:
   template <typename T>
-  TypedAttrChecker<T>& AddAttrChecker(const std::string& attr_name) {
-    attr_checkers_.push_back(TypedAttrChecker<T>(attr_name));
+  TypedAttrChecker<T>& AddAttrChecker(const std::string& attr_name,
+                                      proto::OpProto_Attr* attr) {
+    attr_checkers_.push_back(TypedAttrChecker<T>(attr_name, attr));
     AttrChecker& checker = attr_checkers_.back();
     return *(checker.target<TypedAttrChecker<T>>());
   }
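Note: the AsExtra()/AsQuant() methods added above are chained setters. Each one writes a flag onto the attached proto attribute and returns *this, so the calls can follow SetDefault() or InEnum() in a single expression. A minimal, self-contained sketch of that pattern, using stand-in types rather than Paddle's real classes:

#include <iostream>
#include <string>

// Stand-in for the protobuf-generated attribute message (hypothetical).
struct AttrProto {
  bool extra = false;
  bool quant = false;
};

// Minimal chained-setter checker mirroring TypedAttrChecker's shape.
class AttrCheckerSketch {
 public:
  AttrCheckerSketch(const std::string& name, AttrProto* attr)
      : name_(name), attr_(attr) {}

  AttrCheckerSketch& AsExtra() {  // mark the attribute as "extra"
    attr_->extra = true;
    return *this;                 // returning *this enables chaining
  }
  AttrCheckerSketch& AsQuant() {  // mark the attribute as quantization-related
    attr_->quant = true;
    return *this;
  }

 private:
  std::string name_;
  AttrProto* attr_;
};

int main() {
  AttrProto attr;
  AttrCheckerSketch("use_mkldnn", &attr).AsExtra().AsQuant();
  std::cout << attr.extra << " " << attr.quant << "\n";  // prints: 1 1
  return 0;
}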
paddle/fluid/framework/framework.proto

@@ -90,6 +90,8 @@ message OpProto {
     optional bool duplicable = 3 [ default = false ];
     optional bool intermediate = 4 [ default = false ];
     optional bool dispensable = 5 [ default = false ];
+    optional bool extra = 6 [ default = false ];
+    optional bool quant = 7 [ default = false ];
   }

   // AttrProto describes the C++ type Attribute.

@@ -101,6 +103,8 @@ message OpProto {
     // language binding has responsibility to fill that
     // attribute. End-User should not set that attribute.
     optional bool generated = 4 [ default = false ];
+    optional bool extra = 5 [ default = false ];
+    optional bool quant = 6 [ default = false ];
   }

   required string type = 1;
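With the extra/quant fields in the proto, downstream tooling can tell an op's core attributes apart from auxiliary ones. A hedged sketch, assuming the standard protobuf-generated extra()/quant() accessors for the new fields and OpProto's existing repeated attrs field; the helper name is illustrative and not from this commit:

#include <string>
#include <vector>

#include "paddle/fluid/framework/framework.pb.h"  // generated from framework.proto

// Collect the names of attributes flagged as extra or quant on an op proto.
// Hypothetical helper; accessor names follow protobuf codegen for the fields above.
std::vector<std::string> AuxiliaryAttrNames(
    const paddle::framework::proto::OpProto& op) {
  std::vector<std::string> names;
  for (const auto& attr : op.attrs()) {  // assumes OpProto keeps `repeated Attr attrs`
    if (attr.extra() || attr.quant()) {
      names.push_back(attr.name());
    }
  }
  return names;
}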
paddle/fluid/framework/ir/op_compat_sensible_pass_tester.cc

@@ -102,7 +102,7 @@ TEST(OpCompatSensiblePass, compatOpAttribute) {
   EXPECT_FALSE(compat.Judge(fc_op, "test_pass"));

   OpCompat compat_1("fc_test");
-  info.checker_->AddAttrChecker<int>("in_num_col_dims").SetDefault(1);
+  info.checker_->AddAttrChecker<int>("in_num_col_dims", nullptr).SetDefault(1);
   EXPECT_TRUE(compat_1.Judge(fc_op, "test_pass"));
   delete info.checker_;
   delete info.proto_;
paddle/fluid/framework/op_proto_maker.cc

@@ -80,19 +80,24 @@ void OpProtoAndCheckerMaker::operator()(proto::OpProto* proto,
           static_cast<int>(OpRole::kOptimize) |
               static_cast<int>(OpRole::kLRSched),
           static_cast<int>(OpRole::kNotSpecified)})
-      .SetDefault(static_cast<int>(OpRole::kNotSpecified));
+      .SetDefault(static_cast<int>(OpRole::kNotSpecified))
+      .AsExtra();
   AddAttr<std::vector<std::string>>(OpRoleVarAttrName(),
                                     "Optimized for variable")
-      .SetDefault({});
+      .SetDefault({})
+      .AsExtra();
   AddAttr<std::string>(OpNamescopeAttrName(), "Operator name with namesope.")
-      .SetDefault("");
+      .SetDefault("")
+      .AsExtra();
   AddAttr<std::vector<std::string>>(OpCreationCallstackAttrName(),
                                     "Callstack for Op Creatation.")
-      .SetDefault({});
+      .SetDefault({})
+      .AsExtra();
   AddAttr<std::string>(OpDeviceAttrName(), "Device type of this operator.")
-      .SetDefault("");
+      .SetDefault("")
+      .AsExtra();
   Validate();
 }
paddle/fluid/framework/op_proto_maker.h

@@ -75,6 +75,16 @@ class OpProtoAndCheckerMaker {
       var_->set_dispensable(true);
       return *this;
     }
+
+    VariableBuilder& AsExtra() {
+      var_->set_extra(true);
+      return *this;
+    }
+
+    VariableBuilder& AsQuant() {
+      var_->set_quant(true);
+      return *this;
+    }
   };

   VariableBuilder AddInput(const std::string& name, const std::string& comment);

@@ -91,7 +101,7 @@ class OpProtoAndCheckerMaker {
     attr->set_comment(comment);
     attr->set_generated(generated);
     attr->set_type(AttrTypeID<T>());
-    return op_checker_->AddAttrChecker<T>(name);
+    return op_checker_->AddAttrChecker<T>(name, attr);
   }

   void AddComment(const std::string& comment) { proto_->set_comment(comment); }
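Together with attribute.h, this header now lets an op maker tag both variables (via VariableBuilder) and attributes (via TypedAttrChecker) while declaring them. A hypothetical maker sketch showing the intended chaining; the op, input, and attribute names here are illustrative and not from this commit, and the conv changes below use AsExtra() only:

#include "paddle/fluid/framework/op_proto_maker.h"

// Hypothetical operator maker illustrating the new tags; not part of this commit.
class ExampleFusedOpMaker : public paddle::framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "(Tensor) Core input of the example op.");
    AddInput("ResidualData", "Optional residual input used only by fused kernels.")
        .AsDispensable()
        .AsExtra();  // extra input: not part of the op's core definition
    AddOutput("Out", "(Tensor) Output of the example op.");
    AddAttr<bool>("use_mkldnn", "(bool, default false) Only used in mkldnn kernel")
        .SetDefault(false)
        .AsExtra();  // extra attribute
    AddAttr<float>("Scale_in", "Input scale, only used for INT8 quantization.")
        .SetDefault(1.0f)
        .AsQuant();  // quantization-related attribute
    AddComment("Example maker showing AsExtra()/AsQuant() tagging.");
  }
};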
paddle/fluid/operators/conv_op.cc

@@ -246,7 +246,8 @@ void Conv2DOpMaker::Make() {
   AddAttr<bool>("is_test",
                 "(bool, default false) Set to true for inference only, false "
                 "for training. Some layers may run faster when this is true.")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddInput("Input",
            "(Tensor) The input tensor of convolution operator. "
            "The format of input tensor is NCHW or NHWC, where N is batch size, "

@@ -264,12 +265,14 @@ void Conv2DOpMaker::Make() {
            "(Tensor) Bias to be added to each output of filter application."
            "The format of output tensor is X (one-dimensional) of size equal"
            "to the number of output channels. Only used with MKL-DNN.")
-      .AsDispensable();
+      .AsDispensable()
+      .AsExtra();
   AddInput("ResidualData",
            "(Tensor) Tensor with residual data "
            "to which convolution output will be added."
            "Used with fuse_residual_connection fusion.")
-      .AsDispensable();
+      .AsDispensable()
+      .AsExtra();
   AddOutput("Output",
             "(Tensor) The output tensor of convolution operator. "
             "It has same data fromat and data type as the Input.");

@@ -306,69 +309,87 @@ void Conv2DOpMaker::Make() {
   AddAttr<bool>("use_cudnn",
                 "(bool, default false) Only used in cudnn kernel, need install cudnn")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddAttr<bool>("fuse_relu_before_depthwise_conv",
                 "(bool, default false) Only used in cuda depthwise kernel")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddAttr<bool>("use_mkldnn",
                 "(bool, default false) Only used in mkldnn kernel")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddAttr<bool>("use_quantizer",
                 "(bool, default false) "
                 "This parameter is no longer used. Use 'mkldnn_data_type' instead.")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddAttr<std::string>(
       "mkldnn_data_type",
       "(string, default \"float32\"). Data type of mkldnn kernel")
       .SetDefault("float32")
-      .InEnum({"float32", "int8", "bfloat16"});
+      .InEnum({"float32", "int8", "bfloat16"})
+      .AsExtra();
   AddAttr<bool>("fuse_relu", "(bool, default false) Only used in mkldnn kernel")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddAttr<bool>("fuse_brelu",
                 "(bool, default false) Only used in mkldnn kernel")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddAttr<float>("fuse_brelu_threshold",
                  "(float, default false 6.0) Only used in mkldnn kernel")
-      .SetDefault(6.0f);
+      .SetDefault(6.0f)
+      .AsExtra();
   AddAttr<std::string>("fuse_activation",
                        "(string, default \"\") Only used in mkldnn kernel")
-      .SetDefault("");
+      .SetDefault("")
+      .AsExtra();
   AddAttr<float>("fuse_alpha",
                  "(float, default 0.0) Only used in mkldnn kernel")
-      .SetDefault(0.0f);
+      .SetDefault(0.0f)
+      .AsExtra();
   AddAttr<float>("fuse_beta", "(float, default 0.0) Only used in mkldnn kernel")
-      .SetDefault(0.0f);
+      .SetDefault(0.0f)
+      .AsExtra();
   AddAttr<bool>("use_addto",
                 "(bool, default false) If use addto strategy or not, only used in "
                 "cudnn kernel")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddAttr<bool>("fuse_residual_connection",
                 "(bool, default false) Only used in mkldnn kernel. Used "
                 "whenever convolution output is as an input to residual "
                 "connection.")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddAttr<float>("Scale_in",
                  "Scale_in to be used for int8 input data."
                  "Only used with MKL-DNN INT8.")
-      .SetDefault(1.0f);
+      .SetDefault(1.0f)
+      .AsExtra();
   AddAttr<float>("Scale_out",
                  "Scale_out to be used for int8 output data."
                  "Only used with MKL-DNN INT8.")
-      .SetDefault(1.0f);
+      .SetDefault(1.0f)
+      .AsExtra();
   AddAttr<float>("Scale_in_eltwise",
                  "Scale_in_eltwise to be used for int8 eltwise input data."
                  "Only used with MKL-DNN INT8.")
-      .SetDefault(1.0f);
+      .SetDefault(1.0f)
+      .AsExtra();
   AddAttr<std::vector<float>>("Scale_weights",
                               "Scale_weights to be used for int8 weights data."
                               "Only used with MKL-DNN INT8.")
-      .SetDefault({1.0f});
+      .SetDefault({1.0f})
+      .AsExtra();
   AddAttr<bool>("force_fp32_output",
                 "(bool, default false) Force INT8 kernel output FP32, only "
                 "used in MKL-DNN INT8")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddAttr<std::string>("data_format",
                        "(string, default NCHW) Only used in "

@@ -384,12 +405,14 @@ void Conv2DOpMaker::Make() {
            "allocated/freed each time the operator runs, larger "
            "workspace size can increase performance but also requires "
            "better hardware. This size should be chosen carefully.")
-      .SetDefault(platform::GetDefaultConvWorkspaceSizeLimitMB());
+      .SetDefault(platform::GetDefaultConvWorkspaceSizeLimitMB())
+      .AsExtra();
   AddAttr<bool>("exhaustive_search",
                 "(bool, default false) cuDNN has many algorithm to calculation "
                 "convolution, whether enable exhaustive search "
                 "for cuDNN convolution or not, default is False.")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddComment(R"DOC(
Convolution Operator.

@@ -426,7 +449,8 @@ void Conv3DOpMaker::Make() {
   AddAttr<bool>("is_test",
                 "(bool, default false) Set to true for inference only, false "
                 "for training. Some layers may run faster when this is true.")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddInput("Input",
            "(Tensor) The input tensor of convolution operator. "

@@ -447,7 +471,8 @@ void Conv3DOpMaker::Make() {
            "(Tensor) Tensor with residual data "
            "to which convolution output will be added."
            "Used with fuse_residual_connection fusion.")
-      .AsDispensable();
+      .AsDispensable()
+      .AsExtra();
   AddOutput("Output",
             "(Tensor) The output tensor of convolution operator."
             "It has same data fromat and data type as the Input.");

@@ -485,35 +510,44 @@ void Conv3DOpMaker::Make() {
   AddAttr<bool>("use_cudnn",
                 "(bool, default false) Only used in cudnn kernel, need install cudnn")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddAttr<bool>("use_mkldnn",
                 "(bool, default false) Only used in mkldnn kernel")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddAttr<std::string>(
       "mkldnn_data_type",
       "(string, default \"float32\"). Data type of mkldnn kernel")
       .SetDefault("float32")
-      .InEnum({"float32", "int8", "bfloat16"});
+      .InEnum({"float32", "int8", "bfloat16"})
+      .AsExtra();
   AddAttr<bool>("fuse_relu", "(bool, default false) Only used in mkldnn kernel")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddAttr<std::string>("fuse_activation",
                        "(string, default \"\") Only used in mkldnn kernel")
-      .SetDefault("");
+      .SetDefault("")
+      .AsExtra();
   AddAttr<float>("fuse_alpha",
                  "(float, default 0.0) Only used in mkldnn kernel")
-      .SetDefault(0.0f);
+      .SetDefault(0.0f)
+      .AsExtra();
   AddAttr<float>("fuse_beta", "(float, default 0.0) Only used in mkldnn kernel")
-      .SetDefault(0.0f);
+      .SetDefault(0.0f)
+      .AsExtra();
   AddAttr<bool>("use_addto",
                 "(bool, default false) If use addto strategy or not, only used in "
                 "cudnn kernel")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddAttr<bool>("fuse_residual_connection",
                 "(bool, default false) Only used in mkldnn kernel. Used "
                 "whenever convolution output is as an input to residual "
                 "connection.")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddAttr<std::string>("data_format",
                        "(string, default NCDHW) Only used in "

@@ -523,7 +557,8 @@ void Conv3DOpMaker::Make() {
       .SetDefault("NCDHW");
   AddAttr<bool>("force_fp32_output",
                 "(bool, default false) Only used in mkldnn INT8 kernel")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   // TODO(dzhwinter): need to registered layout transform function
   AddAttr<int>("workspace_size_MB",
                "Only used in cudnn kernel. workspace size for cudnn, in MB, "

@@ -531,12 +566,14 @@ void Conv3DOpMaker::Make() {
                "allocated/freed each time the operator runs, larger "
                "workspace size can increase performance but also requires "
                "better hardware. This size should be chosen carefully.")
-      .SetDefault(platform::GetDefaultConvWorkspaceSizeLimitMB());
+      .SetDefault(platform::GetDefaultConvWorkspaceSizeLimitMB())
+      .AsExtra();
   AddAttr<bool>("exhaustive_search",
                 "(bool, default false) cuDNN has many algorithm to calculation "
                 "convolution, whether enable exhaustive search "
                 "for cuDNN convolution or not, default is False.")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddComment(R"DOC(
Convolution3D Operator.
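A gtest-style check, in the spirit of the tester change above, can confirm that the chained calls actually land in the proto. A sketch under the assumption that OpAttrChecker can be used standalone as in op_compat_sensible_pass_tester.cc; the attribute name "groups" is illustrative and this test is not part of the commit:

#include "gtest/gtest.h"

#include "paddle/fluid/framework/attribute.h"
#include "paddle/fluid/framework/framework.pb.h"

namespace paddle {
namespace framework {

// Sketch: verify that AsExtra()/AsQuant() record their flags on the proto attr.
TEST(TypedAttrChecker, AsExtraAndAsQuantSetProtoFlags) {
  proto::OpProto_Attr attr;
  OpAttrChecker checker;
  // AddAttrChecker now takes the proto attribute so the flags have somewhere to go.
  checker.AddAttrChecker<int>("groups", &attr).SetDefault(1).AsExtra().AsQuant();
  EXPECT_TRUE(attr.extra());
  EXPECT_TRUE(attr.quant());
}

}  // namespace framework
}  // namespace paddle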