Commit 2800897a (unverified)
Repository: Crayon鑫 / Paddle (fork of PaddlePaddle / Paddle)
Authored by 王明冬 on Jun 18, 2021; committed by GitHub on Jun 18, 2021
Parent: 6cacd63e

add compat precondition for cpu_quantize_squash_pass, test=develop (#33611)
Showing 5 changed files with 101 additions and 16 deletions (+101 -16):

    paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass.cc          +71  -6
    paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass.h            +1  -4
    paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass_tester.cc   +14  -3
    paddle/fluid/operators/compat/conv2d.pbtxt                             +7  -3
    paddle/fluid/operators/compat/scale.pbtxt                              +8  -0
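In short, the pass now declares op-compatibility preconditions for the scale and conv2d operators in its constructor (via AddOpCompat), and every pattern handler returns early when IsCompat(subgraph, g) fails, so subgraphs whose attributes fall outside the declared ranges are left untouched. Below is a minimal sketch of that structure; OpCompat, AddOpCompat, and IsCompat are the FusePassBase helpers used verbatim in the diff, while "MyFusePass" and its "relu" constraints are hypothetical and only for illustration.

    // Sketch only, not part of the patch: the general shape this commit
    // applies to CPUQuantizeSquashPass. "MyFusePass" and the "relu"
    // constraints are hypothetical.
    #include "paddle/fluid/framework/ir/fuse_pass_base.h"
    #include "paddle/fluid/framework/ir/graph_pattern_detector.h"

    namespace paddle {
    namespace framework {
    namespace ir {

    class MyFusePass : public FusePassBase {
     public:
      MyFusePass() {
        // Declare the preconditions an op must satisfy before the pass may
        // touch it (tensor inputs/outputs, attribute ranges, ...).
        AddOpCompat(OpCompat("relu"))
            .AddInput("X")
            .IsTensor()
            .End()
            .AddOutput("Out")
            .IsTensor()
            .End();
      }

     protected:
      void ApplyImpl(Graph* graph) const override {
        GraphPatternDetector gpd;
        // ... build the pattern on gpd.mutable_pattern() ...
        auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph,
                           Graph* g) {
          // Bail out instead of rewriting a subgraph whose ops violate the
          // declared preconditions -- the check this commit adds everywhere.
          if (!IsCompat(subgraph, g)) {
            LOG(WARNING) << "Pass in op compat failed.";
            return;
          }
          // ... rewrite the matched subgraph ...
        };
        gpd(graph, handler);
      }
    };

    }  // namespace ir
    }  // namespace framework
    }  // namespace paddle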
paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass.cc  (+71 -6)

@@ -25,10 +25,60 @@ namespace paddle {
 namespace framework {
 namespace ir {

 class Graph;

 using string::PrettyLogDetail;

+CPUQuantizeSquashPass::CPUQuantizeSquashPass() {
+  AddOpCompat(OpCompat("scale"))
+      .AddInput("X")
+      .IsTensor()
+      .End()
+      .AddOutput("Out")
+      .IsTensor()
+      .End()
+      .AddAttr("bias")
+      .IsNumEQ(0.0f)
+      .End()
+      .AddAttr("scale")
+      .IsNumGT(0.0f)
+      .End()
+      .AddAttr("bias_after_scale")
+      // bias equal to 0.0, so this attribute is unconstrained.
+      .End();
+
+  AddOpCompat(OpCompat("conv2d"))
+      .AddInput("Input")
+      .IsTensor()
+      .End()
+      .AddInput("Filter")
+      .IsTensor()
+      .End()
+      .AddInput("Bias")
+      .IsTensor()
+      .End()
+      .AddInput("ResidualData")
+      .IsTensor()
+      .IsOptional()
+      .End()
+      .AddOutput("Output")
+      .IsTensor()
+      .End()
+      .AddAttr("strides")
+      .End()
+      .AddAttr("paddings")
+      .End()
+      .AddAttr("padding_algorithm")
+      .IsStringIn({"EXPLICIT", "SAME", "VALID"})
+      .End()
+      .AddAttr("groups")
+      .IsNumGE(1)
+      .End()
+      .AddAttr("dilations")
+      .End()
+      .AddAttr("data_format")
+      .IsStringIn({"NCHW", "NHWC"})
+      .End();
+}
+
 void CPUQuantizeSquashPass::FindNodesToKeep(
     Graph* graph,
     std::unordered_map<const Node*, int>* nodes_keep_counter) const {

@@ -354,6 +404,10 @@ void CPUQuantizeSquashPass::DequantScaleSquash(Graph* graph) const {
   int found_dequant_scale_squash_count = 0;
   auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph,
                      Graph* g) {
+    if (!IsCompat(subgraph, g)) {
+      LOG(WARNING) << "Pass in op compat failed.";
+      return;
+    }
     VLOG(4) << "squash dequant-scale ops pair";

     GET_IR_NODE_FROM_SUBGRAPH(dequant_op, dequant_op, dequant_scale_pattern);

@@ -362,9 +416,10 @@ void CPUQuantizeSquashPass::DequantScaleSquash(Graph* graph) const {
     GET_IR_NODE_FROM_SUBGRAPH(scale_out, scale_out, dequant_scale_pattern);

     if (dequant_out->outputs.size() == 1 &&
-        BOOST_GET_CONST(float, scale_op->Op()->GetAttr("bias")) == 0.0f) {
+        scale_op->Op()->GetAttrIfExists<float>("bias") == 0.0) {
       auto dequant_scale = dequant_op->Op()->GetAttrIfExists<float>("Scale");
-      float scale_scale =
-          BOOST_GET_CONST(float, scale_op->Op()->GetAttr("scale"));
+      auto scale_scale = scale_op->Op()->GetAttrIfExists<float>("scale");

       PADDLE_ENFORCE_GT(dequant_scale, 0.0f,
                         platform::errors::InvalidArgument(

@@ -399,6 +454,10 @@ void CPUQuantizeSquashPass::ScaleQuantSquash(Graph* graph) const {
   int found_scale_quant_squash_count = 0;
   auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph,
                      Graph* g) {
+    if (!IsCompat(subgraph, g)) {
+      LOG(WARNING) << "Pass in op compat failed.";
+      return;
+    }
     VLOG(4) << "squash scale-quant ops pair";

     GET_IR_NODE_FROM_SUBGRAPH(scale_in, scale_in, scale_quant_pattern);

@@ -407,9 +466,10 @@ void CPUQuantizeSquashPass::ScaleQuantSquash(Graph* graph) const {
     GET_IR_NODE_FROM_SUBGRAPH(quant_op, quant_op, scale_quant_pattern);

     if (quant_in->outputs.size() == 1 &&
-        BOOST_GET_CONST(float, scale_op->Op()->GetAttr("bias")) == 0.0f) {
+        scale_op->Op()->GetAttrIfExists<float>("bias") == 0.0) {
       auto quant_scale = quant_op->Op()->GetAttrIfExists<float>("Scale");
-      float scale_scale =
-          BOOST_GET_CONST(float, scale_op->Op()->GetAttr("scale"));
+      auto scale_scale = scale_op->Op()->GetAttrIfExists<float>("scale");

       PADDLE_ENFORCE_GT(quant_scale, 0.0f,

@@ -443,6 +503,11 @@ void CPUQuantizeSquashPass::QuantizeBf16Conv(Graph* graph) const {
   int found_quant_conv_squash_count = 0;
   auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph,
                      Graph* g) {
+    if (!IsCompat(subgraph, g)) {
+      LOG(WARNING) << "Pass in op compat failed.";
+      return;
+    }
     VLOG(4) << "squash quant-conv2d ops pair";

     GET_IR_NODE_FROM_SUBGRAPH(quant_in, quant_in, pattern);
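Besides the IsCompat guards, the handlers read the scale op's attributes through OpDesc::GetAttrIfExists<float>(...) instead of BOOST_GET_CONST(float, ...->GetAttr(...)). As far as I understand, GetAttrIfExists returns a default-constructed value (0.0f for float) when the attribute is absent, rather than throwing the way GetAttr does, so a missing "bias" is simply treated as zero. A minimal illustration under that assumption:

    // Illustration only; assumes OpDesc::GetAttrIfExists<T> returns T{} when
    // the attribute is missing (paddle/fluid/framework/op_desc.h).
    #include "paddle/fluid/framework/op_desc.h"

    float ReadBias(const paddle::framework::OpDesc& op) {
      // Old style: BOOST_GET_CONST(float, op.GetAttr("bias")) throws if
      // "bias" is absent or is not a float.
      // New style: a missing "bias" yields 0.0f, which the surrounding
      // `== 0.0` check then accepts.
      return op.GetAttrIfExists<float>("bias");
    }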
paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass.h  (+1 -4)

@@ -19,9 +19,6 @@
 #include <unordered_map>

 #include "paddle/fluid/framework/ir/fuse_pass_base.h"
-#include "paddle/fluid/framework/ir/graph.h"
-#include "paddle/fluid/framework/ir/graph_pattern_detector.h"
-#include "paddle/fluid/framework/ir/pass.h"

 namespace paddle {
 namespace framework {

@@ -30,10 +27,10 @@ namespace ir {
 /*
  * Squash dequantize->quantize pair pattern into requantize op
  */
 class Graph;

 class CPUQuantizeSquashPass : public FusePassBase {
  public:
+  CPUQuantizeSquashPass();
   virtual ~CPUQuantizeSquashPass() {}

  protected:
paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass_tester.cc  (+14 -3)

@@ -25,7 +25,8 @@ void SetOp(ProgramDesc* prog, const std::string& type, const std::string& name,
            const std::vector<std::string>& inputs,
            const std::vector<std::string>& outputs, bool use_mkldnn,
            const std::vector<float> scale = {}, float bias = 0.0,
-           const std::string& mkldnn_data_type = "float32") {
+           const std::string& mkldnn_data_type = "float32",
+           bool bias_after_scale = false, int groups = 1) {
   auto* op = prog->MutableBlock(0)->AppendOp();
   op->SetType(type);
   op->SetAttr("use_mkldnn", use_mkldnn);

@@ -37,6 +38,15 @@ void SetOp(ProgramDesc* prog, const std::string& type, const std::string& name,
     if (inputs.size() > 1) op->SetInput("Filter", {inputs[1]});
     if (inputs.size() > 2) op->SetInput("Bias", {inputs[2]});
     op->SetOutput("Output", {outputs[0]});
+    const std::vector<int> strides({1, 1});
+    const std::vector<int> paddings({1, 1});
+    const std::vector<int> dilations({1, 1});
+    op->SetAttr("strides", strides);
+    op->SetAttr("paddings", paddings);
+    op->SetAttr("dilations", dilations);
+    op->SetAttr("groups", groups);
+    op->SetAttr("padding_algorithm", std::string("EXPLICIT"));
+    op->SetAttr("data_format", std::string("NCHW"));
     op->SetAttr("force_fp32_output", false);
     op->SetAttr("mkldnn_data_type", mkldnn_data_type);
   } else if (type == "quantize") {

@@ -74,6 +84,7 @@ void SetOp(ProgramDesc* prog, const std::string& type, const std::string& name,
     op->SetOutput("Out", {outputs[0]});
     op->SetAttr("scale", scale[0]);
     op->SetAttr("bias", bias);
+    op->SetAttr("bias_after_scale", bias_after_scale);
   } else if (type == "matmul") {
     op->SetInput("X", {inputs[0]});
     op->SetInput("Y", {inputs[1]});

@@ -373,8 +384,8 @@ ProgramDesc BuildQuantConv2dProgramDesc(const bool& use_mkldnn,
     prog.MutableBlock(0)->Var(v);
   }
   SetOp(&prog, "quantize", "Quant", {"a"}, {"b"}, use_mkldnn, {quant_scale});
-  SetOp(&prog, "conv2d", "Conv2d", {"b"}, {"c"}, use_mkldnn, {}, 0.0f,
-        mkldnn_data_type);
+  SetOp(&prog, "conv2d", "Conv2d", {"b", "filter", "bias"}, {"c"}, use_mkldnn,
+        {}, 0.0f, mkldnn_data_type);
   return prog;
 }
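The extended SetOp helper now also sets the conv2d attributes that the new conv2d compat definition constrains (strides, paddings, dilations, groups, padding_algorithm, data_format), so the test programs it builds satisfy the pass precondition. A hypothetical call using the two new trailing parameters might look like the following; the variable names and literal values are illustrative, not taken from the patch.

    // Hypothetical usage of the extended SetOp signature from this tester
    // file. Variable names and literals are examples only.
    ProgramDesc prog;
    for (auto& v : std::vector<std::string>(
             {"x", "scale_out", "filter", "bias", "conv_out"}))
      prog.MutableBlock(0)->Var(v);

    // "scale" op: bias_after_scale passed explicitly via the new parameter.
    SetOp(&prog, "scale", "Scale", {"x"}, {"scale_out"}, /*use_mkldnn=*/true,
          /*scale=*/{1.5f}, /*bias=*/0.0f, /*mkldnn_data_type=*/"float32",
          /*bias_after_scale=*/false);

    // "conv2d" op: non-default group count via the new "groups" parameter.
    SetOp(&prog, "conv2d", "Conv2d", {"scale_out", "filter", "bias"},
          {"conv_out"}, /*use_mkldnn=*/true, /*scale=*/{}, /*bias=*/0.0f,
          /*mkldnn_data_type=*/"float32", /*bias_after_scale=*/false,
          /*groups=*/2);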
paddle/fluid/operators/compat/conv2d.pbtxt  (+7 -3)

@@ -9,6 +9,9 @@ def {
   inputs {
     name: "Bias"
   }
+  inputs {
+    name: "ResidualData"
+  }
   outputs {
     name: "Output"
   }

@@ -38,13 +41,14 @@ def {
     }
   }
 extra {
-  inputs {
-    name: "ResidualData"
-  }
   attrs {
     name: "is_test"
     type: BOOLEAN
   }
+  attrs {
+    name: "name"
+    type: STRING
+  }
   attrs {
     name: "use_cudnn"
     type: BOOLEAN
paddle/fluid/operators/compat/scale.pbtxt  (+8 -0)

@@ -20,6 +20,14 @@ def {
   }
 }
 extra {
+  attrs {
+    name: "name"
+    type: STRING
+  }
+  attrs {
+    name: "use_mkldnn"
+    type: BOOLEAN
+  }
   attrs {
     name: "op_role"
     type: INT