BaiXuePrincess / Paddle (fork of PaddlePaddle / Paddle)
Unverified commit 49108efa
Authored on Dec 15, 2021 by wenbin; committed via GitHub on Dec 15, 2021
remove bf16 (#38133)
* remove bf16
* remove comments
* remove wrong return
* fix UT
Parent: b28c374a
Showing 4 changed files with 51 additions and 0 deletions (+51, -0)
paddle/fluid/framework/ir/graph_pattern_detector.cc  (+17, -0)
paddle/fluid/framework/ir/graph_pattern_detector.h  (+10, -0)
paddle/fluid/framework/ir/mkldnn/cpu_bfloat16_placement_pass.cc  (+21, -0)
paddle/fluid/framework/ir/mkldnn/cpu_bfloat16_placement_pass.h  (+3, -0)
paddle/fluid/framework/ir/graph_pattern_detector.cc
@@ -2412,6 +2412,23 @@ PDNode *patterns::OrphanedBfloat16::operator()() {
   return next_op;
 }
 
+PDNode *patterns::UnsupportedBfloat16::operator()() {
+  auto *prev_op = pattern->NewNode(prev_op_repr())->assert_is_op();
+  prev_op->assert_more([&](Node *node) {
+    return node->Op()->HasAttr("mkldnn_data_type") == false;
+  });
+
+  auto *prev_out = pattern->NewNode(prev_out_repr())->AsOutput();
+  auto *op = pattern->NewNode(op_repr())->assert_is_op();
+  op->assert_more([&](Node *node) {
+    return node->Op()->GetAttrIfExists<std::string>("mkldnn_data_type") ==
+           "bfloat16";
+  });
+  prev_op->LinksTo({prev_out});
+  op->LinksFrom({prev_out});
+  return op;
+}
+
 PDNode *patterns::LastBfloat16Ops::operator()() {
   auto *op = pattern->NewNode(op_repr())->assert_is_op();
   op->assert_more([&](Node *node) {
 ...
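In plain terms, the new UnsupportedBfloat16 pattern matches a chain prev_op -> prev_out -> op, where prev_op is an operator that carries no mkldnn_data_type attribute, prev_out is its output variable, and op is an operator marked with mkldnn_data_type == "bfloat16". The placement-pass change further below uses this pattern to find bfloat16-marked operators whose input data type it then inspects.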
paddle/fluid/framework/ir/graph_pattern_detector.h
@@ -1416,6 +1416,16 @@ struct OrphanedBfloat16 : public PatternBase {
   PATTERN_DECL_NODE(next_op);
 };
 
+struct UnsupportedBfloat16 : public PatternBase {
+  UnsupportedBfloat16(PDPattern* pattern, const std::string& name_scope)
+      : PatternBase(pattern, name_scope, "unsupported_bfloat16") {}
+  PDNode* operator()();
+
+  PATTERN_DECL_NODE(prev_op);
+  PATTERN_DECL_NODE(prev_out);
+  PATTERN_DECL_NODE(op);
+};
+
 struct LastBfloat16Ops : public PatternBase {
   LastBfloat16Ops(PDPattern* pattern, const std::string& name_scope)
       : PatternBase(pattern, name_scope, "last_bfloat16_ops") {}
 ...
paddle/fluid/framework/ir/mkldnn/cpu_bfloat16_placement_pass.cc
@@ -71,10 +71,31 @@ void CPUBfloat16PlacementPass::RemoveOrphanedOperators(
   gpd(graph, handler);
 }
 
+void CPUBfloat16PlacementPass::RemoveUnsupportedOperators(
+    ir::Graph* graph, int* bfloat16_operators) const {
+  // now quantize is supported FP32 only, so try to find
+  // bfloat16 operator that input type is not FP32
+  GraphPatternDetector gpd;
+  patterns::UnsupportedBfloat16 unsupported_bfloat16_pattern{
+      gpd.mutable_pattern(), "unsupported_bfloat16"};
+  unsupported_bfloat16_pattern();
+  auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph,
+                     Graph* g) {
+    GET_IR_NODE_FROM_SUBGRAPH(prev_out, prev_out, unsupported_bfloat16_pattern);
+    GET_IR_NODE_FROM_SUBGRAPH(op, op, unsupported_bfloat16_pattern);
+    if ((prev_out->Var()->GetDataType() != proto::VarType::FP32)) {
+      op->Op()->SetAttr("mkldnn_data_type", std::string("float32"));
+      bfloat16_operators--;
+    }
+  };
+  gpd(graph, handler);
+}
+
 void CPUBfloat16PlacementPass::ApplyImpl(ir::Graph* graph) const {
   int bfloat16_operators = 0;
   SetMkldnnDataType(graph, &bfloat16_operators);
   RemoveOrphanedOperators(graph, &bfloat16_operators);
+  RemoveUnsupportedOperators(graph, &bfloat16_operators);
   PrettyLogDetail("--- marked %d operators to bfloat16 ",
                   bfloat16_operators);
 }
 ...
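With this change, ApplyImpl runs three steps in order: SetMkldnnDataType, RemoveOrphanedOperators, and the new RemoveUnsupportedOperators, which falls bfloat16-marked operators back to float32 when their input variable is not FP32. To see the step in action, below is a rough, hedged sketch of how a graph pass like this can be exercised inside the Paddle source tree. The op and variable names (relu, matmul, a, x, out) and the INT8 intermediate type are made up for illustration; this is not the unit test referenced by "fix UT" in the commit message.

// Hedged sketch (not from this commit): build a tiny ProgramDesc, run
// cpu_bfloat16_placement_pass on the resulting graph, and print which
// operators ended up with which mkldnn_data_type.
#include <iostream>

#include "paddle/fluid/framework/ir/graph.h"
#include "paddle/fluid/framework/ir/pass.h"
#include "paddle/fluid/framework/program_desc.h"

USE_PASS(cpu_bfloat16_placement_pass);

namespace fw = paddle::framework;

int main() {
  fw::ProgramDesc prog;
  auto* block = prog.MutableBlock(0);

  // A small chain relu -> matmul, where the intermediate variable is
  // deliberately not FP32: the situation RemoveUnsupportedOperators checks for.
  block->Var("a")->SetDataType(fw::proto::VarType::FP32);
  block->Var("x")->SetDataType(fw::proto::VarType::INT8);
  block->Var("out");

  auto* relu = block->AppendOp();
  relu->SetType("relu");
  relu->SetInput("X", {"a"});
  relu->SetOutput("Out", {"x"});

  auto* matmul = block->AppendOp();
  matmul->SetType("matmul");
  matmul->SetInput("X", {"x"});
  matmul->SetInput("Y", {"x"});
  matmul->SetOutput("Out", {"out"});

  fw::ir::Graph graph(prog);
  auto pass = fw::ir::PassRegistry::Instance().Get("cpu_bfloat16_placement_pass");
  pass->Apply(&graph);

  // Report the placement decision for every op the pass touched.
  for (auto* node : graph.Nodes()) {
    if (node->IsOp() && node->Op()->HasAttr("mkldnn_data_type")) {
      std::cout << node->Op()->Type() << " -> "
                << node->Op()->GetAttrIfExists<std::string>("mkldnn_data_type")
                << "\n";
    }
  }
  return 0;
}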
paddle/fluid/framework/ir/mkldnn/cpu_bfloat16_placement_pass.h
@@ -30,6 +30,9 @@ class CPUBfloat16PlacementPass : public Pass {
   void RemoveOrphanedOperators(ir::Graph* graph,
                                int* bfloat16_operators) const;
 
+  void RemoveUnsupportedOperators(ir::Graph* graph,
+                                  int* bfloat16_operators) const;
+
   void ApplyImpl(ir::Graph* graph) const override;
 };
 ...
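At the user level, this placement pass runs as part of oneDNN bfloat16 CPU inference. A minimal sketch of enabling that mode through the C++ inference API follows; the model path is a placeholder, and whether bfloat16 kernels are actually selected still depends on CPU support and on the data-type checks added above.

// Hedged sketch: enable oneDNN bfloat16 inference, the configuration
// under which cpu_bfloat16_placement_pass is applied.
// "./model_dir" is a placeholder for a real saved inference model.
#include "paddle_inference_api.h"

int main() {
  paddle_infer::Config config("./model_dir");
  config.EnableMKLDNN();
  config.EnableMkldnnBfloat16();  // marks supported ops for bfloat16 execution

  auto predictor = paddle_infer::CreatePredictor(config);
  // ... feed inputs and call predictor->Run() as usual ...
  return 0;
}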