Commit 1db36584 (unverified) in BaiXuePrincess / Paddle (fork of PaddlePaddle / Paddle)

Authored by Wangzheee on Jun 30, 2021; committed by GitHub on Jun 30, 2021.

[pass_enhance] mul_gru_fuse_pass; fc_gru_fuse_pass (#33793)

Parent: 97f86d84
Changes: 2 changed files with 144 additions and 11 deletions (+144 / -11)

paddle/fluid/framework/ir/fc_gru_fuse_pass.cc   +139 / -6
paddle/fluid/framework/ir/fc_gru_fuse_pass.h    +5 / -5
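In outline, the commit promotes the file-local helper BuildFusion to a protected member of FCGRUFusePass, adds constructors in which each pass declares op-compat rules for the operators it fuses (gru and mul for MulGRUFusePass; gru, mul and elementwise_add for FCGRUFusePass), and rebases MulGRUFusePass on FCGRUFusePass so the bias-less variant reuses the same machinery with with_fc_bias = false. Condensed from the header diff below, the resulting class shape is roughly:

  // Condensed sketch of fc_gru_fuse_pass.h after this commit; see the full diff below.
  class FCGRUFusePass : public FusePassBase {
   public:
    FCGRUFusePass();   // declares OpCompat rules for gru, mul, elementwise_add
    virtual ~FCGRUFusePass() {}

   protected:
    void ApplyImpl(ir::Graph* graph) const override;
    const std::string name_scope_{"fc_gru_fuse"};
    int BuildFusion(Graph* graph, const std::string& name_scope, Scope* scope,
                    bool with_fc_bias) const;
  };

  // Just FC without bias
  class MulGRUFusePass : public FCGRUFusePass {
   public:
    MulGRUFusePass();  // declares OpCompat rules for gru and mul only
    virtual ~MulGRUFusePass() {}

   protected:
    void ApplyImpl(ir::Graph* graph) const override;  // runs BuildFusion with with_fc_bias = false
  };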
paddle/fluid/framework/ir/fc_gru_fuse_pass.cc

@@ -30,8 +30,137 @@ namespace ir {
 
 class Node;
 
-static int BuildFusion(Graph* graph, const std::string& name_scope,
-                       Scope* scope, bool with_fc_bias) {
+MulGRUFusePass::MulGRUFusePass() {
+  AddOpCompat(OpCompat("gru"))
+      .AddInput("Input")
+      .IsTensor()
+      .End()
+      .AddInput("H0")
+      .IsTensor()
+      .IsOptional()
+      .End()
+      .AddInput("Weight")
+      .IsTensor()
+      .End()
+      .AddInput("Bias")
+      .IsTensor()
+      .End()
+      .AddOutput("BatchGate")
+      .IsTensor()
+      .End()
+      .AddOutput("BatchResetHiddenPrev")
+      .IsTensor()
+      .End()
+      .AddOutput("BatchHidden")
+      .IsTensor()
+      .End()
+      .AddOutput("Hidden")
+      .IsTensor()
+      .End()
+      .AddAttr("activation")
+      .IsStringIn({"sigmoid", "tanh", "relu", "identity"})
+      .End()
+      .AddAttr("gate_activation")
+      .IsStringIn({"sigmoid", "tanh", "relu", "identity"})
+      .End()
+      .AddAttr("is_reverse")
+      .IsType<bool>()
+      .End()
+      .AddAttr("origin_mode")
+      .IsType<bool>()
+      .IsOptional()
+      .End();
+
+  AddOpCompat(OpCompat("mul"))
+      .AddInput("X")
+      .IsTensor()
+      .End()
+      .AddInput("Y")
+      .IsTensor()
+      .End()
+      .AddOutput("Out")
+      .IsTensor()
+      .End()
+      .AddAttr("x_num_col_dims")
+      .IsNumEQ(1)
+      .End()
+      .AddAttr("y_num_col_dims")
+      .IsNumEQ(1)
+      .End();
+}
+
+FCGRUFusePass::FCGRUFusePass() {
+  AddOpCompat(OpCompat("gru"))
+      .AddInput("Input")
+      .IsTensor()
+      .End()
+      .AddInput("H0")
+      .IsTensor()
+      .IsOptional()
+      .End()
+      .AddInput("Weight")
+      .IsTensor()
+      .End()
+      .AddInput("Bias")
+      .IsTensor()
+      .End()
+      .AddOutput("BatchGate")
+      .IsTensor()
+      .End()
+      .AddOutput("BatchResetHiddenPrev")
+      .IsTensor()
+      .End()
+      .AddOutput("BatchHidden")
+      .IsTensor()
+      .End()
+      .AddOutput("Hidden")
+      .IsTensor()
+      .End()
+      .AddAttr("activation")
+      .IsStringIn({"sigmoid", "tanh", "relu", "identity"})
+      .End()
+      .AddAttr("gate_activation")
+      .IsStringIn({"sigmoid", "tanh", "relu", "identity"})
+      .End()
+      .AddAttr("is_reverse")
+      .IsType<bool>()
+      .End()
+      .AddAttr("origin_mode")
+      .IsType<bool>()
+      .IsOptional()
+      .End();
+
+  AddOpCompat(OpCompat("mul"))
+      .AddInput("X")
+      .IsTensor()
+      .End()
+      .AddInput("Y")
+      .IsTensor()
+      .End()
+      .AddOutput("Out")
+      .IsTensor()
+      .End()
+      .AddAttr("x_num_col_dims")
+      .IsNumEQ(1)
+      .End()
+      .AddAttr("y_num_col_dims")
+      .IsNumEQ(1)
+      .End();
+
+  AddOpCompat(OpCompat("elementwise_add"))
+      .AddInput("X")
+      .IsTensor()
+      .End()
+      .AddInput("Y")
+      .IsTensor()
+      .End()
+      .AddOutput("Out")
+      .IsTensor()
+      .End()
+      .AddAttr("axis")
+      .IsNumGE(-1)
+      .End();
+}
+
+int FCGRUFusePass::BuildFusion(Graph* graph, const std::string& name_scope,
+                               Scope* scope, bool with_fc_bias) const {
   GraphPatternDetector gpd;
   auto* pattern = gpd.mutable_pattern();
@@ -133,6 +262,10 @@ static int BuildFusion(Graph* graph, const std::string& name_scope,
   int fusion_count{0};
   auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph,
                      Graph* g) {
+    if (!IsCompat(subgraph, g)) {
+      LOG(WARNING) << "Pass in op compat failed.";
+      return;
+    }
     auto* x_n = subgraph.at(x);
     GET_IR_NODE_FROM_SUBGRAPH(w, w, fc_pattern);
     GET_IR_NODE_FROM_SUBGRAPH(mul, mul, fc_pattern);
@@ -189,8 +322,8 @@ static int BuildFusion(Graph* graph, const std::string& name_scope,
 void MulGRUFusePass::ApplyImpl(ir::Graph* graph) const {
   FusePassBase::Init(name_scope_, graph);
-  int fusion_count =
-      BuildFusion(graph, name_scope_, param_scope(), false /*with_fc_bias*/);
+  int fusion_count = MulGRUFusePass::BuildFusion(
+      graph, name_scope_, param_scope(), false /*with_fc_bias*/);
   AddStatis(fusion_count);
 }
@@ -198,8 +331,8 @@ void MulGRUFusePass::ApplyImpl(ir::Graph* graph) const {
 void FCGRUFusePass::ApplyImpl(ir::Graph* graph) const {
   FusePassBase::Init(name_scope_, graph);
-  int fusion_count =
-      BuildFusion(graph, name_scope_, param_scope(), true /*with_fc_bias*/);
+  int fusion_count = FCGRUFusePass::BuildFusion(
+      graph, name_scope_, param_scope(), true /*with_fc_bias*/);
   AddStatis(fusion_count);
 }
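The op-compat declarations above all follow the same fluent idiom: each AddInput/AddOutput/AddAttr opens a constraint on one slot of the op and is closed with End(); IsOptional() marks a slot that may be absent; and checkers such as IsTensor(), IsStringIn(), IsType<bool>(), IsNumEQ() and IsNumGE() restrict the values the pass is willing to fuse. Any matched subgraph whose gru, mul or elementwise_add op violates these constraints is skipped by the IsCompat() check added at the top of the handler. As an illustration only (the op name, slots and attribute below are invented, not part of this commit), a declaration for some other op written in the same style would look like:

  // Hypothetical example following the idiom of the constructors above;
  // "my_op" and its slots/attributes are illustrative, not real Paddle ops.
  AddOpCompat(OpCompat("my_op"))
      .AddInput("X")
      .IsTensor()
      .End()
      .AddInput("Mask")
      .IsTensor()
      .IsOptional()  // this input may be absent in a matched graph
      .End()
      .AddOutput("Out")
      .IsTensor()
      .End()
      .AddAttr("axis")
      .IsNumGE(-1)   // accept any axis value >= -1
      .End();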
paddle/fluid/framework/ir/fc_gru_fuse_pass.h

@@ -18,7 +18,6 @@
 #include "paddle/fluid/framework/ir/fuse_pass_base.h"
 #include "paddle/fluid/framework/ir/graph.h"
-#include "paddle/fluid/framework/ir/graph_pattern_detector.h"
 
 namespace paddle {
 namespace framework {
@@ -26,21 +25,22 @@ namespace ir {
 // The MulGRUFusePass and MulGRUFusePass will fuse to the same FusionGRU op.
-class Graph;
 
 class FCGRUFusePass : public FusePassBase {
  public:
+  FCGRUFusePass();
   virtual ~FCGRUFusePass() {}
 
  protected:
   void ApplyImpl(ir::Graph* graph) const override;
   const std::string name_scope_{"fc_gru_fuse"};
+  int BuildFusion(Graph* graph, const std::string& name_scope, Scope* scope,
+                  bool with_fc_bias) const;
 };
 
 // Just FC without bias
-class MulGRUFusePass : public FusePassBase {
+class MulGRUFusePass : public FCGRUFusePass {
  public:
+  MulGRUFusePass();
   virtual ~MulGRUFusePass() {}
 
  protected:
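The excerpt cuts off before the bottom of the files, so the pass registrations themselves are not shown. In Paddle, ir passes of this kind are registered under a string name with the REGISTER_PASS macro and later fetched from the pass registry; the sketch below shows the usual way such a pass is looked up and applied to a graph. It is a rough sketch, not code from this commit: the pass name "fc_gru_fuse_pass" is taken from the commit title, and the registration lines in the comment are only the typical pattern.

  // Hedged usage sketch: fetch a registered ir pass by name and run it on a graph.
  // Registration (not visible in this excerpt) typically follows the pattern:
  //   REGISTER_PASS(fc_gru_fuse_pass, paddle::framework::ir::FCGRUFusePass);
  //   REGISTER_PASS(mul_gru_fuse_pass, paddle::framework::ir::MulGRUFusePass);
  #include <memory>

  #include "paddle/fluid/framework/ir/pass.h"

  namespace fir = paddle::framework::ir;

  void RunFcGruFuse(std::unique_ptr<fir::Graph>* graph) {
    // Look up the pass by its registered name and rewrite matched mul/gru
    // (plus elementwise_add bias) subgraphs into a single FusionGRU op.
    std::unique_ptr<fir::Pass> pass =
        fir::PassRegistry::Instance().Get("fc_gru_fuse_pass");
    graph->reset(pass->Apply(graph->release()));
  }

In a real build the pass also has to be linked into the binary (tests pull it in with USE_PASS(fc_gru_fuse_pass)); the sketch glosses over that detail.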