Unverified · Commit 1db36584 · authored by Wangzheee, committed by GitHub

[pass_enhance] mul_gru_fuse_pass; fc_gru_fuse_pass (#33793)

Parent 97f86d84
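This commit teaches both GRU fuse passes to validate the ops they are about to fuse: each pass constructor now registers OpCompat descriptions for "gru" and "mul" (FCGRUFusePass additionally for "elementwise_add", since it also folds the FC bias add), and the pattern handler skips any matched subgraph that fails IsCompat instead of fusing it blindly. Below is a minimal sketch of that guard pattern, assuming the FusePassBase/OpCompat/GraphPatternDetector API visible in the diff that follows; MyFusePass and its "mul"-only check are hypothetical and only illustrate the shape of the code, not part of this commit.

// Minimal sketch of the op-compat guard pattern this commit applies.
// Assumes the PaddlePaddle IR pass API shown in the diff below;
// "MyFusePass" is a hypothetical pass used only for illustration.
#include "paddle/fluid/framework/ir/fuse_pass_base.h"
#include "paddle/fluid/framework/ir/graph_pattern_detector.h"

namespace paddle {
namespace framework {
namespace ir {

class MyFusePass : public FusePassBase {
 public:
  MyFusePass() {
    // Declare, up front, the only "mul" signature this pass knows how to
    // fuse; matched subgraphs that deviate from it are rejected later.
    AddOpCompat(OpCompat("mul"))
        .AddInput("X")
        .IsTensor()
        .End()
        .AddInput("Y")
        .IsTensor()
        .End()
        .AddOutput("Out")
        .IsTensor()
        .End()
        .AddAttr("x_num_col_dims")
        .IsNumEQ(1)
        .End()
        .AddAttr("y_num_col_dims")
        .IsNumEQ(1)
        .End();
  }

 protected:
  void ApplyImpl(ir::Graph* graph) const override {
    FusePassBase::Init("my_fuse", graph);
    GraphPatternDetector gpd;
    // ... define the pattern on gpd.mutable_pattern() here ...
    auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph,
                       Graph* g) {
      // The new guard: bail out early instead of fusing an incompatible op.
      if (!IsCompat(subgraph, g)) {
        LOG(WARNING) << "Pass in op compat failed.";
        return;
      }
      // ... rewrite the matched subgraph into the fused op here ...
    };
    gpd(graph, handler);
  }
};

}  // namespace ir
}  // namespace framework
}  // namespace paddle

The constraint vocabulary in the diff reads the same way: IsStringIn restricts a string attribute to a whitelist, IsNumEQ/IsNumGE bound numeric attributes, IsType<bool>() pins the attribute type, and IsOptional marks an input or attribute that may be absent.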
@@ -30,8 +30,137 @@ namespace ir {
 class Node;
 
-static int BuildFusion(Graph* graph, const std::string& name_scope,
-                       Scope* scope, bool with_fc_bias) {
+MulGRUFusePass::MulGRUFusePass() {
+  AddOpCompat(OpCompat("gru"))
+      .AddInput("Input")
+      .IsTensor()
+      .End()
+      .AddInput("H0")
+      .IsTensor()
+      .IsOptional()
+      .End()
+      .AddInput("Weight")
+      .IsTensor()
+      .End()
+      .AddInput("Bias")
+      .IsTensor()
+      .End()
+      .AddOutput("BatchGate")
+      .IsTensor()
+      .End()
+      .AddOutput("BatchResetHiddenPrev")
+      .IsTensor()
+      .End()
+      .AddOutput("BatchHidden")
+      .IsTensor()
+      .End()
+      .AddOutput("Hidden")
+      .IsTensor()
+      .End()
+      .AddAttr("activation")
+      .IsStringIn({"sigmoid", "tanh", "relu", "identity"})
+      .End()
+      .AddAttr("gate_activation")
+      .IsStringIn({"sigmoid", "tanh", "relu", "identity"})
+      .End()
+      .AddAttr("is_reverse")
+      .IsType<bool>()
+      .End()
+      .AddAttr("origin_mode")
+      .IsType<bool>()
+      .IsOptional()
+      .End();
+  AddOpCompat(OpCompat("mul"))
+      .AddInput("X")
+      .IsTensor()
+      .End()
+      .AddInput("Y")
+      .IsTensor()
+      .End()
+      .AddOutput("Out")
+      .IsTensor()
+      .End()
+      .AddAttr("x_num_col_dims")
+      .IsNumEQ(1)
+      .End()
+      .AddAttr("y_num_col_dims")
+      .IsNumEQ(1)
+      .End();
+}
+
+FCGRUFusePass::FCGRUFusePass() {
+  AddOpCompat(OpCompat("gru"))
+      .AddInput("Input")
+      .IsTensor()
+      .End()
+      .AddInput("H0")
+      .IsTensor()
+      .IsOptional()
+      .End()
+      .AddInput("Weight")
+      .IsTensor()
+      .End()
+      .AddInput("Bias")
+      .IsTensor()
+      .End()
+      .AddOutput("BatchGate")
+      .IsTensor()
+      .End()
+      .AddOutput("BatchResetHiddenPrev")
+      .IsTensor()
+      .End()
+      .AddOutput("BatchHidden")
+      .IsTensor()
+      .End()
+      .AddOutput("Hidden")
+      .IsTensor()
+      .End()
+      .AddAttr("activation")
+      .IsStringIn({"sigmoid", "tanh", "relu", "identity"})
+      .End()
+      .AddAttr("gate_activation")
+      .IsStringIn({"sigmoid", "tanh", "relu", "identity"})
+      .End()
+      .AddAttr("is_reverse")
+      .IsType<bool>()
+      .End()
+      .AddAttr("origin_mode")
+      .IsType<bool>()
+      .IsOptional()
+      .End();
+  AddOpCompat(OpCompat("mul"))
+      .AddInput("X")
+      .IsTensor()
+      .End()
+      .AddInput("Y")
+      .IsTensor()
+      .End()
+      .AddOutput("Out")
+      .IsTensor()
+      .End()
+      .AddAttr("x_num_col_dims")
+      .IsNumEQ(1)
+      .End()
+      .AddAttr("y_num_col_dims")
+      .IsNumEQ(1)
+      .End();
+  AddOpCompat(OpCompat("elementwise_add"))
+      .AddInput("X")
+      .IsTensor()
+      .End()
+      .AddInput("Y")
+      .IsTensor()
+      .End()
+      .AddOutput("Out")
+      .IsTensor()
+      .End()
+      .AddAttr("axis")
+      .IsNumGE(-1)
+      .End();
+}
+
+int FCGRUFusePass::BuildFusion(Graph* graph, const std::string& name_scope,
+                               Scope* scope, bool with_fc_bias) const {
   GraphPatternDetector gpd;
   auto* pattern = gpd.mutable_pattern();
@@ -133,6 +262,10 @@ static int BuildFusion(Graph* graph, const std::string& name_scope,
   int fusion_count{0};
   auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph,
                      Graph* g) {
+    if (!IsCompat(subgraph, g)) {
+      LOG(WARNING) << "Pass in op compat failed.";
+      return;
+    }
     auto* x_n = subgraph.at(x);
     GET_IR_NODE_FROM_SUBGRAPH(w, w, fc_pattern);
     GET_IR_NODE_FROM_SUBGRAPH(mul, mul, fc_pattern);
@@ -189,8 +322,8 @@ static int BuildFusion(Graph* graph, const std::string& name_scope,
 void MulGRUFusePass::ApplyImpl(ir::Graph* graph) const {
   FusePassBase::Init(name_scope_, graph);
-  int fusion_count =
-      BuildFusion(graph, name_scope_, param_scope(), false /*with_fc_bias*/);
+  int fusion_count = MulGRUFusePass::BuildFusion(
+      graph, name_scope_, param_scope(), false /*with_fc_bias*/);
   AddStatis(fusion_count);
 }
@@ -198,8 +331,8 @@ void MulGRUFusePass::ApplyImpl(ir::Graph* graph) const {
 void FCGRUFusePass::ApplyImpl(ir::Graph* graph) const {
   FusePassBase::Init(name_scope_, graph);
-  int fusion_count =
-      BuildFusion(graph, name_scope_, param_scope(), true /*with_fc_bias*/);
+  int fusion_count = FCGRUFusePass::BuildFusion(
+      graph, name_scope_, param_scope(), true /*with_fc_bias*/);
   AddStatis(fusion_count);
 }
...
@@ -18,7 +18,6 @@
 #include "paddle/fluid/framework/ir/fuse_pass_base.h"
 #include "paddle/fluid/framework/ir/graph.h"
-#include "paddle/fluid/framework/ir/graph_pattern_detector.h"
 
 namespace paddle {
 namespace framework {
@@ -26,21 +25,22 @@ namespace ir {
 // The MulGRUFusePass and MulGRUFusePass will fuse to the same FusionGRU op.
+class Graph;
+
 class FCGRUFusePass : public FusePassBase {
  public:
+  FCGRUFusePass();
   virtual ~FCGRUFusePass() {}
 
  protected:
   void ApplyImpl(ir::Graph* graph) const override;
 
   const std::string name_scope_{"fc_gru_fuse"};
+  int BuildFusion(Graph* graph, const std::string& name_scope, Scope* scope,
+                  bool with_fc_bias) const;
 };
 
 // Just FC without bias
-class MulGRUFusePass : public FusePassBase {
+class MulGRUFusePass : public FCGRUFusePass {
  public:
+  MulGRUFusePass();
   virtual ~MulGRUFusePass() {}
 
  protected:
...
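Two structural points in the header diff above are worth noting: BuildFusion is no longer a file-local static helper but a const member of FCGRUFusePass, and MulGRUFusePass now derives from FCGRUFusePass instead of FusePassBase. The two passes therefore share a single BuildFusion implementation and differ only in the with_fc_bias flag they pass to it, plus the extra "elementwise_add" compatibility check that only the bias-folding FCGRUFusePass registers.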