// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/framework/ir/fc_gru_fuse_pass.h"
#include <string>
#include <unordered_set>
#include "paddle/fluid/framework/lod_tensor.h"

namespace paddle {
namespace framework {
namespace ir {

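// Builds a detector for the FC (mul + optional elementwise_add) -> GRU
// subgraph, replaces every match with a single fusion_gru op, and returns
// the number of subgraphs fused. When with_fc_bias is true, the FC bias is
// folded into the fused op's bias ahead of time.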
static int BuildFusion(Graph* graph, const std::string& name_scope,
                       Scope* scope, bool with_fc_bias) {
  GraphPatternDetector gpd;
  auto* pattern = gpd.mutable_pattern();

  // Create pattern.
  patterns::FC fc_pattern(pattern, name_scope);
  patterns::GRU gru_pattern(pattern, name_scope);

  PDNode* x =
      pattern->NewNode(patterns::UniqueKey("x"))->assert_var_not_persistable();

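  // Chain the two patterns: fc_pattern consumes x and returns the FC output
  // node, which gru_pattern then consumes, so a single match covers the
  // whole FC -> GRU sequence.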
  auto* fc_out = fc_pattern(x, with_fc_bias, /* with_relu */ false);
  fc_out->AsIntermediate();  // fc_out is a temporary var; it is removed after the fuse.
  gru_pattern(fc_out);

  // Create the OpDesc for the new fusion_gru op.
  auto gru_creator = [&](Node* gru, Node* x, Node* weight_x, Node* weight_h,
                         Node* bias, Node* hidden, Node* fc_bias) {
    OpDesc op_desc;
    op_desc.SetType("fusion_gru");

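// NEW_NAME builds a unique, pass-scoped name for the variables this fusion
// creates.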
#define NEW_NAME(x) name_scope + "/at." #x ".new"
#define SET_IN(Key, node__) op_desc.SetInput(#Key, {node__->Name()})
    SET_IN(X, x);
    SET_IN(WeightX, weight_x);
    SET_IN(WeightH, weight_h);
    if (with_fc_bias) {
      op_desc.SetInput("Bias", {NEW_NAME(bias) + bias->Name()});
    } else {
      SET_IN(Bias, bias);
    }
#undef SET_IN
    op_desc.SetInput("H0", {});
    op_desc.SetOutput("Hidden", {hidden->Name()});
    op_desc.SetAttr("is_reverse", gru->Op()->GetAttr("is_reverse"));
    // TODO(TJ): This should be an option for inference.
    op_desc.SetAttr("use_seq", true);

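// The fused op exposes several intermediate (workspace) outputs; route each
// one to a fresh variable named via NEW_NAME.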
#define SET_INTERMEDIATE_OUT(key) op_desc.SetOutput(#key, {NEW_NAME(key)})
    SET_INTERMEDIATE_OUT(ReorderedH0);
    SET_INTERMEDIATE_OUT(XX);
    SET_INTERMEDIATE_OUT(BatchedInput);
    SET_INTERMEDIATE_OUT(BatchedOut);
#undef SET_INTERMEDIATE_OUT

    auto* op = graph->CreateOpNode(&op_desc);
    PADDLE_ENFORCE(graph->Has(kParamScopeAttr));
    auto& scope = graph->Get<Scope>(kParamScopeAttr);
    if (with_fc_bias) {
      // Fusion GRU bias = fc bias + gru bias.
      auto* fusion_bias_var = scope.Var(NEW_NAME(bias) + bias->Name());
      PADDLE_ENFORCE(fusion_bias_var);
      auto* out_bias_tensor =
          fusion_bias_var->GetMutable<framework::LoDTensor>();
      auto* gru_bias_var = scope.FindVar(bias->Name());
      auto* fc_bias_var = scope.FindVar(fc_bias->Name());
      PADDLE_ENFORCE(gru_bias_var);
      PADDLE_ENFORCE(fc_bias_var);
      const auto& gru_bias_tensor = gru_bias_var->Get<framework::LoDTensor>();
      const auto& fc_bias_tensor = fc_bias_var->Get<framework::LoDTensor>();
      // New bias = fc bias + gru bias, elementwise.
      out_bias_tensor->Resize(gru_bias_tensor.dims());
      auto* data = out_bias_tensor->mutable_data<float>(platform::CPUPlace());
      for (int i = 0; i < out_bias_tensor->numel(); i++) {
        data[i] =
            fc_bias_tensor.data<float>()[i] + gru_bias_tensor.data<float>()[i];
      }
    }

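// Create graph variable nodes for the intermediate outputs declared above
// and link each one as an output of the fused op.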
#define NEW_INTERMEDIATE_OUT(key)                \
  VarDesc key(NEW_NAME(key));                    \
  key.SetPersistable(false);                     \
  auto* key##_node = graph->CreateVarNode(&key); \
  IR_NODE_LINK_TO(op, key##_node);

    NEW_INTERMEDIATE_OUT(ReorderedH0);
    NEW_INTERMEDIATE_OUT(XX);
    NEW_INTERMEDIATE_OUT(BatchedInput);
    NEW_INTERMEDIATE_OUT(BatchedOut);
#undef NEW_NAME
#undef NEW_INTERMEDIATE_OUT

    IR_NODE_LINK_TO(x, op);
    IR_NODE_LINK_TO(weight_x, op);
    IR_NODE_LINK_TO(weight_h, op);
    IR_NODE_LINK_TO(bias, op);  // actually should link to the new bias if it exists
    IR_NODE_LINK_TO(op, hidden);
    // TODO: H0 is set empty above and is not linked here.
    return op;
  };

  int fusion_count{0};
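  // Invoked once per matched subgraph: build the fused op, then remove the
  // nodes it replaces.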
  auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph,
                     Graph* g) {
    auto* x_n = subgraph.at(x);
    GET_IR_NODE_FROM_SUBGRAPH(w, w, fc_pattern);
    GET_IR_NODE_FROM_SUBGRAPH(mul, mul, fc_pattern);
    GET_IR_NODE_FROM_SUBGRAPH(fc_out, elementwise_add_out, fc_pattern);
    GET_IR_NODE_FROM_SUBGRAPH(Weight, Weight, gru_pattern);
    GET_IR_NODE_FROM_SUBGRAPH(gru, gru, gru_pattern);
    GET_IR_NODE_FROM_SUBGRAPH(Bias, Bias, gru_pattern);
    GET_IR_NODE_FROM_SUBGRAPH(Hidden, Hidden, gru_pattern);
    // Nodes that need to be removed.
    GET_IR_NODE_FROM_SUBGRAPH(BatchGate, BatchGate, gru_pattern);
    GET_IR_NODE_FROM_SUBGRAPH(BatchResetHiddenPrev, BatchResetHiddenPrev,
                              gru_pattern);
    GET_IR_NODE_FROM_SUBGRAPH(BatchHidden, BatchHidden, gru_pattern);

    if (with_fc_bias) {
      GET_IR_NODE_FROM_SUBGRAPH(mul_out, mul_out, fc_pattern);
      GET_IR_NODE_FROM_SUBGRAPH(fc_bias, bias, fc_pattern);
      GET_IR_NODE_FROM_SUBGRAPH(elementwise_add, elementwise_add, fc_pattern);

      gru_creator(gru, x_n, w, Weight, Bias, Hidden, fc_bias);
      // Remove unneeded nodes.
      std::unordered_set<const Node*> marked_nodes(
          {mul, gru, elementwise_add, fc_bias, fc_out, mul_out, BatchGate,
           BatchResetHiddenPrev, BatchHidden});
      GraphSafeRemoveNodes(graph, marked_nodes);
    } else {
      gru_creator(gru, x_n, w, Weight, Bias, Hidden, nullptr);
      // Remove unneeded nodes.
      std::unordered_set<const Node*> marked_nodes(
          {mul, gru, BatchGate, BatchResetHiddenPrev, BatchHidden});
      GraphSafeRemoveNodes(graph, marked_nodes);
    }

    ++fusion_count;
  };

  gpd(graph, handler);

  return fusion_count;
}

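// Fuses mul + gru (FC without bias) into fusion_gru.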
void MulGRUFusePass::ApplyImpl(ir::Graph* graph) const {
  FusePassBase::Init(name_scope_, graph);

  int fusion_count =
      BuildFusion(graph, name_scope_, param_scope(), false /*with_fc_bias*/);

  AddStatis(fusion_count);
}

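// Fuses mul + elementwise_add + gru (FC with bias) into fusion_gru.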
void FCGRUFusePass::ApplyImpl(ir::Graph* graph) const {
  FusePassBase::Init(name_scope_, graph);

  int fusion_count =
      BuildFusion(graph, name_scope_, param_scope(), true /*with_fc_bias*/);

  AddStatis(fusion_count);
}

}  // namespace ir
}  // namespace framework
}  // namespace paddle

REGISTER_PASS(mul_gru_fuse_pass, paddle::framework::ir::MulGRUFusePass);
REGISTER_PASS(fc_gru_fuse_pass, paddle::framework::ir::FCGRUFusePass);
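
// A minimal usage sketch (an assumption for illustration; the exact pass
// retrieval and Apply() signatures vary across Paddle versions):
//
//   auto pass =
//       paddle::framework::ir::PassRegistry::Instance().Get("fc_gru_fuse_pass");
//   graph = pass->Apply(graph);  // rewrites matched FC + GRU chains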