// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <sstream>
#include <string>
#include <vector>

/*! \file */

/*! \namespace paddle */
namespace paddle {

/** This is a pass builder based on pass name strings. It is part of the
 * inference API.
 */
class PaddlePassBuilder {
 public:
  explicit PaddlePassBuilder(const std::vector<std::string> &passes)
      : passes_(passes) {}

  /** Append a pass to the end of the passes. */
  void AppendPass(const std::string &pass_type);

  /** Insert a pass to a specific position.
   * @param idx the position to insert.
   * @param pass_type the pass key.
   */
  void InsertPass(size_t idx, const std::string &pass_type);

  /** Delete the `idx`-th pass. */
  void DeletePass(size_t idx);

  /** Delete all the passes that have type `pass_type`. */
  void DeletePass(const std::string &pass_type);
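  /** Example usage (a minimal sketch; "my_fuse_pass" and "my_cleanup_pass" are
   * placeholder pass names, not passes shipped with Paddle):
   * \code{.cpp}
   *   PaddlePassBuilder builder({"infer_clean_graph_pass"});
   *   builder.AppendPass("my_fuse_pass");        // clean, fuse
   *   builder.InsertPass(1, "my_cleanup_pass");  // clean, cleanup, fuse
   *   builder.DeletePass("my_cleanup_pass");     // clean, fuse
   *   builder.DeletePass(0);                     // fuse
   * \endcode
   */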

  /** Append an analysis pass. */
  void AppendAnalysisPass(const std::string &pass);

  /** Visualize the computation graph after each pass by generating a
   * DOT-language file; the files can be rendered with the Graphviz toolkit.
   */
  void TurnOnDebug();

  /** Human-readable information. */
  std::string DebugString();
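  /** Example usage (a minimal sketch; the pass names are only illustrative):
   * \code{.cpp}
   *   PaddlePassBuilder builder({"infer_clean_graph_pass", "fc_fuse_pass"});
   *   builder.TurnOnDebug();  // request a DOT dump of the graph after each pass
   *   std::string info = builder.DebugString();  // inspect the registered passes
   * \endcode
   */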

  const std::vector<std::string> &AllPasses() const { return passes_; }
  std::vector<std::string> AnalysisPasses() const {
    auto passes = analysis_passes_;
    // Make sure ir_graph_to_program_pass is the last pass, so that any
    // modification of the IR is persisted to the program.
    passes.push_back("ir_graph_to_program_pass");
    return passes;
  }
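  /** Example usage (a minimal sketch; "my_analysis_pass" is a placeholder
   * name). The returned list always ends with ir_graph_to_program_pass:
   * \code{.cpp}
   *   PaddlePassBuilder builder({});
   *   builder.AppendAnalysisPass("my_analysis_pass");
   *   auto analysis = builder.AnalysisPasses();
   *   // analysis: ..., my_analysis_pass, ir_graph_to_program_pass
   * \endcode
   */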

 protected:
  std::vector<std::string> analysis_passes_{
      {"ir_graph_build_pass", "ir_analysis_pass",
       "ir_params_sync_among_devices_pass"}};
  std::vector<std::string> passes_;
};

/** Pass strategy to help control the IR passes.
 */
class PassStrategy : public PaddlePassBuilder {
 public:
  explicit PassStrategy(const std::vector<std::string> &passes)
      : PaddlePassBuilder(passes) {}

  /** The MKLDNN control exists in both CPU and GPU modes, because some
   * kernels may still run on the CPU even in GPU mode.
   */
  virtual void EnableMKLDNN() {}

  bool use_gpu() const { return use_gpu_; }

  virtual ~PassStrategy() = default;

 protected:
  bool use_gpu_{false};
  bool use_mkldnn_{false};
};

/** The CPU pass strategy; it is used by AnalysisPredictor in CPU mode.
 */
class CpuPassStrategy : public PassStrategy {
 public:
  CpuPassStrategy() : PassStrategy({}) {
    // NOTE: the larger fusions should be placed at the front, so that they
    // are not broken up by the smaller ones.
    passes_.assign({
        "infer_clean_graph_pass",         //
        "attention_lstm_fuse_pass",       //
        "seqpool_concat_fuse_pass",       //
        "seqconv_eltadd_relu_fuse_pass",  //
        // "embedding_fc_lstm_fuse_pass", //
        "fc_lstm_fuse_pass",             //
        "mul_lstm_fuse_pass",            //
        "fc_gru_fuse_pass",              //
        "mul_gru_fuse_pass",             //
        "seq_concat_fc_fuse_pass",       //
        "fc_fuse_pass",                  //
        "repeated_fc_relu_fuse_pass",    //
        "squared_mat_sub_fuse_pass",     //
        "conv_bn_fuse_pass",             //
        "conv_eltwiseadd_bn_fuse_pass",  //
        "is_test_pass",                  //
    });
    use_gpu_ = false;
  }

  explicit CpuPassStrategy(const CpuPassStrategy &other)
      : PassStrategy(other.AllPasses()) {}

  virtual ~CpuPassStrategy() = default;

  void EnableMKLDNN() override {
// TODO(Superjomn) Consider the way to mix CPU with GPU.
#ifdef PADDLE_WITH_MKLDNN
    if (!use_mkldnn_) {
      passes_.insert(passes_.begin(), "mkldnn_placement_pass");

      for (auto &pass : std::vector<std::string>(
               {"depthwise_conv_mkldnn_pass",    //
                "conv_bias_mkldnn_fuse_pass",    //
                "conv3d_bias_mkldnn_fuse_pass",  //
                "conv_relu_mkldnn_fuse_pass",    //
                "conv_elementwise_add_mkldnn_fuse_pass"})) {
        passes_.push_back(pass);
      }
    }
    use_mkldnn_ = true;
#else
    use_mkldnn_ = false;
#endif
  }
};
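/** Example usage (a minimal sketch of configuring the CPU strategy; the
 * predictor/config plumbing that normally owns the strategy is omitted):
 * \code{.cpp}
 *   CpuPassStrategy cpu_strategy;
 *   cpu_strategy.EnableMKLDNN();  // no-op unless built with PADDLE_WITH_MKLDNN
 *   for (const auto &pass : cpu_strategy.AllPasses()) {
 *     // hand each IR pass name to whatever runs the passes
 *   }
 * \endcode
 */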

/** The GPU pass strategy; it is used by AnalysisPredictor in GPU mode.
 */
class GpuPassStrategy : public PassStrategy {
 public:
  GpuPassStrategy() : PassStrategy({}) {
    passes_.assign({
      "infer_clean_graph_pass",                        //
          "conv_affine_channel_fuse_pass",             //
          "conv_eltwiseadd_affine_channel_fuse_pass",  //
          "conv_bn_fuse_pass",                         //
#if CUDNN_VERSION >= 7100  // To run conv_fusion, the cuDNN version must be
                           // at least 7.1.
          "conv_elementwise_add_act_fuse_pass",   //
          "conv_elementwise_add2_act_fuse_pass",  //
          "conv_elementwise_add_fuse_pass",       //
#endif
    });

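    // Register transpose_flatten{3,4,5,6}_concat_fuse_pass, from the largest
    // variant down to the smallest.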
    for (int i = 6; i >= 3; i--) {
      passes_.push_back("transpose_flatten" + std::to_string(i) +
                        "_concat_fuse_pass");
    }
    use_gpu_ = true;
  }

  explicit GpuPassStrategy(const GpuPassStrategy &other)
      : PassStrategy(other.AllPasses()) {
    use_gpu_ = true;
  }

  void EnableMKLDNN() override;

  virtual ~GpuPassStrategy() = default;
};
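/** Example usage (a minimal sketch of selecting a strategy per device;
 * `use_gpu` is a caller-supplied flag, the real selection logic lives outside
 * this header, and <memory> is assumed for std::unique_ptr):
 * \code{.cpp}
 *   std::unique_ptr<PassStrategy> strategy;
 *   if (use_gpu) {
 *     strategy.reset(new GpuPassStrategy);
 *   } else {
 *     strategy.reset(new CpuPassStrategy);
 *   }
 *   strategy->TurnOnDebug();  // optionally dump a DOT file after each pass
 * \endcode
 */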

}  // namespace paddle