// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <sstream>
#include <string>
#include <vector>

/*! \file */

/*! \namespace paddle */
namespace paddle {

/** This is a pass builder based on strings. It is part of the inference API.
 */
class PaddlePassBuilder {
 public:
  explicit PaddlePassBuilder(const std::vector<std::string> &passes)
      : passes_(passes) {}

  void SetPasses(std::initializer_list<std::string> passes) {
    passes_ = passes;
  }

  /** Append a pass to the end of the pass list. */
  void AppendPass(const std::string &pass_type);

  /** Insert a pass at a specific position.
   * @param idx the position to insert at.
   * @param pass_type the pass key.
   */
  void InsertPass(size_t idx, const std::string &pass_type);

  /** Delete the `idx`-th pass. */
  void DeletePass(size_t idx);

  /** Delete all the passes that have the type `pass_type`. */
  void DeletePass(const std::string &pass_type);

  void ClearPasses();
  /** Append an analysis pass. */
  void AppendAnalysisPass(const std::string &pass);

  /** Visualize the computation graph after each pass by generating a DOT
   * language file; the generated files can be rendered with the Graphviz
   * toolkit.
   */
  void TurnOnDebug();

  /** Human-readable information. */
  std::string DebugString();

  const std::vector<std::string> &AllPasses() const { return passes_; }
  std::vector<std::string> AnalysisPasses() const {
    auto passes = analysis_passes_;
    // Make ir_graph_to_program_pass the last pass, so that any modification
    // of the IR is reflected in the program.
    passes.push_back("ir_graph_to_program_pass");
    return passes;
  }

 protected:
  std::vector<std::string> analysis_passes_{
      {"ir_graph_build_pass", "ir_analysis_pass",
       "ir_params_sync_among_devices_pass"}};
  std::vector<std::string> passes_;
};
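
// Usage sketch (illustrative only, not part of this header): driving the
// string-based builder directly. The pass names below are examples and are
// not guaranteed to be registered in every build.
//
//   paddle::PaddlePassBuilder builder({"infer_clean_graph_pass"});
//   builder.AppendPass("fc_fuse_pass");           // run fc fusion last
//   builder.InsertPass(1, "conv_bn_fuse_pass");   // insert at position 1
//   builder.DeletePass("fc_fuse_pass");           // delete by pass type
//   for (const auto &pass : builder.AllPasses()) {
//     std::cout << pass << std::endl;             // requires <iostream>
//   }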

/** Pass strategy to help control the IR passes.
 */
class PassStrategy : public PaddlePassBuilder {
 public:
  explicit PassStrategy(const std::vector<std::string> &passes)
      : PaddlePassBuilder(passes) {}

  /** The MKLDNN control exists in both CPU and GPU modes, because some
   * kernels may still run on the CPU even in GPU mode.
   */
  virtual void EnableMKLDNN() {}

  /** Enable MKLDNN quantize optimization. */
  virtual void EnableMkldnnQuantizer() {}

  bool use_gpu() const { return use_gpu_; }

  virtual ~PassStrategy() = default;

 protected:
  bool use_gpu_{false};
  bool use_mkldnn_{false};
};
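
// Sketch (illustrative only): PassStrategy is the polymorphic interface a
// predictor can configure against; the concrete CPU/GPU strategies are
// declared below. "fc_fuse_pass" is only an example pass name.
//
//   void ConfigurePasses(paddle::PassStrategy *strategy) {
//     strategy->EnableMKLDNN();  // no-op unless a subclass overrides it
//     if (!strategy->use_gpu()) {
//       strategy->AppendPass("fc_fuse_pass");
//     }
//   }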

/** The CPU passes controller; it is used in AnalysisPredictor in CPU mode.
 */
class CpuPassStrategy : public PassStrategy {
 public:
  CpuPassStrategy();

  explicit CpuPassStrategy(const CpuPassStrategy &other)
      : PassStrategy(other.AllPasses()) {}

  virtual ~CpuPassStrategy() = default;

  void EnableMKLDNN() override {
// TODO(Superjomn) Consider the way to mix CPU with GPU.
#ifdef PADDLE_WITH_MKLDNN
    if (!use_mkldnn_) {
      passes_.insert(passes_.begin(), "mkldnn_placement_pass");

      for (auto &pass : std::vector<std::string>(
               {"depthwise_conv_mkldnn_pass",    //
                "conv_bias_mkldnn_fuse_pass",    //
                "conv3d_bias_mkldnn_fuse_pass",  //
                "conv_relu_mkldnn_fuse_pass",    //
                "conv_elementwise_add_mkldnn_fuse_pass"})) {
        passes_.push_back(pass);
      }
    }
    use_mkldnn_ = true;
#else
    use_mkldnn_ = false;
#endif
  }

  void EnableMkldnnQuantizer() override {
#ifdef PADDLE_WITH_MKLDNN
    if (!use_mkldnn_quantizer_) {
      passes_.push_back("cpu_quantize_placement_pass");
    }
    use_mkldnn_quantizer_ = true;
#else
    use_mkldnn_quantizer_ = false;
#endif
  }

 protected:
  bool use_mkldnn_quantizer_{false};
};
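
// Sketch of a typical CPU-mode setup (illustrative only): both Enable* calls
// fall back to no-ops when Paddle is built without PADDLE_WITH_MKLDNN, as the
// #ifdef guards above show.
//
//   paddle::CpuPassStrategy cpu_passes;
//   cpu_passes.EnableMKLDNN();           // prepends mkldnn_placement_pass and
//                                        // appends the mkldnn fuse passes
//   cpu_passes.EnableMkldnnQuantizer();  // appends cpu_quantize_placement_pass
//   std::string report = cpu_passes.DebugString();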

/** The GPU passes strategy; it is used in AnalysisPredictor in GPU mode.
 */
class GpuPassStrategy : public PassStrategy {
 public:
  GpuPassStrategy();

  explicit GpuPassStrategy(const GpuPassStrategy &other)
      : PassStrategy(other.AllPasses()) {
    use_gpu_ = true;
  }

  void EnableMKLDNN() override;
  void EnableMkldnnQuantizer() override;

  virtual ~GpuPassStrategy() = default;
};
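
// Sketch (illustrative only): copying a GPU strategy marks it as running on
// GPU, as the copy constructor above shows; the default constructor and the
// MKLDNN overrides are defined out of line.
//
//   paddle::GpuPassStrategy base;
//   paddle::GpuPassStrategy copy(base);
//   bool on_gpu = copy.use_gpu();  // true, set in the copy constructor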

extern const std::vector<std::string> kAnakinSubgraphPasses;

}  // namespace paddle