// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <sstream>
#include <string>
#include <vector>

/*! \file */

/*! \namespace paddle */
namespace paddle {

/** This is a string-based pass builder. It is part of the inference API.
 */
class PaddlePassBuilder {
 public:
  explicit PaddlePassBuilder(const std::vector<std::string> &passes)
      : passes_(passes) {}

  /** Append a pass to the end of the pass list. */
  void AppendPass(const std::string &pass_type);

  /** Insert a pass at a specific position.
   * @param idx the position to insert at.
   * @param pass_type the pass key.
   */
  void InsertPass(size_t idx, const std::string &pass_type);

  /** Delete the `idx`-th pass. */
  void DeletePass(size_t idx);

  /** Delete all the passes that have the type `pass_type`. */
  void DeletePass(const std::string &pass_type);

  void ClearPasses();
  /** Append an analysis pass. */
  void AppendAnalysisPass(const std::string &pass);

  /** Visualize the computation graph after each pass by generating a DOT
   * language file; the files can be rendered with the Graphviz toolkit.
   */
  void TurnOnDebug();

  /** Human-readable information. */
  std::string DebugString();

  const std::vector<std::string> &AllPasses() const { return passes_; }
  std::vector<std::string> AnalysisPasses() const {
    auto passes = analysis_passes_;
    // Make sure ir_graph_to_program_pass is the last pass, so that any
    // modification of the IR persists to the program.
    passes.push_back("ir_graph_to_program_pass");
    return passes;
  }

 protected:
  std::vector<std::string> analysis_passes_{
      {"ir_graph_build_pass", "ir_analysis_pass",
       "ir_params_sync_among_devices_pass"}};
  std::vector<std::string> passes_;
};
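
// A minimal usage sketch (the pass names below are illustrative only; the
// passes that are actually available depend on how Paddle was built):
//
//   PaddlePassBuilder builder({"infer_clean_graph_pass"});
//   builder.AppendPass("fc_fuse_pass");          // run this pass last
//   builder.InsertPass(0, "conv_bn_fuse_pass");  // run this pass first
//   builder.DeletePass("fc_fuse_pass");          // drop all passes of this type
//   for (const auto &pass : builder.AllPasses()) { /* inspect the list */ }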

/** Pass strategy to help control the IR passes.
 */
class PassStrategy : public PaddlePassBuilder {
 public:
  explicit PassStrategy(const std::vector<std::string> &passes)
      : PaddlePassBuilder(passes) {}

  /** The MKLDNN control exists in both CPU and GPU mode, because there can
   * still be some CPU kernels running even in GPU mode.
   */
  virtual void EnableMKLDNN() {}

  /** Enable the quantization optimization. */
  virtual void EnableQuantizer() {}

  bool use_gpu() const { return use_gpu_; }

  virtual ~PassStrategy() = default;

 protected:
  bool use_gpu_{false};
  bool use_mkldnn_{false};
};

/** The CPU passes controller. It is used by AnalysisPredictor in CPU mode.
 */
class CpuPassStrategy : public PassStrategy {
 public:
  CpuPassStrategy();

  explicit CpuPassStrategy(const CpuPassStrategy &other)
      : PassStrategy(other.AllPasses()) {}

  virtual ~CpuPassStrategy() = default;

  void EnableMKLDNN() override {
// TODO(Superjomn) Consider the way to mix CPU with GPU.
#ifdef PADDLE_WITH_MKLDNN
    if (!use_mkldnn_) {
      passes_.insert(passes_.begin(), "mkldnn_placement_pass");

      for (auto &pass : std::vector<std::string>(
               {"depthwise_conv_mkldnn_pass",    //
                "conv_bias_mkldnn_fuse_pass",    //
                "conv3d_bias_mkldnn_fuse_pass",  //
                "conv_relu_mkldnn_fuse_pass",    //
                "conv_elementwise_add_mkldnn_fuse_pass"})) {
        passes_.push_back(pass);
      }
    }
    use_mkldnn_ = true;
#else
    use_mkldnn_ = false;
#endif
  }

  void EnableQuantizer() override {
    if (!use_quantizer_) {
      passes_.push_back("cpu_quantize_placement_pass");
    }
    use_quantizer_ = true;
  }

 protected:
  bool use_quantizer_{false};
};
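
// A minimal configuration sketch for the CPU strategy (whether the MKL-DNN
// passes take effect depends on PADDLE_WITH_MKLDNN being defined at build time):
//
//   CpuPassStrategy cpu_strategy;
//   cpu_strategy.EnableMKLDNN();     // a no-op when built without MKL-DNN
//   cpu_strategy.EnableQuantizer();  // adds cpu_quantize_placement_pass
//   const auto &passes = cpu_strategy.AllPasses();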

/** The GPU passes strategy. It is used by AnalysisPredictor in GPU mode.
 */
class GpuPassStrategy : public PassStrategy {
 public:
  GpuPassStrategy();

  explicit GpuPassStrategy(const GpuPassStrategy &other)
      : PassStrategy(other.AllPasses()) {
    use_gpu_ = true;
  }

  void EnableMKLDNN() override;
  void EnableQuantizer() override;

  virtual ~GpuPassStrategy() = default;
};
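
// A sketch of picking a strategy at runtime through the common base class.
// The `use_gpu` flag is hypothetical, std::unique_ptr would need <memory>,
// and the ownership handling here is illustrative only:
//
//   std::unique_ptr<PassStrategy> strategy;
//   if (use_gpu) {
//     strategy.reset(new GpuPassStrategy);
//   } else {
//     strategy.reset(new CpuPassStrategy);
//   }
//   strategy->EnableMKLDNN();  // the effect depends on the concrete strategy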

extern const std::vector<std::string> kAnakinSubgraphPasses;

}  // namespace paddle