// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <sstream>
#include <string>
#include <vector>

#include "paddle_infer_declare.h"  // NOLINT

///
/// \file paddle_pass_builder.h
///
/// \brief Class Paddle Pass Builder and its subclasses (pass strategies).
/// \section sec_intro Introduction
/// This class aims to build passes for paddle and define passes' strategies.
///
/// \author paddle-infer@baidu.com
/// \date 2020-3-23
/// \since 1.7

/// \namespace paddle
namespace paddle {

/// \class PaddlePassBuilder
/// \brief This class builds passes based on vector<string> input. It is part
/// of the inference API. Users can build, insert, and delete passes using this
/// class and its functions.
///
/// Example Usage:
///     Build a new pass.
/// \code{cpp}
/// const vector<string> passes(1, "conv_relu_mkldnn_fuse_pass");
/// PaddlePassBuilder builder(passes);
/// \endcode
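///
/// Modify an existing builder (a minimal sketch; the pass names below are
/// illustrative only, not a recommended configuration):
/// \code{cpp}
/// builder.AppendPass("fc_fuse_pass");
/// builder.InsertPass(0, "simplify_with_basic_ops_pass");
/// builder.DeletePass("fc_fuse_pass");
/// \endcode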
class PD_INFER_DECL PaddlePassBuilder {
 public:
  /// \brief Constructor of the class. It stores the input passes.
  /// \param[in] passes passes' types.
  explicit PaddlePassBuilder(const std::vector<std::string> &passes)
      : passes_(passes) {}

  /// \brief Stores the input passes.
  /// \param[in] passes passes' types.
  void SetPasses(std::initializer_list<std::string> passes) {
    passes_ = passes;
  }

  /// \brief Append a pass to the end of the passes.
  /// \param[in] pass_type the type of the new pass.
  void AppendPass(const std::string &pass_type);

  /// \brief Insert a pass at a specific position.
  /// \param[in] idx the position to insert at.
  /// \param[in] pass_type the type of the pass to insert.
  void InsertPass(size_t idx, const std::string &pass_type);

  /// \brief Delete the pass at position 'idx'.
  /// \param[in] idx the position of the pass to delete.
  void DeletePass(size_t idx);

  /// \brief Get the position of a pass with a certain type.
  /// \param[in] pass_type the type of the pass to look up.
  size_t GetPassIndex(const std::string &pass_type);

  /// \brief Delete all passes that have a certain type 'pass_type'.
  /// \param[in] pass_type the pass type to be deleted.
  void DeletePass(const std::string &pass_type);

  /// \brief Delete all the passes.
  void ClearPasses();

  /// \brief Append an analysis pass.
  /// \param[in] pass the type of the new analysis pass.
  void AppendAnalysisPass(const std::string &pass);

  /// \brief Visualize the computation graph after each pass by generating DOT
  /// language files; they can be rendered with the Graphviz toolkit.
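  ///
  /// A minimal sketch, assuming a PaddlePassBuilder instance named 'builder'
  /// (the call order is illustrative):
  /// \code{cpp}
  /// builder.TurnOnDebug();                     // dump a DOT file after each pass
  /// std::string info = builder.DebugString();  // human-readable pass list
  /// \endcode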
  void TurnOnDebug();
  /// \brief Human-readable information of the passes.
  std::string DebugString();

  /// \brief Get information of passes.
  /// \return The list of passes.
  const std::vector<std::string> &AllPasses() const { return passes_; }

  /// \brief Get information of analysis passes.
  /// \return The list of analysis passes.
  std::vector<std::string> AnalysisPasses() const {
    auto passes = analysis_passes_;
    // Make sure ir_graph_to_program is the last pass so that any
    // modification of the IR persists in the program.
    passes.push_back("ir_graph_to_program_pass");
    return passes;
  }

 protected:
  /// \cond Protected
  std::vector<std::string> analysis_passes_{
      {"ir_graph_build_pass", "ir_graph_clean_pass", "ir_analysis_pass",
       "ir_params_sync_among_devices_pass", "adjust_cudnn_workspace_size_pass",
       "inference_op_replace_pass"}};
  std::vector<std::string> passes_;
  /// \endcond
};

/// \class PassStrategy
/// \brief This class defines pass strategies, e.g. whether to use GPU/cuDNN
/// kernels or MKLDNN.
class PD_INFER_DECL PassStrategy : public PaddlePassBuilder {
 public:
  /// \brief Constructor of PassStrategy class. It works the same as the
  /// PaddlePassBuilder constructor.
  /// \param[in] passes passes' types.
  explicit PassStrategy(const std::vector<std::string> &passes)
      : PaddlePassBuilder(passes) {}

  /// \brief Enable the use of cuDNN kernel.
  virtual void EnableCUDNN() {}

  /// \brief Enable the use of GPU fp16 kernels.
  virtual void Exp_EnableUseGpuFp16() {}

  /// \brief Enable the use of MKLDNN.
  /// The MKLDNN control exists in both CPU and GPU mode, because there can
  /// still be some CPU kernels running in GPU mode.
  virtual void EnableMKLDNN() {}

  /// \brief Enable MKLDNN quantize optimization.
  virtual void EnableMkldnnQuantizer() {}

  /// \brief Enable MKLDNN bfloat16.
  virtual void EnableMkldnnBfloat16() {}

  /// \brief Enable MKLDNN int8.
  virtual void EnableMkldnnInt8() {}

  /// \brief Check if we are using gpu.
  /// \return A bool variable implying whether we are in gpu mode.
  bool use_gpu() const { return use_gpu_; }

  /// \brief Check if we are using gpu fp16 kernel.
  /// \return A bool variable implying whether we are in gpu fp16 mode.
  bool use_gpu_fp16() const { return use_gpu_fp16_; }

  /// \brief Check if we are using xpu.
  /// \return A bool variable implying whether we are in xpu mode.
  bool use_xpu() const { return use_xpu_; }

  /// \brief Check if we are using npu.
  /// \return A bool variable implying whether we are in npu mode.
  bool use_npu() const { return use_npu_; }

  /// \brief Check if we are using ipu.
  /// \return A bool variable implying whether we are in ipu mode.
  bool use_ipu() const { return use_ipu_; }

  /// \brief Default destructor.
  virtual ~PassStrategy() = default;

 protected:
  /// \cond Protected
  bool use_xpu_{false};
  bool use_gpu_{false};
  bool use_npu_{false};
  bool use_ipu_{false};
  bool use_mkldnn_{false};
  bool use_gpu_fp16_{false};
  /// \endcond
};

/// \class CpuPassStrategy
/// \brief The CPU passes controller; it is used in AnalysisPredictor in CPU
/// mode.
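///
/// A minimal usage sketch (assuming CPU inference; only methods declared in
/// this class are used, and the call order is illustrative):
/// \code{cpp}
/// CpuPassStrategy cpu_strategy;
/// cpu_strategy.EnableMKLDNN();          // add the MKLDNN-related passes
/// cpu_strategy.EnableMkldnnBfloat16();  // optionally enable bfloat16 passes
/// \endcode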
class PD_INFER_DECL CpuPassStrategy : public PassStrategy {
 public:
  /// \brief Default constructor of CpuPassStrategy.
  CpuPassStrategy();

  /// \brief Construct by copying another CpuPassStrategy object.
  /// \param[in] other The CpuPassStrategy object we want to copy.
  explicit CpuPassStrategy(const CpuPassStrategy &other)
      : PassStrategy(other.AllPasses()) {
    use_gpu_ = other.use_gpu_;
    use_mkldnn_ = other.use_mkldnn_;
    use_mkldnn_quantizer_ = other.use_mkldnn_quantizer_;
    use_mkldnn_bfloat16_ = other.use_mkldnn_bfloat16_;
    use_mkldnn_int8_ = other.use_mkldnn_int8_;
  }
  /// \brief Default destructor.
  virtual ~CpuPassStrategy() = default;

  /// \brief Enable the use of cuDNN kernel.
  void EnableCUDNN() override;

  /// \brief Enable the use of MKLDNN.
  void EnableMKLDNN() override;

  /// \brief Enable MKLDNN quantize optimization.
  void EnableMkldnnQuantizer() override;

  /// \brief Enable MKLDNN bfloat16.
  void EnableMkldnnBfloat16() override;

  /// \brief Enable MKLDNN int8.
  void EnableMkldnnInt8() override;

 protected:
  /// \cond Protected
  bool use_mkldnn_quantizer_{false};
  bool use_mkldnn_bfloat16_{false};
  bool use_mkldnn_int8_{false};
  /// \endcond
};

/// \class GpuPassStrategy
/// \brief The GPU passes controller; it is used in AnalysisPredictor in GPU
/// mode.
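///
/// A minimal usage sketch (assuming GPU inference; Exp_EnableUseGpuFp16 is the
/// experimental fp16 switch declared in this class):
/// \code{cpp}
/// GpuPassStrategy gpu_strategy;
/// gpu_strategy.EnableCUDNN();           // prefer cuDNN kernels where available
/// gpu_strategy.Exp_EnableUseGpuFp16();  // optionally enable GPU fp16 passes
/// \endcode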
class PD_INFER_DECL GpuPassStrategy : public PassStrategy {
 public:
  /// \brief Default constructor of GpuPassStrategy.
  GpuPassStrategy();

  /// \brief Construct by copying another GpuPassStrategy object.
  /// \param[in] other The GpuPassStrategy object we want to copy.
  explicit GpuPassStrategy(const GpuPassStrategy &other)
      : PassStrategy(other.AllPasses()) {
    use_gpu_ = true;
    use_cudnn_ = other.use_cudnn_;
  }

  /// \brief Enable the use of cuDNN kernel.
  void EnableCUDNN() override;

  /// \brief Enable the use of GPU fp16 kernels.
  void Exp_EnableUseGpuFp16() override;

  /// \brief Not supported in GPU mode yet.
  void EnableMKLDNN() override;

  /// \brief Not supported in GPU mode yet.
  void EnableMkldnnQuantizer() override;

  /// \brief Not supported in GPU mode yet.
  void EnableMkldnnBfloat16() override;

  /// \brief Not supported in GPU mode yet.
  void EnableMkldnnInt8() override;

  /// \brief Default destructor.
  virtual ~GpuPassStrategy() = default;

 protected:
  /// \cond Protected
  bool use_cudnn_{false};
  bool use_gpu_fp16_{false};
  /// \endcond
};

/// \class XpuPassStrategy
/// \brief The XPU passes controller; it is used in AnalysisPredictor in XPU
/// mode.
class PD_INFER_DECL XpuPassStrategy final : public PassStrategy {
 public:
  XpuPassStrategy() : PassStrategy({}) { use_xpu_ = true; }
};

/// \class NpuPassStrategy
/// \brief The NPU passes controller; it is used in AnalysisPredictor in NPU
/// mode.
class PD_INFER_DECL NpuPassStrategy final : public PassStrategy {
 public:
  NpuPassStrategy() : PassStrategy({}) { use_npu_ = true; }

  /// \brief Construct by copying another NpuPassStrategy object.
  /// \param[in] other The NpuPassStrategy object we want to copy.
  explicit NpuPassStrategy(const NpuPassStrategy &other)
      : PassStrategy(other.AllPasses()) {
    use_npu_ = true;
  }
};

/// \class IpuPassStrategy
/// \brief The IPU passes controller; it is used in AnalysisPredictor in IPU
/// mode.
class PD_INFER_DECL IpuPassStrategy final : public PassStrategy {
 public:
  /// \brief Default constructor of IpuPassStrategy.
  IpuPassStrategy();

  /// \brief Construct by copying another IpuPassStrategy object.
  /// \param[in] other The IpuPassStrategy object we want to copy.
  explicit IpuPassStrategy(const IpuPassStrategy &other)
      : PassStrategy(other.AllPasses()) {
    use_ipu_ = true;
  }
};

/// \brief List of TensorRT subgraph passes.
PD_INFER_DECL extern const std::vector<std::string> kTRTSubgraphPasses;

/// \brief List of dlnne subgraph passes.
PD_INFER_DECL extern const std::vector<std::string> kDlnneSubgraphPasses;

/// \brief List of lite subgraph passes.
PD_INFER_DECL extern const std::vector<std::string> kLiteSubgraphPasses;

}  // namespace paddle