// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <sstream>
#include <string>
#include <vector>

#include "paddle_infer_declare.h"  // NOLINT

///
/// \file paddle_pass_builder.h
///
/// \brief Class Paddle Pass Builder and its subclasses (pass strategies).
/// \section sec_intro Introduction
/// This class aims to build passes for paddle and define passes' strategies.
///
/// \author paddle-infer@baidu.com
/// \date 2020-3-23
/// \since 1.7

/// \namespace paddle
namespace paddle {

/// \class PaddlePassBuilder
/// \brief This class builds passes based on a vector<string> input. It is part
/// of the inference API. Users can build, insert, and delete passes using this
/// class and its functions.
///
/// Example Usage:
///     Build a new pass.
/// \code{cpp}
/// const vector<string> passes(1, "conv_relu_mkldnn_fuse_pass");
/// PaddlePassBuilder builder(passes);
/// \endcode
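///
/// A further sketch of editing an existing builder (the pass name used here
/// is only illustrative; any registered pass type works the same way):
/// \code{cpp}
/// builder.AppendPass("fc_fuse_pass");        // append a pass by type
/// builder.DeletePass("fc_fuse_pass");        // delete all passes of that type
/// std::string info = builder.DebugString();  // human-readable pass list
/// \endcode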
class PD_INFER_DECL PaddlePassBuilder {
 public:
  /// \brief Constructor of the class. It stores the input passes.
  /// \param[in] passes passes' types.
  explicit PaddlePassBuilder(const std::vector<std::string> &passes)
      : passes_(passes) {}

  /// \brief Stores the input passes.
  /// \param[in] passes passes' types.
  void SetPasses(std::initializer_list<std::string> passes) {
    passes_ = passes;
  }

  /// \brief Append a pass to the end of the passes.
  /// \param[in] pass_type the type of the new pass.
  void AppendPass(const std::string &pass_type);

  /// \brief Insert a pass at a specific position.
  /// \param[in] idx the position to insert at.
  /// \param[in] pass_type the type of the pass to insert.
  void InsertPass(size_t idx, const std::string &pass_type);

  /// \brief Delete the pass at position 'idx'.
  /// \param[in] idx the position to delete.
  void DeletePass(size_t idx);

  /// \brief Delete all passes that have the type 'pass_type'.
  /// \param[in] pass_type the type of the passes to be deleted.
  void DeletePass(const std::string &pass_type);

  /// \brief Delete all the passes.
  void ClearPasses();

  /// \brief Append an analysis pass.
  /// \param[in] pass the type of the new analysis pass.
  void AppendAnalysisPass(const std::string &pass);

  /// \brief Visualize the computation graph after each pass by generating a
  /// DOT language file; the files can be rendered with the Graphviz toolkit.
  void TurnOnDebug();
  /// \brief Human-readable information of the passes.
  std::string DebugString();

  /// \brief Get information of passes.
  /// \return Return the list of the passes.
  const std::vector<std::string> &AllPasses() const { return passes_; }

  /// \brief Get information of analysis passes.
  /// \return Return the list of analysis passes.
  std::vector<std::string> AnalysisPasses() const {
    auto passes = analysis_passes_;
    // Make sure ir_graph_to_program_pass is the last pass so that any
    // modification of the IR will be reflected in the program.
    passes.push_back("ir_graph_to_program_pass");
    return passes;
  }

 protected:
  /// \cond Protected
  std::vector<std::string> analysis_passes_{
      {"ir_graph_build_pass", "ir_graph_clean_pass", "ir_analysis_pass",
       "ir_params_sync_among_devices_pass", "adjust_cudnn_workspace_size_pass",
       "inference_op_replace_pass"}};
  std::vector<std::string> passes_;
  /// \endcond
};

/// \class PassStrategy
/// \brief This class defines the pass strategies, e.g. whether to use GPU/cuDNN
/// kernels or MKLDNN.
class PD_INFER_DECL PassStrategy : public PaddlePassBuilder {
 public:
  /// \brief Constructor of the PassStrategy class. It works the same as the
  /// PaddlePassBuilder class.
  /// \param[in] passes passes' types.
  explicit PassStrategy(const std::vector<std::string> &passes)
      : PaddlePassBuilder(passes) {}

  /// \brief Enable the use of cuDNN kernels.
  virtual void EnableCUDNN() {}

  /// \brief Enable the use of GPU fp16 kernels.
  virtual void Exp_EnableUseGpuFp16() {}

  /// \brief Enable the use of MKLDNN.
  /// The MKLDNN control exists in both CPU and GPU mode, because there can
  /// still be some CPU kernels running in GPU mode.
  virtual void EnableMKLDNN() {}

  /// \brief Enable MKLDNN quantize optimization.
  virtual void EnableMkldnnQuantizer() {}

  /// \brief Enable MKLDNN bfloat16.
  virtual void EnableMkldnnBfloat16() {}

  /// \brief Enable MKLDNN int8.
  virtual void EnableMkldnnInt8() {}

  /// \brief Check if we are using GPU.
  /// \return A bool variable implying whether we are in GPU mode.
  bool use_gpu() const { return use_gpu_; }

  /// \brief Check if we are using GPU fp16 kernels.
  /// \return A bool variable implying whether we are in GPU fp16 mode.
  bool use_gpu_fp16() const { return use_gpu_fp16_; }

  /// \brief Check if we are using XPU.
  /// \return A bool variable implying whether we are in XPU mode.
  bool use_xpu() const { return use_xpu_; }

  /// \brief Check if we are using NPU.
  /// \return A bool variable implying whether we are in NPU mode.
  bool use_npu() const { return use_npu_; }

  /// \brief Check if we are using IPU.
  /// \return A bool variable implying whether we are in IPU mode.
  bool use_ipu() const { return use_ipu_; }

  /// \brief Default destructor.
  virtual ~PassStrategy() = default;

 protected:
  /// \cond Protected
  bool use_xpu_{false};
  bool use_gpu_{false};
  bool use_npu_{false};
  bool use_ipu_{false};
  bool use_mkldnn_{false};
  bool use_gpu_fp16_{false};
  /// \endcond
};

/// \class CpuPassStrategy
/// \brief The CPU passes controller, used in AnalysisPredictor with CPU
/// mode.
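///
/// A minimal usage sketch (the default pass list depends on the build
/// configuration, e.g. whether MKLDNN support is compiled in):
/// \code{cpp}
/// CpuPassStrategy cpu_strategy;
/// cpu_strategy.EnableMKLDNN();                    // add MKLDNN-related passes
/// const auto &passes = cpu_strategy.AllPasses();  // inspect the pass list
/// \endcode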
class PD_INFER_DECL CpuPassStrategy : public PassStrategy {
 public:
  /// \brief Default constructor of CpuPassStrategy.
  CpuPassStrategy();

  /// \brief Construct by copying another CpuPassStrategy object.
  /// \param[in] other The CpuPassStrategy object we want to copy.
  explicit CpuPassStrategy(const CpuPassStrategy &other)
      : PassStrategy(other.AllPasses()) {
    use_gpu_ = other.use_gpu_;
    use_mkldnn_ = other.use_mkldnn_;
    use_mkldnn_quantizer_ = other.use_mkldnn_quantizer_;
    use_mkldnn_bfloat16_ = other.use_mkldnn_bfloat16_;
    use_mkldnn_int8_ = other.use_mkldnn_int8_;
  }
  /// \brief Default destructor.
  virtual ~CpuPassStrategy() = default;

  /// \brief Enable the use of cuDNN kernels.
  void EnableCUDNN() override;

  /// \brief Enable the use of MKLDNN.
  void EnableMKLDNN() override;

  /// \brief Enable MKLDNN quantize optimization.
  void EnableMkldnnQuantizer() override;

  /// \brief Enable MKLDNN bfloat16.
  void EnableMkldnnBfloat16() override;

  /// \brief Enable MKLDNN int8.
  void EnableMkldnnInt8() override;

 protected:
  /// \cond Protected
  bool use_mkldnn_quantizer_{false};
  bool use_mkldnn_bfloat16_{false};
  bool use_mkldnn_int8_{false};
  /// \endcond
};

/// \class GpuPassStrategy
/// \brief The GPU passes controller, used in AnalysisPredictor with GPU
/// mode.
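///
/// A minimal usage sketch (whether the cuDNN-specific passes take effect
/// depends on how the library was built):
/// \code{cpp}
/// GpuPassStrategy gpu_strategy;
/// gpu_strategy.EnableCUDNN();                      // prefer cuDNN kernels
/// std::string info = gpu_strategy.DebugString();   // dump the pass list
/// \endcode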
class PD_INFER_DECL GpuPassStrategy : public PassStrategy {
 public:
  /// \brief Default constructor of GpuPassStrategy.
  GpuPassStrategy();

  /// \brief Construct by copying another GpuPassStrategy object.
  /// \param[in] other The GpuPassStrategy object we want to copy.
  explicit GpuPassStrategy(const GpuPassStrategy &other)
      : PassStrategy(other.AllPasses()) {
    use_gpu_ = true;
    use_cudnn_ = other.use_cudnn_;
  }

  /// \brief Enable the use of cuDNN kernels.
  void EnableCUDNN() override;

  /// \brief Enable the use of GPU fp16 kernels.
  void Exp_EnableUseGpuFp16() override;

  /// \brief Not supported in GPU mode yet.
  void EnableMKLDNN() override;

  /// \brief Not supported in GPU mode yet.
  void EnableMkldnnQuantizer() override;

  /// \brief Not supported in GPU mode yet.
  void EnableMkldnnBfloat16() override;

  /// \brief Not supported in GPU mode yet.
  void EnableMkldnnInt8() override;

  /// \brief Default destructor.
  virtual ~GpuPassStrategy() = default;

 protected:
  /// \cond Protected
  bool use_cudnn_{false};
  bool use_gpu_fp16_{false};
  /// \endcond
};

/// \class XpuPassStrategy
/// \brief The XPU passes controller, used in AnalysisPredictor with XPU
/// mode.
class PD_INFER_DECL XpuPassStrategy final : public PassStrategy {
 public:
  XpuPassStrategy() : PassStrategy({}) { use_xpu_ = true; }
};

/// \class NpuPassStrategy
/// \brief The NPU passes controller, used in AnalysisPredictor with NPU
/// mode.
class PD_INFER_DECL NpuPassStrategy final : public PassStrategy {
 public:
  NpuPassStrategy() : PassStrategy({}) { use_npu_ = true; }

  /// \brief Construct by copying another NpuPassStrategy object.
  /// \param[in] other The NpuPassStrategy object we want to copy.
  explicit NpuPassStrategy(const NpuPassStrategy &other)
      : PassStrategy(other.AllPasses()) {
    use_npu_ = true;
  }
};

/// \class IpuPassStrategy
/// \brief The IPU passes controller, used in AnalysisPredictor with IPU
/// mode.
class PD_INFER_DECL IpuPassStrategy final : public PassStrategy {
 public:
  /// \brief Default constructor of IpuPassStrategy.
  IpuPassStrategy();

  /// \brief Construct by copying another IpuPassStrategy object.
  /// \param[in] other The IpuPassStrategy object we want to copy.
  explicit IpuPassStrategy(const IpuPassStrategy &other)
      : PassStrategy(other.AllPasses()) {
    use_ipu_ = true;
  }
};

/// \brief List of TensorRT subgraph passes.
PD_INFER_DECL extern const std::vector<std::string> kTRTSubgraphPasses;

/// \brief List of dlnne subgraph passes.
PD_INFER_DECL extern const std::vector<std::string> kDlnneSubgraphPasses;

/// \brief List of lite subgraph passes.
PD_INFER_DECL extern const std::vector<std::string> kLiteSubgraphPasses;

}  // namespace paddle