// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <sstream>
#include <string>
#include <vector>

#include "paddle_infer_declare.h"  // NOLINT

///
/// \file paddle_pass_builder.h
///
/// \brief Class PaddlePassBuilder and its subclasses (pass strategies).
/// \section sec_intro Introduction
/// This class aims to build passes for Paddle and define pass strategies.
///
/// \author paddle-infer@baidu.com
/// \date 2020-3-23
/// \since 1.7

/// \namespace paddle
namespace paddle {

/// \class PaddlePassBuilder
/// \brief This class builds passes based on a vector<string> input. It is part
/// of the inference API. Users can build, insert, and delete passes using this
/// class and its functions.
///
/// Example Usage:
///     Build a new pass.
/// \code{cpp}
/// const vector<string> passes(1, "conv_relu_mkldnn_fuse_pass");
/// PaddlePassBuilder builder(passes);
/// \endcode
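///
/// A minimal sketch of editing an existing pass list (the pass names below
/// are only illustrative; any registered pass type can be used):
/// \code{cpp}
/// PaddlePassBuilder builder({"conv_bn_fuse_pass", "fc_fuse_pass"});
/// // Append at the end, insert at a position, delete by type, and query.
/// builder.AppendPass("conv_relu_mkldnn_fuse_pass");
/// builder.InsertPass(0, "simplify_with_basic_ops_pass");
/// builder.DeletePass("fc_fuse_pass");
/// size_t idx = builder.GetPassIndex("conv_bn_fuse_pass");
/// \endcode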
class PD_INFER_DECL PaddlePassBuilder {
 public:
  /// \brief Constructor of the class. It stores the input passes.
  /// \param[in] passes passes' types.
  explicit PaddlePassBuilder(const std::vector<std::string> &passes)
      : passes_(passes) {}

  /// \brief Stores the input passes.
  /// \param[in] passes passes' types.
  void SetPasses(std::initializer_list<std::string> passes) {
    passes_ = passes;
  }

  /// \brief Append a pass to the end of the passes.
  /// \param[in] pass_type the type of the new pass.
  void AppendPass(const std::string &pass_type);

  /// \brief Insert a pass at a specific position.
  /// \param[in] idx the position to insert at.
  /// \param[in] pass_type the type of the pass to insert.
  void InsertPass(size_t idx, const std::string &pass_type);

  /// \brief Delete the pass at position 'idx'.
  /// \param[in] idx the position of the pass to delete.
  void DeletePass(size_t idx);

  /// \brief Delete all passes that have the type 'pass_type'.
  /// \param[in] pass_type the type of the passes to be deleted.
  void DeletePass(const std::string &pass_type);

  /// \brief Get the position of a certain pass.
  /// \param[in] pass_type the type of the pass whose index is queried.
  size_t GetPassIndex(const std::string &pass_type);

  /// \brief Delete all the passes.
  void ClearPasses();

  /// \brief Append an analysis pass.
  /// \param[in] pass the type of the new analysis pass.
  void AppendAnalysisPass(const std::string &pass);

  /// \brief Visualize the computation graph after each pass by generating a
  /// DOT language file; the generated files can be drawn with the Graphviz
  /// toolkit.
  void TurnOnDebug();
  /// \brief Human-readable information of the passes.
  std::string DebugString();

  /// \brief Get information of passes.
  /// \return Return list of the passes.
  const std::vector<std::string> &AllPasses() const { return passes_; }

  /// \brief Get information of analysis passes.
  /// \return Return list of analysis passes.
  std::vector<std::string> AnalysisPasses() const {
    auto passes = analysis_passes_;
    // Make sure ir_graph_to_program_pass is the last pass so that any
    // modification of the IR persists to the program.
    passes.push_back("ir_graph_to_program_pass");
    return passes;
  }

 protected:
  /// \cond Protected
  std::vector<std::string> analysis_passes_{
      {"ir_graph_build_pass", "ir_graph_clean_pass", "ir_analysis_pass",
       "ir_params_sync_among_devices_pass", "adjust_cudnn_workspace_size_pass",
       "inference_op_replace_pass"}};
  std::vector<std::string> passes_;
  /// \endcond
};

/// \class PassStrategy
/// \brief This class defines pass strategies, e.g. whether to use GPU/cuDNN
/// kernels or MKLDNN.
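///
/// A sketch of tweaking the strategy a predictor will actually run, assuming
/// the paddle::AnalysisConfig API from paddle_analysis_config.h, whose
/// pass_builder() exposes the active PassStrategy:
/// \code{cpp}
/// paddle::AnalysisConfig config;
/// config.SwitchIrOptim(true);             // run the IR optimization passes
/// auto *builder = config.pass_builder();  // the config's PassStrategy
/// builder->DeletePass("fc_fuse_pass");    // drop one pass by type
/// builder->TurnOnDebug();                 // dump a DOT file after each pass
/// \endcode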
class PD_INFER_DECL PassStrategy : public PaddlePassBuilder {
 public:
  /// \brief Constructor of the PassStrategy class. It works the same as the
  /// PaddlePassBuilder class.
  /// \param[in] passes passes' types.
  explicit PassStrategy(const std::vector<std::string> &passes)
      : PaddlePassBuilder(passes) {}

  /// \brief Enable the use of cuDNN kernel.
  virtual void EnableCUDNN() {}

  /// \brief Enable the use of GPU fp16 kernels.
  virtual void Exp_EnableUseGpuFp16() {}

  /// \brief Enable the use of MKLDNN.
  /// The MKLDNN control exists in both CPU and GPU mode, because there can
  /// still be some CPU kernels running in GPU mode.
  virtual void EnableMKLDNN() {}

  /// \brief Enable MKLDNN quantize optimization.
  virtual void EnableMkldnnQuantizer() {}

  /// \brief Enable MKLDNN bfloat16.
  virtual void EnableMkldnnBfloat16() {}

  /// \brief Check if we are using gpu.
  /// \return A bool variable implying whether we are in gpu mode.
  bool use_gpu() const { return use_gpu_; }

  /// \brief Check if we are using gpu fp16 kernel.
  /// \return A bool variable implying whether we are in gpu fp16 mode.
  bool use_gpu_fp16() const { return use_gpu_fp16_; }

  /// \brief Check if we are using xpu.
  /// \return A bool variable implying whether we are in xpu mode.
  bool use_xpu() const { return use_xpu_; }

  /// \brief Check if we are using npu.
  /// \return A bool variable implying whether we are in npu mode.
  bool use_npu() const { return use_npu_; }

  /// \brief Check if we are using ipu.
  /// \return A bool variable implying whether we are in ipu mode.
  bool use_ipu() const { return use_ipu_; }

  /// \brief Default destructor.
  virtual ~PassStrategy() = default;

 protected:
  /// \cond Protected
  bool use_xpu_{false};
  bool use_gpu_{false};
  bool use_npu_{false};
  bool use_ipu_{false};
  bool use_mkldnn_{false};
  bool use_gpu_fp16_{false};
  /// \endcond
};

/// \class CpuPassStrategy
/// \brief The CPU passes controller, used in AnalysisPredictor in CPU mode.
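///
/// A minimal CPU-side sketch (a hedged example; EnableMKLDNN only has an
/// effect in builds compiled with MKLDNN support):
/// \code{cpp}
/// CpuPassStrategy cpu_strategy;          // starts from the default CPU passes
/// cpu_strategy.EnableMKLDNN();           // append the MKLDNN fuse passes
/// CpuPassStrategy copied(cpu_strategy);  // the copy keeps the MKLDNN flags
/// \endcode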
class PD_INFER_DECL CpuPassStrategy : public PassStrategy {
 public:
  /// \brief Default constructor of CpuPassStrategy.
  CpuPassStrategy();

  /// \brief Construct by copying another CpuPassStrategy object.
  /// \param[in] other The CpuPassStrategy object we want to copy.
  explicit CpuPassStrategy(const CpuPassStrategy &other)
      : PassStrategy(other.AllPasses()) {
    use_gpu_ = other.use_gpu_;
    use_mkldnn_ = other.use_mkldnn_;
    use_mkldnn_quantizer_ = other.use_mkldnn_quantizer_;
    use_mkldnn_bfloat16_ = other.use_mkldnn_bfloat16_;
  }
  /// \brief Default destructor.
  virtual ~CpuPassStrategy() = default;

  /// \brief Enable the use of cuDNN kernel.
  void EnableCUDNN() override;

  /// \brief Enable the use of MKLDNN.
  void EnableMKLDNN() override;

  /// \brief Enable MKLDNN quantize optimization.
  void EnableMkldnnQuantizer() override;

  /// \brief Enable MKLDNN bfloat16.
  void EnableMkldnnBfloat16() override;

 protected:
  /// \cond Protected
  bool use_mkldnn_quantizer_{false};
  bool use_mkldnn_bfloat16_{false};
  /// \endcond
};

/// \class GpuPassStrategy
/// \brief The GPU passes controller, used in AnalysisPredictor in GPU mode.
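///
/// A minimal GPU-side sketch (EnableCUDNN only has an effect in builds
/// compiled with CUDA/cuDNN support):
/// \code{cpp}
/// GpuPassStrategy gpu_strategy;  // starts from the default GPU pass list
/// gpu_strategy.EnableCUDNN();    // prefer cuDNN kernels where available
/// \endcode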
class PD_INFER_DECL GpuPassStrategy : public PassStrategy {
 public:
  /// \brief Default constructor of GpuPassStrategy.
  GpuPassStrategy();

  /// \brief Construct by copying another GpuPassStrategy object.
  /// \param[in] other The GpuPassStrategy object we want to copy.
  explicit GpuPassStrategy(const GpuPassStrategy &other)
      : PassStrategy(other.AllPasses()) {
    use_gpu_ = true;
    use_cudnn_ = other.use_cudnn_;
  }

  /// \brief Enable the use of cuDNN kernel.
  void EnableCUDNN() override;

  /// \brief Enable the use of gpu fp16 kernel.
  void Exp_EnableUseGpuFp16() override;

  /// \brief Not supported in GPU mode yet.
  void EnableMKLDNN() override;

  /// \brief Not supported in GPU mode yet.
  void EnableMkldnnQuantizer() override;

  /// \brief Not supported in GPU mode yet.
  void EnableMkldnnBfloat16() override;

  /// \brief Default destructor.
  virtual ~GpuPassStrategy() = default;

 protected:
  /// \cond Protected
  bool use_cudnn_{false};
  bool use_gpu_fp16_{false};
  /// \endcond
};

/// \class XpuPassStrategy
/// \brief The XPU passes controller, used in AnalysisPredictor in XPU mode.
class PD_INFER_DECL XpuPassStrategy final : public PassStrategy {
 public:
  XpuPassStrategy() : PassStrategy({}) { use_xpu_ = true; }
};

/// \class NpuPassStrategy
/// \brief The NPU passes controller, used in AnalysisPredictor in NPU mode.
class PD_INFER_DECL NpuPassStrategy final : public PassStrategy {
 public:
  NpuPassStrategy() : PassStrategy({}) { use_npu_ = true; }

  /// \brief Construct by copying another NpuPassStrategy object.
  /// \param[in] other The NpuPassStrategy object we want to copy.
  explicit NpuPassStrategy(const NpuPassStrategy &other)
      : PassStrategy(other.AllPasses()) {
    use_npu_ = true;
  }
};

/// \class IpuPassStrategy
/// \brief The IPU passes controller, used in AnalysisPredictor in IPU mode.
class PD_INFER_DECL IpuPassStrategy final : public PassStrategy {
 public:
  /// \brief Default constructor of IpuPassStrategy.
  IpuPassStrategy();

  /// \brief Construct by copying another IpuPassStrategy object.
  /// \param[in] other The IpuPassStrategy object we want to copy.
  explicit IpuPassStrategy(const IpuPassStrategy &other)
      : PassStrategy(other.AllPasses()) {
    use_ipu_ = true;
  }
};

/// \brief List of TensorRT subgraph passes.
PD_INFER_DECL extern const std::vector<std::string> kTRTSubgraphPasses;

/// \brief List of dlnne subgraph passes.
PD_INFER_DECL extern const std::vector<std::string> kDlnneSubgraphPasses;

/// \brief List of lite subgraph passes.
PD_INFER_DECL extern const std::vector<std::string> kLiteSubgraphPasses;

}  // namespace paddle