paddle_pass_builder.cc
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/inference/api/paddle_pass_builder.h"
#ifdef PADDLE_WITH_CUDA
#include <cudnn.h>
#endif
#ifdef PADDLE_WITH_HIP
#include <miopen/miopen.h>
#endif
#include <glog/logging.h>

#include <algorithm>
#include <sstream>

namespace paddle {

void PaddlePassBuilder::AppendPass(const std::string &pass_type) {
  passes_.push_back(pass_type);
}

void PaddlePassBuilder::TurnOnDebug() {
  // Interleave a graph_viz_pass after every pass so that each intermediate
  // graph can be dumped for inspection.
  auto it = std::begin(passes_);
  while (it != std::end(passes_)) {
    if (*it != "graph_viz_pass") {
      it = passes_.insert(it + 1, "graph_viz_pass");
    } else {
      ++it;
    }
  }
}
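
// Illustrative effect of TurnOnDebug() (a sketch; the pass names "foo_pass"
// and "bar_pass" are hypothetical placeholders):
//
//   passes_ before: {"foo_pass", "bar_pass"}
//   passes_ after:  {"foo_pass", "graph_viz_pass",
//                    "bar_pass", "graph_viz_pass"}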

std::string PaddlePassBuilder::DebugString() {
  std::stringstream ss;
  ss << "Passes to apply:\n";
  for (auto &pass : passes_) {
    ss << "  - " << pass << '\n';
  }
  return ss.str();
}

void PaddlePassBuilder::DeletePass(const std::string &pass_type) {
  deleted_passes_.insert(pass_type);
  auto it = std::begin(passes_);
  while (it != std::end(passes_)) {
    if (*it == pass_type) {
      it = passes_.erase(it);
    } else {
      ++it;
    }
  }
}

size_t PaddlePassBuilder::GetPassIndex(const std::string &pass_type) {
  auto iter = std::find(std::begin(passes_), std::end(passes_), pass_type);
  // Not found: return an npos-style sentinel; callers must check for it
  // before indexing into passes_.
  if (iter == std::end(passes_)) return static_cast<size_t>(-1);
  return std::distance(std::begin(passes_), iter);
}

void PaddlePassBuilder::InsertPass(size_t idx, const std::string &pass_type) {
  passes_.insert(std::begin(passes_) + idx, pass_type);
}

void PaddlePassBuilder::DeletePass(size_t idx) {
  passes_.erase(std::begin(passes_) + idx);
}
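
// Illustrative use of the editing API above (a sketch; it assumes a
// CpuPassStrategy, whose default list below contains "fc_fuse_pass" and
// "is_test_pass"):
//
//   paddle::CpuPassStrategy strategy;
//   size_t idx = strategy.GetPassIndex("fc_fuse_pass");
//   if (idx != static_cast<size_t>(-1)) {
//     strategy.InsertPass(idx, "graph_viz_pass");  // dump graph before fusion
//   }
//   strategy.DeletePass("is_test_pass");  // drop a pass by name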

void PaddlePassBuilder::AppendAnalysisPass(const std::string &pass) {
  analysis_passes_.push_back(pass);
}

void PaddlePassBuilder::ClearPasses() { passes_.clear(); }

const std::vector<std::string> kTRTSubgraphPasses({
  "identity_scale_op_clean_pass",              //
      "adaptive_pool2d_convert_global_pass",   //
      "shuffle_channel_detect_pass",           //
      "quant_conv2d_dequant_fuse_pass",        //
      "delete_fill_constant_op_pass",          //
      "delete_quant_dequant_op_pass",          //
      "delete_quant_dequant_filter_op_pass",   //
      "delete_weight_dequant_linear_op_pass",  //
      "delete_quant_dequant_linear_op_pass",   //
      "add_support_int8_pass",                 //
      // "fc_fuse_pass",                        //
      "simplify_with_basic_ops_pass",                 //
      "trt_embedding_eltwise_layernorm_fuse_pass",    //
      "preln_embedding_eltwise_layernorm_fuse_pass",  //
      "delete_c_identity_op_pass",                    //
      "trt_multihead_matmul_fuse_pass_v2",            //
      "trt_multihead_matmul_fuse_pass_v3",            //
      "trt_skip_layernorm_fuse_pass",                 //
      "preln_skip_layernorm_fuse_pass",               //
      "preln_residual_bias_fuse_pass",                //
      // "set_transformer_input_convert_pass",           //
      "conv_bn_fuse_pass",                           //
      "unsqueeze2_eltwise_fuse_pass",                //
      "trt_squeeze2_matmul_fuse_pass",               //
      "trt_reshape2_matmul_fuse_pass",               //
      "trt_flatten2_matmul_fuse_pass",               //
      "trt_map_matmul_v2_to_mul_pass",               //
      "trt_map_matmul_v2_to_matmul_pass",            //
      "trt_map_matmul_to_mul_pass",                  //
      "fc_fuse_pass",                                //
      "conv_elementwise_add_fuse_pass",              //
      "remove_padding_recover_padding_pass",         //
      "delete_remove_padding_recover_padding_pass",  //
      // "yolo_box_fuse_pass",      //
      "dense_fc_to_sparse_pass",                //
      "dense_multihead_matmul_to_sparse_pass",  //
      "tensorrt_subgraph_pass",                 //
      "conv_bn_fuse_pass",                      //
#if CUDNN_VERSION >= 7100  // To run conv_fusion, the version of cudnn must be
                           // guaranteed at least v7
// cudnn8.0 has memory leak problem in conv + eltwise + act, so we
// disable the pass.
#if !(CUDNN_VERSION >= 8000 && CUDNN_VERSION < 8100)
      "conv_elementwise_add_act_fuse_pass",   //
      "conv_elementwise_add2_act_fuse_pass",  //
#endif
#endif
      "transpose_flatten_concat_fuse_pass",
});
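
// A minimal sketch of how this list is typically reached from user code
// (hedged: the argument values are illustrative; EnableTensorRtEngine also
// accepts explicit workspace/batch/precision settings):
//
//   paddle::AnalysisConfig config;
//   config.EnableUseGpu(100 /* MB */, 0 /* device id */);
//   config.EnableTensorRtEngine();  // the analysis phase then applies
//                                   // the kTRTSubgraphPasses defined above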

const std::vector<std::string> kDlnneSubgraphPasses({
    "is_test_pass",                  //
    "delete_dropout_op_pass",        //
    "simplify_with_basic_ops_pass",  //
    "conv_bn_fuse_pass",             //
    "depthwise_conv_bn_fuse_pass",   //
    "shuffle_channel_detect_pass",   //
    "dlnne_subgraph_pass",           //
});

const std::vector<std::string> kLiteSubgraphPasses({
#ifdef PADDLE_WITH_LITE
    "lite_subgraph_pass",
#endif
});

// TODO(inference): Most of the existing fusion passes do not support
// fp16/bf16 precision yet, so we temporarily use this reduced pass list to
// avoid runtime errors. Once the fusion operators support low precision,
// delete this.
const std::vector<std::string> kGpuLowerPrecisionPasses{
    // "conv_bn_fuse_pass",
    // "conv_eltwiseadd_bn_fuse_pass",
};
const std::vector<std::string> kTrtLowerPrecisionPasses{
    // "conv_bn_fuse_pass",
    // "conv_eltwiseadd_bn_fuse_pass",
    "trt_map_matmul_v2_to_mul_pass",
    "trt_map_matmul_v2_to_matmul_pass",
    "trt_map_matmul_to_mul_pass",
    "fc_fuse_pass",
    "tensorrt_subgraph_pass",
};

GpuPassStrategy::GpuPassStrategy() : PassStrategy({}) {
  passes_.assign({
    //   "identity_scale_op_clean_pass",             //
    "is_test_pass",                               //
        "simplify_with_basic_ops_pass",           //
        "conv_bn_fuse_pass",                      //
        "conv_eltwiseadd_bn_fuse_pass",           //
        "embedding_eltwise_layernorm_fuse_pass",  //
        "multihead_matmul_fuse_pass_v2",          //
        "gpu_cpu_squeeze2_matmul_fuse_pass",      //
        "gpu_cpu_reshape2_matmul_fuse_pass",      //
        "gpu_cpu_flatten2_matmul_fuse_pass",      //
        "gpu_cpu_map_matmul_v2_to_mul_pass",      //
        "gpu_cpu_map_matmul_v2_to_matmul_pass",   //
        "matmul_scale_fuse_pass",                 //
        "multihead_matmul_fuse_pass_v3",          //
        "gpu_cpu_map_matmul_to_mul_pass",         //
        "fc_fuse_pass",                           //
        "fc_elementwise_layernorm_fuse_pass",     //
#if CUDNN_VERSION >= 7100  // To run conv_fusion, the version of cudnn must be
                           // guaranteed at least v7
// cudnn8.0 has memory leak problem in conv + eltwise + act, so we
// disable the pass.
#if !(CUDNN_VERSION >= 8000 && CUDNN_VERSION < 8100)
        "conv_elementwise_add_act_fuse_pass",   //
        "conv_elementwise_add2_act_fuse_pass",  //
#endif
        "conv_elementwise_add_fuse_pass",      //
#endif                                         //
        "transpose_flatten_concat_fuse_pass",  //
        // The following pass should be placed last, since it works on all
        // fused ops.
        "runtime_context_cache_pass"
  });

  use_gpu_ = true;
}
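
// Illustrative customization of the GPU list above (a sketch; it assumes the
// standard AnalysisConfig entry points, which expose the active strategy via
// pass_builder()):
//
//   paddle::AnalysisConfig config;
//   config.EnableUseGpu(100 /* initial GPU memory (MB) */, 0 /* device id */);
//   config.pass_builder()->DeletePass("fc_fuse_pass");
//   config.pass_builder()->AppendPass("graph_viz_pass");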

void GpuPassStrategy::EnableCUDNN() {
  if (!use_cudnn_) {
    passes_.insert(passes_.begin(), "cudnn_placement_pass");
  }
  use_cudnn_ = true;
}

void GpuPassStrategy::EnableMKLDNN() {
  LOG(ERROR) << "GPU does not support MKL-DNN yet";
}

void GpuPassStrategy::EnableMkldnnQuantizer() {
  LOG(ERROR) << "GPU does not support MKL-DNN quantization";
}

void GpuPassStrategy::EnableMkldnnBfloat16() {
  LOG(ERROR) << "GPU does not support MKL-DNN bfloat16";
}

void GpuPassStrategy::EnableMkldnnInt8() {
  LOG(ERROR) << "GPU does not support MKL-DNN int8";
}

CpuPassStrategy::CpuPassStrategy() : PassStrategy({}) {
  // NOTE: the large fusions should be placed at the front, so that they are
  // not damaged by the smaller ones.
  passes_.assign({"simplify_with_basic_ops_pass",  //
                  "layer_norm_fuse_pass",
                  "attention_lstm_fuse_pass",       //
                  "seqconv_eltadd_relu_fuse_pass",  //
                  // "seqpool_concat_fuse_pass",    //
                  "seqpool_cvm_concat_fuse_pass",  //
                  // "embedding_fc_lstm_fuse_pass", //
                  // TODO(wilber): fix correctness problem.
                  // "fc_lstm_fuse_pass",                    //
                  "mul_lstm_fuse_pass",                      //
                  "fc_gru_fuse_pass",                        //
                  "mul_gru_fuse_pass",                       //
                  "seq_concat_fc_fuse_pass",                 //
                  "gpu_cpu_squeeze2_matmul_fuse_pass",       //
                  "gpu_cpu_reshape2_matmul_fuse_pass",       //
                  "gpu_cpu_flatten2_matmul_fuse_pass",       //
                  "matmul_v2_scale_fuse_pass",               //
                  "gpu_cpu_map_matmul_v2_to_mul_pass",       //
                  "gpu_cpu_map_matmul_v2_to_matmul_pass",    //
                  "matmul_scale_fuse_pass",                  //
                  "gpu_cpu_map_matmul_to_mul_pass",          //
                  "fc_fuse_pass",                            //
                  "repeated_fc_relu_fuse_pass",              //
                  "squared_mat_sub_fuse_pass",               //
                  "conv_bn_fuse_pass",                       //
                  "conv_eltwiseadd_bn_fuse_pass",            //
                  "conv_transpose_bn_fuse_pass",             //
                  "conv_transpose_eltwiseadd_bn_fuse_pass",  //
                  "is_test_pass",                            //
                  // The following pass should be placed last, since it works
                  // on all fused ops.
                  "runtime_context_cache_pass"});

  use_gpu_ = false;
}

void CpuPassStrategy::EnableCUDNN() {
  LOG(ERROR) << "CPU does not support cuDNN";
}

void CpuPassStrategy::EnableMKLDNN() {
// TODO(Superjomn) Consider the way to mix CPU with GPU.
#ifdef PADDLE_WITH_MKLDNN
  if (!use_mkldnn_) {
    passes_.insert(passes_.begin(), "mkldnn_placement_pass");

    for (auto &pass : std::vector<std::string>({
             "depthwise_conv_mkldnn_pass",    //
             "conv_bn_fuse_pass",             // Execute BN passes again to
             "conv_eltwiseadd_bn_fuse_pass",  // preserve correct pass order
             "conv_affine_channel_mkldnn_fuse_pass",    //
             "conv_transpose_bn_fuse_pass",             //
             "conv_transpose_eltwiseadd_bn_fuse_pass",  //
             "conv_bias_mkldnn_fuse_pass",              //
             "conv_transpose_bias_mkldnn_fuse_pass",
             // TODO(baoachun): Need to support 5-dimensional input.
             // "conv3d_bias_mkldnn_fuse_pass",  //
             "conv_elementwise_add_mkldnn_fuse_pass",
             "conv_concat_relu_mkldnn_fuse_pass",
             "conv_activation_mkldnn_fuse_pass",              //
             "scale_matmul_fuse_pass",                        //
             "reshape_transpose_matmul_mkldnn_fuse_pass",     //
             "reshape_transpose_matmul_v2_mkldnn_fuse_pass",  //
             "matmul_transpose_reshape_fuse_pass",            //
             "matmul_v2_transpose_reshape_fuse_pass",         //
             // Disabled due to topology-dependent speed-up
             //  "fc_mkldnn_pass",
             //  "fc_act_mkldnn_fuse_pass",
             "fc_elementwise_add_mkldnn_fuse_pass",   //
             "batch_norm_act_fuse_pass",              //
             "softplus_activation_mkldnn_fuse_pass",  //
             "shuffle_channel_mkldnn_detect_pass",    //
             "elt_act_mkldnn_fuse_pass",              //
             // TODO(intel): Please fix the bug on windows.
             // https://github.com/PaddlePaddle/Paddle/issues/29710
             // "mkldnn_inplace_pass",  // This pass should be activated after
             // fuses. Disabled by default due to
             // little gain and lots of problems
         })) {
      passes_.push_back(pass);
    }
  }
  use_mkldnn_ = true;
#else
  use_mkldnn_ = false;
#endif
}
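
// Typical entry point for the method above (a sketch; it assumes a build
// with PADDLE_WITH_MKLDNN and the standard AnalysisConfig API):
//
//   paddle::AnalysisConfig config;
//   config.SetModel("model_dir");  // hypothetical model path
//   config.EnableMKLDNN();  // ultimately triggers
//                           // CpuPassStrategy::EnableMKLDNN()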

void CpuPassStrategy::EnableMkldnnQuantizer() {
#ifdef PADDLE_WITH_MKLDNN
  if (!use_mkldnn_quantizer_) {
    passes_.push_back("cpu_quantize_placement_pass");
  }
  use_mkldnn_quantizer_ = true;
#else
  use_mkldnn_quantizer_ = false;
#endif
}

void CpuPassStrategy::EnableMkldnnBfloat16() {
#ifdef PADDLE_WITH_MKLDNN
  if (!use_mkldnn_bfloat16_) {
    passes_.push_back("fc_mkldnn_pass");
    passes_.push_back("fc_act_mkldnn_fuse_pass");
    passes_.push_back("fc_elementwise_add_mkldnn_fuse_pass");

    passes_.push_back("cpu_bfloat16_placement_pass");
    passes_.push_back("cpu_bfloat16_pass");
    passes_.push_back("cpu_quantize_squash_pass");
  }
  use_mkldnn_bfloat16_ = true;
#else
  use_mkldnn_bfloat16_ = false;
#endif
}
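
// Usage sketch (hedged: it mirrors the EnableMKLDNN entry point; bf16
// kernels only pay off on CPUs with native bfloat16 support, e.g.
// AVX512_BF16):
//
//   config.EnableMKLDNN();
//   config.EnableMkldnnBfloat16();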

void CpuPassStrategy::EnableMkldnnInt8() {
#ifdef PADDLE_WITH_MKLDNN
  if (!use_mkldnn_int8_) {
    passes_.clear();
    passes_.push_back("quant_dequant_mkldnn_pass");
    passes_.push_back("layer_norm_fuse_pass");
    passes_.push_back("attention_lstm_fuse_pass");
    passes_.push_back("seqconv_eltadd_relu_fuse_pass");
    passes_.push_back("fc_lstm_fuse_pass");
    passes_.push_back("mul_lstm_fuse_pass");
    passes_.push_back("fc_gru_fuse_pass");
    passes_.push_back("mul_gru_fuse_pass");
    passes_.push_back("multi_gru_fuse_pass");
    passes_.push_back("multi_gru_seq_fuse_pass");
    passes_.push_back("seq_concat_fc_fuse_pass");
    passes_.push_back("gpu_cpu_squeeze2_matmul_fuse_pass");
    passes_.push_back("gpu_cpu_reshape2_matmul_fuse_pass");
    passes_.push_back("gpu_cpu_flatten2_matmul_fuse_pass");
    passes_.push_back("matmul_v2_scale_fuse_pass");
    passes_.push_back("squared_mat_sub_fuse_pass");
    passes_.push_back("is_test_pass");
    passes_.push_back("gpu_cpu_map_matmul_v2_to_mul_pass");
    passes_.push_back("gpu_cpu_map_matmul_v2_to_matmul_pass");
    passes_.push_back("matmul_scale_fuse_pass");
    passes_.push_back("gpu_cpu_map_matmul_to_mul_pass");
    passes_.push_back("repeated_fc_relu_fuse_pass");
    passes_.push_back("mkldnn_placement_pass");
    passes_.push_back("depthwise_conv_mkldnn_pass");
    passes_.push_back("conv_bn_fuse_pass");
    passes_.push_back("conv_eltwiseadd_bn_fuse_pass");
    passes_.push_back("conv_transpose_bn_fuse_pass");
    passes_.push_back("conv_transpose_eltwiseadd_bn_fuse_pass");
    passes_.push_back("conv_bias_mkldnn_fuse_pass");
    passes_.push_back("conv_transpose_bias_mkldnn_fuse_pass");
    passes_.push_back("conv_elementwise_add_mkldnn_fuse_pass");
    passes_.push_back("conv_concat_relu_mkldnn_fuse_pass");
    passes_.push_back("conv_activation_mkldnn_fuse_pass");
    passes_.push_back("fc_fuse_pass");
    passes_.push_back("repeated_fc_relu_fuse_pass");
    passes_.push_back("fc_mkldnn_pass");
    passes_.push_back("fc_act_mkldnn_fuse_pass");
    passes_.push_back("matmul_transpose_reshape_fuse_pass");
    passes_.push_back("matmul_v2_transpose_reshape_fuse_pass");
    passes_.push_back("batch_norm_act_fuse_pass");
    passes_.push_back("softplus_activation_mkldnn_fuse_pass");
    passes_.push_back("compute_propagate_scales_mkldnn_pass");
    passes_.push_back("scale_matmul_fuse_pass");
    passes_.push_back("reshape_transpose_matmul_mkldnn_fuse_pass");
    passes_.push_back("reshape_transpose_matmul_v2_mkldnn_fuse_pass");
    passes_.push_back("cpu_quantize_placement_pass");
    passes_.push_back("cpu_quantize_pass");
    passes_.push_back("cpu_quantize_squash_pass");
    passes_.push_back("simplify_with_basic_ops_pass");
    passes_.push_back("mkldnn_inplace_pass");
    passes_.push_back("runtime_context_cache_pass");
  }
  use_mkldnn_int8_ = true;
#else
  use_mkldnn_int8_ = false;
#endif
}
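
// Usage sketch (hedged: since EnableMkldnnInt8 rebuilds the whole pass list,
// it is meant to be driven from AnalysisConfig rather than combined with
// manual pass edits):
//
//   config.EnableMKLDNN();
//   config.EnableMkldnnInt8();  // quantize supported operators to int8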

IpuPassStrategy::IpuPassStrategy() : PassStrategy({}) {
  passes_.assign({"inference_process_pass"});
}

}  // namespace paddle