// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/inference/api/paddle_pass_builder.h"
#ifdef PADDLE_WITH_CUDA
#include <cudnn.h>
#endif
#include <glog/logging.h>
#include <sstream>

namespace paddle {

void PaddlePassBuilder::AppendPass(const std::string &pass_type) {
  passes_.push_back(pass_type);
}

void PaddlePassBuilder::TurnOnDebug() {
  // Interleave a "graph_viz_pass" after every pass so the graph can be
  // visualized at each stage, e.g. {"a_pass", "b_pass"} becomes
  // {"a_pass", "graph_viz_pass", "b_pass", "graph_viz_pass"}.
  auto it = std::begin(passes_);
  while (it != std::end(passes_)) {
    if (*it != "graph_viz_pass") {
      it = passes_.insert(it + 1, "graph_viz_pass");
    } else {
      ++it;
    }
  }
}

std::string PaddlePassBuilder::DebugString() {
  std::stringstream ss;
  ss << "Passes to apply:\n";
  for (auto &pass : passes_) {
    ss << "  - " << pass << '\n';
  }
  return ss.str();
}

void PaddlePassBuilder::DeletePass(const std::string &pass_type) {
  auto it = std::begin(passes_);
  while (it != std::end(passes_)) {
    if (*it == pass_type) {
      it = passes_.erase(it);
    } else {
      ++it;
    }
  }
}

void PaddlePassBuilder::InsertPass(size_t idx, const std::string &pass_type) {
  passes_.insert(std::begin(passes_) + idx, pass_type);
}

void PaddlePassBuilder::DeletePass(size_t idx) {
  passes_.erase(std::begin(passes_) + idx);
}
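
// Usage sketch (illustrative only; real pass names, but a hypothetical
// combination): the primitives above let callers tailor the pipeline before
// the analysis passes run, e.g.
//   PaddlePassBuilder builder({"conv_bn_fuse_pass", "fc_fuse_pass"});
//   builder.DeletePass("fc_fuse_pass");     // drop an unwanted fusion
//   builder.InsertPass(0, "is_test_pass");  // run a pass first
//   builder.TurnOnDebug();  // interleave graph_viz_pass after every pass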

void PaddlePassBuilder::AppendAnalysisPass(const std::string &pass) {
  analysis_passes_.push_back(pass);
}

void PaddlePassBuilder::ClearPasses() { passes_.clear(); }

const std::vector<std::string> kTRTSubgraphPasses({
    "conv_affine_channel_fuse_pass",             //
    "adaptive_pool2d_convert_global_pass",       //
    "conv_eltwiseadd_affine_channel_fuse_pass",  //
    "shuffle_channel_detect_pass",               //
    "quant_conv2d_dequant_fuse_pass",            //
    "delete_quant_dequant_op_pass",              //
    "delete_quant_dequant_filter_op_pass",       //
    // "fc_fuse_pass",                           //
    "simplify_with_basic_ops_pass",              //
    "embedding_eltwise_layernorm_fuse_pass",     //
    "multihead_matmul_fuse_pass_v2",             //
    "skip_layernorm_fuse_pass",                  //
    "conv_bn_fuse_pass",                         //
    "unsqueeze2_eltwise_fuse_pass",              //
    "squeeze2_matmul_fuse_pass",                 //
    "reshape2_matmul_fuse_pass",                 //
    "flatten2_matmul_fuse_pass",                 //
    "map_matmul_to_mul_pass",                    //
    "fc_fuse_pass",                              //
    "conv_elementwise_add_fuse_pass",            //
    "tensorrt_subgraph_pass",                    //
    "conv_bn_fuse_pass",                         //
#if CUDNN_VERSION >= 7100  // To run conv_fusion, the cuDNN version must be at
                           // least v7.1.
// cuDNN 8.0.x has a memory leak problem in the conv + eltwise + act fusion,
// so the corresponding passes are disabled for that version range.
#if !(CUDNN_VERSION >= 8000 && CUDNN_VERSION < 8100)
    "conv_elementwise_add_act_fuse_pass",   //
    "conv_elementwise_add2_act_fuse_pass",  //
#endif
#endif
    "transpose_flatten_concat_fuse_pass",
});
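
// These passes take effect when the TensorRT subgraph engine is enabled,
// normally through the analysis config rather than by editing this list. A
// minimal sketch, assuming the public AnalysisConfig API:
//   paddle::AnalysisConfig config;
//   config.EnableUseGpu(100 /*memory_mb*/, 0 /*device_id*/);
//   config.EnableTensorRtEngine();  // switches the strategy to
//                                   // kTRTSubgraphPasses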

const std::vector<std::string> kLiteSubgraphPasses({
#ifdef PADDLE_WITH_LITE
    "lite_subgraph_pass",
#endif
});
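
// The Lite subgraph pass is only compiled in under PADDLE_WITH_LITE and is
// likewise reached through the analysis config. A minimal sketch, assuming
// the public AnalysisConfig API:
//   paddle::AnalysisConfig config;
//   config.EnableLiteEngine();  // offloads supported subgraphs to Paddle-Lite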

GpuPassStrategy::GpuPassStrategy() : PassStrategy({}) {
  passes_.assign({
      // "identity_scale_op_clean_pass",           //
      "is_test_pass",                              //
      "simplify_with_basic_ops_pass",              //
      "conv_affine_channel_fuse_pass",             //
      "conv_eltwiseadd_affine_channel_fuse_pass",  //
      "conv_bn_fuse_pass",                         //
      "conv_eltwiseadd_bn_fuse_pass",              //
      "embedding_eltwise_layernorm_fuse_pass",     //
      "multihead_matmul_fuse_pass_v2",             //
      "squeeze2_matmul_fuse_pass",                 //
      "reshape2_matmul_fuse_pass",                 //
      "flatten2_matmul_fuse_pass",                 //
      "map_matmul_to_mul_pass",                    //
      "fc_fuse_pass",                              //
      "fc_elementwise_layernorm_fuse_pass",        //
#if CUDNN_VERSION >= 7100  // To run conv_fusion, the cuDNN version must be at
                           // least v7.1.
      "conv_elementwise_add_act_fuse_pass",   //
      "conv_elementwise_add2_act_fuse_pass",  //
      "conv_elementwise_add_fuse_pass",       //
#endif                                        //
      "transpose_flatten_concat_fuse_pass",   //
      // The following pass should be placed last, since it works on all
      // fused ops.
      "runtime_context_cache_pass"
  });

  use_gpu_ = true;
}

void GpuPassStrategy::EnableCUDNN() {
  if (!use_cudnn_) {
    passes_.insert(passes_.begin(), "cudnn_placement_pass");
  }
  use_cudnn_ = true;
}

void GpuPassStrategy::EnableMKLDNN() {
  LOG(ERROR) << "GPU does not support MKL-DNN yet.";
}

void GpuPassStrategy::EnableMkldnnQuantizer() {
  LOG(ERROR) << "GPU does not support MKL-DNN quantization.";
}

void GpuPassStrategy::EnableMkldnnBfloat16() {
  LOG(ERROR) << "GPU does not support MKL-DNN bfloat16.";
}

CpuPassStrategy::CpuPassStrategy() : PassStrategy({}) {
  // NOTE: the larger fusions should be placed at the front, so that they are
  // not broken up by the smaller ones.
  passes_.assign({"simplify_with_basic_ops_pass",   //
                  "layer_norm_fuse_pass",           //
                  "attention_lstm_fuse_pass",       //
                  "seqconv_eltadd_relu_fuse_pass",  //
                  // "seqpool_concat_fuse_pass",    //
                  "seqpool_cvm_concat_fuse_pass",  //
                  // "embedding_fc_lstm_fuse_pass", //
                  // TODO(wilber): fix correctness problem.
                  // "fc_lstm_fuse_pass",                    //
                  "mul_lstm_fuse_pass",                      //
                  "fc_gru_fuse_pass",                        //
                  "mul_gru_fuse_pass",                       //
                  "seq_concat_fc_fuse_pass",                 //
                  "squeeze2_matmul_fuse_pass",               //
                  "reshape2_matmul_fuse_pass",               //
                  "flatten2_matmul_fuse_pass",               //
                  "map_matmul_to_mul_pass",                  //
                  "fc_fuse_pass",                            //
                  "repeated_fc_relu_fuse_pass",              //
                  "squared_mat_sub_fuse_pass",               //
                  "conv_bn_fuse_pass",                       //
                  "conv_eltwiseadd_bn_fuse_pass",            //
                  "conv_transpose_bn_fuse_pass",             //
                  "conv_transpose_eltwiseadd_bn_fuse_pass",  //
                  "is_test_pass",                            //
                  // The following pass should be placed last, since it works
                  // on all fused ops.
                  "runtime_context_cache_pass"});

  use_gpu_ = false;
}

void CpuPassStrategy::EnableCUDNN() {
  LOG(ERROR) << "CPU does not support cuDNN.";
}

void CpuPassStrategy::EnableMKLDNN() {
// TODO(Superjomn) Consider the way to mix CPU with GPU.
#ifdef PADDLE_WITH_MKLDNN
  if (!use_mkldnn_) {
    passes_.insert(passes_.begin(), "mkldnn_placement_pass");

    for (auto &pass : std::vector<std::string>({
             "depthwise_conv_mkldnn_pass",    //
             "conv_bn_fuse_pass",             // Execute BN passes again to
             "conv_eltwiseadd_bn_fuse_pass",  // preserve correct pass order
             "conv_affine_channel_fuse_pass",             //
             "conv_eltwiseadd_affine_channel_fuse_pass",  //
             "conv_transpose_bn_fuse_pass",               //
             "conv_transpose_eltwiseadd_bn_fuse_pass",    //
             "conv_bias_mkldnn_fuse_pass",                //
             "conv_transpose_bias_mkldnn_fuse_pass",      //
             "conv3d_bias_mkldnn_fuse_pass",              //
             "conv_elementwise_add_mkldnn_fuse_pass",     //
             "conv_concat_relu_mkldnn_fuse_pass",         //
             "conv_relu_mkldnn_fuse_pass",                //
             "conv_leaky_relu_mkldnn_fuse_pass",          //
             "conv_relu6_mkldnn_fuse_pass",               //
             "conv_swish_mkldnn_fuse_pass",               //
             "scale_matmul_fuse_pass",                    //
             "reshape_transpose_matmul_mkldnn_fuse_pass",  //
             "matmul_transpose_reshape_fuse_pass",         //
             // Disabled due to topology-dependent speed-up.
             // "fc_mkldnn_pass",
             // "fc_act_mkldnn_fuse_pass",
             "batch_norm_act_fuse_pass",
             // TODO(intel): Please fix the bug on windows.
             // https://github.com/PaddlePaddle/Paddle/issues/29710
             // "mkldnn_inplace_pass",  // This pass should be activated after
             // the fuses. Disabled by default due to little gain and lots of
             // problems.
         })) {
      passes_.push_back(pass);
    }
  }
  use_mkldnn_ = true;
#else
  use_mkldnn_ = false;
#endif
}
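
// A minimal sketch of how this path is usually reached, assuming the public
// AnalysisConfig API:
//   paddle::AnalysisConfig config;
//   config.DisableGpu();
//   config.EnableMKLDNN();  // prepends mkldnn_placement_pass and appends the
//                           // MKL-DNN fuse passes listed above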

void CpuPassStrategy::EnableMkldnnQuantizer() {
#ifdef PADDLE_WITH_MKLDNN
  if (!use_mkldnn_quantizer_) {
    passes_.push_back("cpu_quantize_placement_pass");
  }
  use_mkldnn_quantizer_ = true;
#else
  use_mkldnn_quantizer_ = false;
#endif
}
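
// Quantization builds on the MKL-DNN pipeline. A minimal sketch, assuming
// the public AnalysisConfig API:
//   paddle::AnalysisConfig config;
//   config.EnableMKLDNN();
//   config.EnableMkldnnQuantizer();  // adds cpu_quantize_placement_pass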

void CpuPassStrategy::EnableMkldnnBfloat16() {
#ifdef PADDLE_WITH_MKLDNN
  if (!use_mkldnn_bfloat16_) {
    passes_.push_back("cpu_bfloat16_placement_pass");
    passes_.push_back("cpu_bfloat16_pass");
  }
  use_mkldnn_bfloat16_ = true;
#else
  use_mkldnn_bfloat16_ = false;
#endif
}
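
// The bfloat16 passes work on MKL-DNN operators, so they are typically
// enabled together with MKL-DNN. A minimal sketch, assuming the public
// AnalysisConfig API:
//   paddle::AnalysisConfig config;
//   config.EnableMKLDNN();
//   config.EnableMkldnnBfloat16();  // adds the cpu_bfloat16_* passes above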

}  // namespace paddle