// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/inference/api/paddle_pass_builder.h"
#ifdef PADDLE_WITH_CUDA
#include <cudnn.h>
#endif
#ifdef PADDLE_WITH_HIP
#include <miopen/miopen.h>
#endif
#include <glog/logging.h>
#include <sstream>

namespace paddle {

void PaddlePassBuilder::AppendPass(const std::string &pass_type) {
  passes_.push_back(pass_type);
}

void PaddlePassBuilder::TurnOnDebug() {
  // Interleave a "graph_viz_pass" after every existing pass, so that the
  // graph after each pass can be dumped for inspection.
  auto it = std::begin(passes_);
  while (it != std::end(passes_)) {
    if (*it != "graph_viz_pass") {
      it = passes_.insert(it + 1, "graph_viz_pass");
    } else {
      ++it;
    }
  }
}

std::string PaddlePassBuilder::DebugString() {
  std::stringstream ss;
  ss << "Passes to apply:\n";
  for (auto &pass : passes_) {
    ss << "  - " << pass << '\n';
  }
  return ss.str();
}

void PaddlePassBuilder::DeletePass(const std::string &pass_type) {
  auto it = std::begin(passes_);
  while (it != std::end(passes_)) {
    if (*it == pass_type) {
      it = passes_.erase(it);
    } else {
      ++it;
    }
  }
}

void PaddlePassBuilder::InsertPass(size_t idx, const std::string &pass_type) {
  passes_.insert(std::begin(passes_) + idx, pass_type);
}

void PaddlePassBuilder::DeletePass(size_t idx) {
  passes_.erase(std::begin(passes_) + idx);
}
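
// Illustrative usage (a sketch, not compiled into this file): user code
// normally edits the pass list through AnalysisConfig::pass_builder(), e.g.
//
//   paddle::AnalysisConfig config;
//   config.pass_builder()->DeletePass("fc_fuse_pass");
//   config.pass_builder()->InsertPass(0, "graph_viz_pass");
//   LOG(INFO) << config.pass_builder()->DebugString();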

void PaddlePassBuilder::AppendAnalysisPass(const std::string &pass) {
  analysis_passes_.push_back(pass);
}

void PaddlePassBuilder::ClearPasses() { passes_.clear(); }

const std::vector<std::string> kTRTSubgraphPasses({
    "conv_affine_channel_fuse_pass",                //
    "adaptive_pool2d_convert_global_pass",          //
    "conv_eltwiseadd_affine_channel_fuse_pass",     //
    "shuffle_channel_detect_pass",                  //
    "quant_conv2d_dequant_fuse_pass",               //
    "delete_quant_dequant_op_pass",                 //
    "delete_quant_dequant_filter_op_pass",          //
    // "fc_fuse_pass",                              //
    "simplify_with_basic_ops_pass",                 //
    "embedding_eltwise_layernorm_fuse_pass",        //
    "preln_embedding_eltwise_layernorm_fuse_pass",  //
    "multihead_matmul_fuse_pass_v2",                //
    "multihead_matmul_fuse_pass_v3",                //
    "skip_layernorm_fuse_pass",                     //
    "preln_skip_layernorm_fuse_pass",               //
    "conv_bn_fuse_pass",                            //
    "unsqueeze2_eltwise_fuse_pass",                 //
    "trt_squeeze2_matmul_fuse_pass",                //
    "trt_reshape2_matmul_fuse_pass",                //
    "trt_flatten2_matmul_fuse_pass",                //
    "trt_map_matmul_v2_to_mul_pass",                //
    "trt_map_matmul_v2_to_matmul_pass",             //
    "trt_map_matmul_to_mul_pass",                   //
    "fc_fuse_pass",                                 //
    "conv_elementwise_add_fuse_pass",               //
    "add_support_int8_pass",                        //
    "tensorrt_subgraph_pass",                       //
    "conv_bn_fuse_pass",                            //
#if CUDNN_VERSION >= 7100  // conv_fusion requires cuDNN 7.1 or newer.
// cuDNN 8.0.x has a memory leak in the conv + eltwise + act fusion, so the
// two passes below are disabled for that release series.
#if !(CUDNN_VERSION >= 8000 && CUDNN_VERSION < 8100)
    "conv_elementwise_add_act_fuse_pass",   //
    "conv_elementwise_add2_act_fuse_pass",  //
#endif
#endif
    "transpose_flatten_concat_fuse_pass",
});
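
// The list above takes effect only when the TensorRT engine is enabled on
// the config. A minimal sketch (the argument values are placeholders, not
// tuned recommendations):
//
//   paddle::AnalysisConfig config;
//   config.EnableUseGpu(100 /* initial memory in MB */, 0 /* device id */);
//   config.EnableTensorRtEngine(1 << 30 /* workspace_size */,
//                               1 /* max_batch_size */,
//                               3 /* min_subgraph_size */,
//                               paddle::AnalysisConfig::Precision::kFloat32,
//                               false /* use_static */,
//                               false /* use_calib_mode */);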

const std::vector<std::string> kDlnneSubgraphPasses({
    "is_test_pass",                  //
    "delete_dropout_op_pass",        //
    "simplify_with_basic_ops_pass",  //
    "conv_bn_fuse_pass",             //
    "depthwise_conv_bn_fuse_pass",   //
    "shuffle_channel_detect_pass",   //
    "dlnne_subgraph_pass",           //
});

const std::vector<std::string> kLiteSubgraphPasses({
#ifdef PADDLE_WITH_LITE
    "lite_subgraph_pass",
#endif
});
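
// A minimal sketch of enabling the Lite subgraph engine, which triggers the
// pass above (the exact EnableLiteEngine overload may differ between
// releases):
//
//   paddle::AnalysisConfig config;
//   config.EnableLiteEngine(paddle::AnalysisConfig::Precision::kFloat32);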

GpuPassStrategy::GpuPassStrategy() : PassStrategy({}) {
  passes_.assign({
      // "identity_scale_op_clean_pass",            //
      "is_test_pass",                               //
      "simplify_with_basic_ops_pass",               //
      "conv_affine_channel_fuse_pass",              //
      "conv_eltwiseadd_affine_channel_fuse_pass",   //
      "conv_bn_fuse_pass",                          //
      "conv_eltwiseadd_bn_fuse_pass",               //
      "embedding_eltwise_layernorm_fuse_pass",      //
      "multihead_matmul_fuse_pass_v2",              //
      "gpu_cpu_squeeze2_matmul_fuse_pass",          //
      "gpu_cpu_reshape2_matmul_fuse_pass",          //
      "gpu_cpu_flatten2_matmul_fuse_pass",          //
      "gpu_cpu_map_matmul_v2_to_mul_pass",          //
      "gpu_cpu_map_matmul_v2_to_matmul_pass",       //
      "gpu_cpu_map_matmul_to_mul_pass",             //
      "fc_fuse_pass",                               //
      "fc_elementwise_layernorm_fuse_pass",         //
#if CUDNN_VERSION >= 7100  // conv_fusion requires cuDNN 7.1 or newer.
// cuDNN 8.0.x has a memory leak in the conv + eltwise + act fusion, so the
// two passes below are disabled for that release series.
#if !(CUDNN_VERSION >= 8000 && CUDNN_VERSION < 8100)
      "conv_elementwise_add_act_fuse_pass",   //
      "conv_elementwise_add2_act_fuse_pass",  //
#endif
      "conv_elementwise_add_fuse_pass",      //
#endif
      "transpose_flatten_concat_fuse_pass",  //
      // The following pass should stay last, since it works on all the fused
      // ops produced above.
      "runtime_context_cache_pass",
  });

  use_gpu_ = true;
}
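
// GpuPassStrategy is picked by the analysis predictor once the config turns
// the GPU on; a minimal sketch:
//
//   paddle::AnalysisConfig config;
//   config.EnableUseGpu(100 /* initial memory in MB */, 0 /* device id */);
//   config.EnableCUDNN();  // optional: prepends cudnn_placement_pass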

void GpuPassStrategy::EnableCUDNN() {
  if (!use_cudnn_) {
    passes_.insert(passes_.begin(), "cudnn_placement_pass");
  }
  use_cudnn_ = true;
}

void GpuPassStrategy::EnableMKLDNN() {
  LOG(ERROR) << "GPU does not support MKL-DNN yet";
}

void GpuPassStrategy::EnableMkldnnQuantizer() {
  LOG(ERROR) << "GPU does not support MKL-DNN quantization";
}

void GpuPassStrategy::EnableMkldnnBfloat16() {
  LOG(ERROR) << "GPU does not support MKL-DNN bfloat16";
}

CpuPassStrategy::CpuPassStrategy() : PassStrategy({}) {
  // NOTE: the large fusions should be placed at the front, so that they are
  // not broken up by the smaller ones.
  passes_.assign({"simplify_with_basic_ops_pass",  //
                  "layer_norm_fuse_pass",
                  "attention_lstm_fuse_pass",       //
                  "seqconv_eltadd_relu_fuse_pass",  //
                  // "seqpool_concat_fuse_pass",    //
                  "seqpool_cvm_concat_fuse_pass",  //
                  // "embedding_fc_lstm_fuse_pass", //
                  // TODO(wilber): fix correctness problem.
                  // "fc_lstm_fuse_pass",                    //
                  "mul_lstm_fuse_pass",                      //
                  "fc_gru_fuse_pass",                        //
                  "mul_gru_fuse_pass",                       //
                  "seq_concat_fc_fuse_pass",                 //
                  "gpu_cpu_squeeze2_matmul_fuse_pass",       //
                  "gpu_cpu_reshape2_matmul_fuse_pass",       //
                  "gpu_cpu_flatten2_matmul_fuse_pass",       //
                  "matmul_v2_scale_fuse_pass",               //
                  "gpu_cpu_map_matmul_v2_to_mul_pass",       //
                  "gpu_cpu_map_matmul_v2_to_matmul_pass",    //
                  "matmul_scale_fuse_pass",                  //
                  "gpu_cpu_map_matmul_to_mul_pass",          //
                  "fc_fuse_pass",                            //
                  "repeated_fc_relu_fuse_pass",              //
                  "squared_mat_sub_fuse_pass",               //
                  "conv_bn_fuse_pass",                       //
                  "conv_eltwiseadd_bn_fuse_pass",            //
                  "conv_transpose_bn_fuse_pass",             //
                  "conv_transpose_eltwiseadd_bn_fuse_pass",  //
                  "is_test_pass",                            //
                  // The following pass should stay last, since it works on
                  // all the fused ops produced above.
                  "runtime_context_cache_pass"});

  use_gpu_ = false;
}

void CpuPassStrategy::EnableCUDNN() {
  LOG(ERROR) << "CPU does not support cuDNN";
}

void CpuPassStrategy::EnableMKLDNN() {
// TODO(Superjomn) Consider the way to mix CPU with GPU.
#ifdef PADDLE_WITH_MKLDNN
  if (!use_mkldnn_) {
    passes_.insert(passes_.begin(), "mkldnn_placement_pass");

    for (auto &pass : std::vector<std::string>({
             "depthwise_conv_mkldnn_pass",     //
             "conv_bn_fuse_pass",              // Execute BN passes again to
             "conv_eltwiseadd_bn_fuse_pass",   // preserve correct pass order.
             "conv_affine_channel_fuse_pass",  //
             "conv_eltwiseadd_affine_channel_fuse_pass",  //
             "conv_transpose_bn_fuse_pass",               //
             "conv_transpose_eltwiseadd_bn_fuse_pass",    //
             "conv_bias_mkldnn_fuse_pass",                //
             "conv_transpose_bias_mkldnn_fuse_pass",
             // TODO(baoachun): Need to support 5-dimensional input.
             // "conv3d_bias_mkldnn_fuse_pass",  //
             "conv_elementwise_add_mkldnn_fuse_pass",
             "conv_concat_relu_mkldnn_fuse_pass",
             "conv_relu_mkldnn_fuse_pass",          //
             "conv_leaky_relu_mkldnn_fuse_pass",    //
             "conv_relu6_mkldnn_fuse_pass",         //
             "conv_swish_mkldnn_fuse_pass",         //
             "conv_hard_swish_mkldnn_fuse_pass",    //
             "conv_mish_mkldnn_fuse_pass",          //
             "conv_hard_sigmoid_mkldnn_fuse_pass",  //
             // TODO(baoachun): fix int8 accuracy.
             "conv_gelu_mkldnn_fuse_pass",
             "scale_matmul_fuse_pass",                        //
             "reshape_transpose_matmul_mkldnn_fuse_pass",     //
             "reshape_transpose_matmul_v2_mkldnn_fuse_pass",  //
             "matmul_transpose_reshape_fuse_pass",            //
             "matmul_v2_transpose_reshape_fuse_pass",         //
             // Disabled due to topology-dependent speed-up.
             //  "fc_mkldnn_pass",
             //  "fc_act_mkldnn_fuse_pass",
             "batch_norm_act_fuse_pass",              //
             "softplus_activation_mkldnn_fuse_pass",  //
             // TODO(intel): Please fix the bug on windows.
             // https://github.com/PaddlePaddle/Paddle/issues/29710
             // "mkldnn_inplace_pass",  // This pass should be activated after
             // fuses. Disabled by default due to little gain and lots of
             // problems.
         })) {
      passes_.push_back(pass);
    }
  }
  use_mkldnn_ = true;
#else
  use_mkldnn_ = false;
#endif
}
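
// Illustrative sketch: the MKL-DNN passes above take effect when inference
// runs on CPU with MKL-DNN enabled on the config, e.g.
//
//   paddle::AnalysisConfig config;
//   config.DisableGpu();
//   config.EnableMKLDNN();
//   // Optional refinements, handled by the methods below:
//   // config.EnableMkldnnQuantizer();
//   // config.EnableMkldnnBfloat16();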

void CpuPassStrategy::EnableMkldnnQuantizer() {
#ifdef PADDLE_WITH_MKLDNN
  if (!use_mkldnn_quantizer_) {
    passes_.push_back("cpu_quantize_placement_pass");
  }
  use_mkldnn_quantizer_ = true;
#else
  use_mkldnn_quantizer_ = false;
#endif
}

void CpuPassStrategy::EnableMkldnnBfloat16() {
#ifdef PADDLE_WITH_MKLDNN
  if (!use_mkldnn_bfloat16_) {
    passes_.push_back("cpu_bfloat16_placement_pass");
    passes_.push_back("cpu_bfloat16_pass");
    passes_.push_back("cpu_quantize_squash_pass");
  }
  use_mkldnn_bfloat16_ = true;
#else
  use_mkldnn_bfloat16_ = false;
#endif
}

IpuPassStrategy::IpuPassStrategy() : PassStrategy({}) {
  passes_.assign({"inference_process_pass"});
}

}  // namespace paddle