// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/inference/api/paddle_pass_builder.h"
#ifdef PADDLE_WITH_CUDA
#include <cudnn.h>
#endif
#include <glog/logging.h>

namespace paddle {

void PaddlePassBuilder::AppendPass(const std::string &pass_type) {
  passes_.push_back(pass_type);
}

void PaddlePassBuilder::TurnOnDebug() {
  // Interleave a "graph_viz_pass" after every other pass so the graph can be
  // dumped after each transformation, e.g. {"a_pass", "b_pass"} becomes
  // {"a_pass", "graph_viz_pass", "b_pass", "graph_viz_pass"}.
  auto it = std::begin(passes_);
  while (it != std::end(passes_)) {
    if (*it != "graph_viz_pass") {
      it = passes_.insert(it + 1, "graph_viz_pass");
    } else {
      ++it;
    }
  }
}

std::string PaddlePassBuilder::DebugString() {
  std::stringstream ss;
  ss << "Passes to apply:\n";
  for (auto &pass : passes_) {
    ss << "  - " << pass << '\n';
  }
  return ss.str();
}
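
// For example, with two passes registered, DebugString() returns:
//
//   Passes to apply:
//     - conv_bn_fuse_pass
//     - fc_fuse_pass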

void PaddlePassBuilder::DeletePass(const std::string &pass_type) {
  auto it = std::begin(passes_);
  while (it != std::end(passes_)) {
    if (*it == pass_type) {
      it = passes_.erase(it);
    } else {
      ++it;
    }
  }
}

void PaddlePassBuilder::InsertPass(size_t idx, const std::string &pass_type) {
  passes_.insert(std::begin(passes_) + idx, pass_type);
}

void PaddlePassBuilder::DeletePass(size_t idx) {
  passes_.erase(std::begin(passes_) + idx);
}
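
// A minimal usage sketch of the editing API above. Illustrative only: the
// constructor argument mirrors PassStrategy({}) below, and the pass order
// shown is not a recommendation.
//
//   PaddlePassBuilder builder({"conv_bn_fuse_pass", "fc_fuse_pass"});
//   builder.AppendPass("runtime_context_cache_pass");       // run last
//   builder.InsertPass(0, "simplify_with_basic_ops_pass");  // run first
//   builder.DeletePass("fc_fuse_pass");  // erases every matching entry
//   LOG(INFO) << builder.DebugString();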

void PaddlePassBuilder::AppendAnalysisPass(const std::string &pass) {
  analysis_passes_.push_back(pass);
}

void PaddlePassBuilder::ClearPasses() { passes_.clear(); }

const std::vector<std::string> kTRTSubgraphPasses({
    "conv_affine_channel_fuse_pass",             //
    "adaptive_pool2d_convert_global_pass",       //
    "conv_eltwiseadd_affine_channel_fuse_pass",  //
    "shuffle_channel_detect_pass",               //
    "quant_conv2d_dequant_fuse_pass",            //
    "delete_quant_dequant_op_pass",              //
    "delete_quant_dequant_filter_op_pass",       //
    // "fc_fuse_pass",                           //
    "simplify_with_basic_ops_pass",              //
    "embedding_eltwise_layernorm_fuse_pass",     //
    "multihead_matmul_fuse_pass_v2",             //
    "skip_layernorm_fuse_pass",                  //
    "conv_bn_fuse_pass",                         //
    "unsqueeze2_eltwise_fuse_pass",              //
    "squeeze2_matmul_fuse_pass",                 //
    "reshape2_matmul_fuse_pass",                 //
    "flatten2_matmul_fuse_pass",                 //
    "map_matmul_to_mul_pass",                    //
    "fc_fuse_pass",                              //
    "conv_elementwise_add_fuse_pass",            //
    "tensorrt_subgraph_pass",                    //
    "conv_bn_fuse_pass",                         //
#if CUDNN_VERSION >= 7100  // conv_fusion requires cuDNN 7.1 or newer
                           // (CUDNN_VERSION encodes major * 1000 +
                           // minor * 100 + patch).
    "conv_elementwise_add_act_fuse_pass",   //
    "conv_elementwise_add2_act_fuse_pass",  //
#endif
    "transpose_flatten_concat_fuse_pass",
});

const std::vector<std::string> kLiteSubgraphPasses({
#ifdef PADDLE_WITH_LITE
    "lite_subgraph_pass",
#endif
});

GpuPassStrategy::GpuPassStrategy() : PassStrategy({}) {
  passes_.assign({
      // "identity_scale_op_clean_pass",           //
      "is_test_pass",                              //
      "simplify_with_basic_ops_pass",              //
      "conv_affine_channel_fuse_pass",             //
      "conv_eltwiseadd_affine_channel_fuse_pass",  //
      "conv_bn_fuse_pass",                         //
      "conv_eltwiseadd_bn_fuse_pass",              //
      "embedding_eltwise_layernorm_fuse_pass",     //
      "multihead_matmul_fuse_pass_v2",             //
      "squeeze2_matmul_fuse_pass",                 //
      "reshape2_matmul_fuse_pass",                 //
      "flatten2_matmul_fuse_pass",                 //
      "map_matmul_to_mul_pass",                    //
      "fc_fuse_pass",                              //
      "fc_elementwise_layernorm_fuse_pass",        //
#if CUDNN_VERSION >= 7100  // conv_fusion requires cuDNN 7.1 or newer
                           // (CUDNN_VERSION encodes major * 1000 +
                           // minor * 100 + patch).
      "conv_elementwise_add_act_fuse_pass",   //
      "conv_elementwise_add2_act_fuse_pass",  //
      "conv_elementwise_add_fuse_pass",       //
#endif
      "transpose_flatten_concat_fuse_pass",  //
      // The following pass must stay last, since it works on all fused ops.
      "runtime_context_cache_pass",
  });

  use_gpu_ = true;
}

void GpuPassStrategy::EnableCUDNN() {
  if (!use_cudnn_) {
    passes_.insert(passes_.begin(), "cudnn_placement_pass");
  }
  use_cudnn_ = true;
}
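
// Illustrative call-site sketch (assumed, not part of this file): EnableCUDNN
// prepends "cudnn_placement_pass" ahead of all fusion passes, and use_cudnn_
// keeps repeated calls from inserting it twice.
//
//   GpuPassStrategy gpu_strategy;
//   gpu_strategy.EnableCUDNN();
//   gpu_strategy.EnableCUDNN();  // second call inserts nothing
//   LOG(INFO) << gpu_strategy.DebugString();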

void GpuPassStrategy::EnableMKLDNN() {
  LOG(ERROR) << "GPU does not support MKL-DNN yet";
}

void GpuPassStrategy::EnableMkldnnQuantizer() {
  LOG(ERROR) << "GPU does not support MKL-DNN quantization";
}

void GpuPassStrategy::EnableMkldnnBfloat16() {
  LOG(ERROR) << "GPU does not support MKL-DNN bfloat16";
}

CpuPassStrategy::CpuPassStrategy() : PassStrategy({}) {
  // NOTE: the larger fusions should come first, so that they are not broken
  // apart by the smaller ones.
  passes_.assign({"simplify_with_basic_ops_pass",   //
                  "layer_norm_fuse_pass",           //
                  "attention_lstm_fuse_pass",       //
                  "seqconv_eltadd_relu_fuse_pass",  //
                  // "seqpool_concat_fuse_pass",    //
                  "seqpool_cvm_concat_fuse_pass",  //
                  // "embedding_fc_lstm_fuse_pass", //
                  // TODO(wilber): fix correctness problem.
                  // "fc_lstm_fuse_pass",                    //
                  "mul_lstm_fuse_pass",                      //
                  "fc_gru_fuse_pass",                        //
                  "mul_gru_fuse_pass",                       //
                  "seq_concat_fc_fuse_pass",                 //
                  "squeeze2_matmul_fuse_pass",               //
                  "reshape2_matmul_fuse_pass",               //
                  "flatten2_matmul_fuse_pass",               //
                  "map_matmul_to_mul_pass",                  //
                  "fc_fuse_pass",                            //
                  "repeated_fc_relu_fuse_pass",              //
                  "squared_mat_sub_fuse_pass",               //
                  "conv_bn_fuse_pass",                       //
                  "conv_eltwiseadd_bn_fuse_pass",            //
                  "conv_transpose_bn_fuse_pass",             //
                  "conv_transpose_eltwiseadd_bn_fuse_pass",  //
                  "is_test_pass",                            //
                  // The following pass must stay last, since it works on all
                  // fused ops.
                  "runtime_context_cache_pass"});

  use_gpu_ = false;
}

void CpuPassStrategy::EnableCUDNN() {
  LOG(ERROR) << "CPU does not support cuDNN";
}

void CpuPassStrategy::EnableMKLDNN() {
// TODO(Superjomn) Consider the way to mix CPU with GPU.
#ifdef PADDLE_WITH_MKLDNN
  if (!use_mkldnn_) {
    passes_.insert(passes_.begin(), "mkldnn_placement_pass");

    for (auto &pass : std::vector<std::string>({
             "depthwise_conv_mkldnn_pass",    //
             "conv_bn_fuse_pass",             // Execute the BN passes again to
             "conv_eltwiseadd_bn_fuse_pass",  // preserve the correct order.
             "conv_affine_channel_fuse_pass",             //
             "conv_eltwiseadd_affine_channel_fuse_pass",  //
             "conv_transpose_bn_fuse_pass",               //
             "conv_transpose_eltwiseadd_bn_fuse_pass",    //
             "conv_bias_mkldnn_fuse_pass",                //
             "conv_transpose_bias_mkldnn_fuse_pass",      //
             "conv3d_bias_mkldnn_fuse_pass",              //
             "conv_elementwise_add_mkldnn_fuse_pass",     //
             "conv_concat_relu_mkldnn_fuse_pass",         //
             "conv_relu_mkldnn_fuse_pass",                 //
             "conv_leaky_relu_mkldnn_fuse_pass",           //
             "conv_relu6_mkldnn_fuse_pass",                //
             "conv_swish_mkldnn_fuse_pass",                //
             "scale_matmul_fuse_pass",                     //
             "reshape_transpose_matmul_mkldnn_fuse_pass",  //
             "matmul_transpose_reshape_fuse_pass",         //
             // Disabled due to topology-dependent speed-up:
             // "fc_mkldnn_pass",
             // "fc_act_mkldnn_fuse_pass",
             "batch_norm_act_fuse_pass",
             // TODO(intel): Please fix the bug on Windows.
             // https://github.com/PaddlePaddle/Paddle/issues/29710
             // "mkldnn_inplace_pass",  // This pass should be activated after
             // the fuses; disabled by default due to little gain and lots of
             // problems.
         })) {
      passes_.push_back(pass);
    }
  }
  use_mkldnn_ = true;
#else
  use_mkldnn_ = false;
#endif
}
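
// A hedged call-site sketch (assumed, not part of this file): the MKL-DNN
// passes are appended after the generic CPU fusions, and use_mkldnn_ guards
// against appending them twice.
//
//   CpuPassStrategy cpu_strategy;
//   cpu_strategy.EnableMKLDNN();  // no-op unless built with PADDLE_WITH_MKLDNN
//   LOG(INFO) << cpu_strategy.DebugString();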

void CpuPassStrategy::EnableMkldnnQuantizer() {
#ifdef PADDLE_WITH_MKLDNN
  if (!use_mkldnn_quantizer_) {
    passes_.push_back("cpu_quantize_placement_pass");
  }
  use_mkldnn_quantizer_ = true;
#else
  use_mkldnn_quantizer_ = false;
#endif
}

void CpuPassStrategy::EnableMkldnnBfloat16() {
#ifdef PADDLE_WITH_MKLDNN
  if (!use_mkldnn_bfloat16_) {
    passes_.push_back("cpu_bfloat16_placement_pass");
    passes_.push_back("cpu_bfloat16_pass");
  }
  use_mkldnn_bfloat16_ = true;
#else
  use_mkldnn_bfloat16_ = false;
#endif
}
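
// The quantizer and bfloat16 toggles above follow the same pattern: each
// appends its placement pass(es) once, and only in PADDLE_WITH_MKLDNN builds.
// A hedged sketch of an assumed call site:
//
//   CpuPassStrategy strategy;
//   strategy.EnableMkldnnBfloat16();   // appends cpu_bfloat16_placement_pass
//                                      // and cpu_bfloat16_pass
//   strategy.EnableMkldnnQuantizer();  // appends cpu_quantize_placement_pass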

}  // namespace paddle