// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/inference/analysis/ir_pass_manager.h"

#include <map>
#include <memory>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>

#include "paddle/fluid/framework/ir/fuse_pass_base.h"
#include "paddle/fluid/framework/ir/graph.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/inference/analysis/argument.h"
#include "paddle/fluid/inference/analysis/ir_passes/subgraph_detector.h"
#include "paddle/fluid/string/pretty_log.h"

namespace paddle {
namespace inference {
namespace analysis {
using string::PrettyLogEndl;
using string::PrettyLog;
using string::Style;

// Builds the analysis graph from the program held by |argument| and
// instantiates the configured IR passes.
IRPassManager::IRPassManager(Argument *argument) {
  ARGUMENT_CHECK_FIELD(argument, main_program);
  graph_ = std::unique_ptr<Graph>(new Graph(argument->main_program()));
  if (argument->Has("scope")) {
    auto *scope_ptr = argument->scope_ptr();
    // Fail loudly with a message instead of a bare enforce so the user knows
    // which precondition was violated.
    PADDLE_ENFORCE(scope_ptr, "The scope ptr should not be nullptr.");
    // The scope is owned by the caller; the graph only borrows it.
    graph_->SetNotOwned(framework::ir::kParamScopeAttr, scope_ptr);
  }

  ARGUMENT_CHECK_FIELD(argument, ir_analysis_passes);
  CreatePasses(argument, argument->ir_analysis_passes());
}
// Instantiates every pass named in |passes| from the global PassRegistry,
// wires the per-pass attributes it needs out of |argument|, and stores the
// ready-to-run passes in passes_ (in the given order).
void IRPassManager::CreatePasses(Argument *argument,
                                 const std::vector<std::string> &passes) {
  std::string pre_pass;  // name of the previously created pass, for viz files
  int pass_num = 0;      // sequence number used in graph_viz dot file names
  // Loop-invariant: read once instead of on every iteration.
  disable_logs_ = argument->disable_logs();
  for (const std::string &pass_name : passes) {
    auto pass = framework::ir::PassRegistry::Instance().Get(pass_name);

    if (pass_name == "graph_viz_pass") {
      // Dot file is named after the pass that ran right before this viz pass,
      // e.g. "3_ir_fc_fuse_pass.dot"; the first one is "0_ir_origin.dot".
      std::string dot_file_path = std::to_string(pass_num) + "_ir_" +
                                  (pre_pass.empty() ? "origin" : pre_pass) +
                                  ".dot";
      pass->Set("graph_viz_path", new std::string(std::move(dot_file_path)));
      pass_num++;
    } else if (pass_name == "mkldnn_placement_pass") {
      pass->Set("mkldnn_enabled_op_types",
                new std::unordered_set<std::string>(
                    argument->mkldnn_enabled_op_types()));
    } else if (pass_name == "cudnn_placement_pass") {
      // Empty set means "enable for all supported op types".
      pass->Set("cudnn_enabled_op_types",
                new std::unordered_set<std::string>());
#ifdef PADDLE_WITH_MKLDNN
    } else if (pass_name == "cpu_quantize_placement_pass") {
      pass->Set("quantize_enabled_op_types",
                new std::unordered_set<std::string>(
                    argument->quantize_enabled_op_types()));
      pass->Set(
          "quantize_excluded_op_ids",
          new std::unordered_set<int>(argument->quantize_excluded_op_ids()));
    } else if (pass_name == "cpu_quantize_pass") {
      pass->Set("quant_var_scales",
                new VarQuantScale(argument->quant_var_scales()));
#endif
    } else if (pass_name == "tensorrt_subgraph_pass") {
      pass->Set("workspace_size", new int(argument->tensorrt_workspace_size()));
      pass->Set("max_batch_size", new int(argument->tensorrt_max_batch_size()));
      pass->Set("min_subgraph_size",
                new int(argument->tensorrt_min_subgraph_size()));
      // The pass needs the whole program to rebuild subgraph blocks; the
      // pointer is owned by argument, only the pointer cell is new'd here.
      pass->Set("program",
                new framework::ProgramDesc *(&argument->main_program()));

      auto precision_mode = argument->tensorrt_precision_mode();
      bool enable_int8 = precision_mode == AnalysisConfig::Precision::kInt8;

      pass->Set("predictor_id", new int(argument->predictor_id()));
      bool use_calib_mode = argument->tensorrt_use_calib_mode();
      pass->Set("enable_int8", new bool(enable_int8));
      pass->Set("use_calib_mode", new bool(use_calib_mode));
      pass->Set("precision_mode",
                new AnalysisConfig::Precision(precision_mode));

      bool use_static_engine = argument->tensorrt_use_static_engine();
      bool model_from_memory = argument->model_from_memory();
      std::string optim_cache_dir = argument->optim_cache_dir();
      // INT8 calibration tables must be persisted somewhere; when the model
      // is loaded from memory there is no model dir to fall back to, so an
      // explicit cache dir is mandatory.
      bool int8_valid =
          !(model_from_memory && optim_cache_dir.empty() && enable_int8);
      PADDLE_ENFORCE(int8_valid,
                     "When you are in TRT INT8 mode, and load model from "
                     "memory, you should set optim_cache_dir using "
                     "config.SetOptimCacheDir()");
      PADDLE_ENFORCE(!(model_from_memory && use_static_engine),
                     "When you are using Paddle-TRT, and also using load model "
                     "from memory, you should set the use_static to false.");

      if (!optim_cache_dir.empty()) {
        pass->Set("model_opt_cache_dir", new std::string(optim_cache_dir));
      } else if (use_static_engine || enable_int8) {
        // Default the cache location to the model directory (or the directory
        // containing the program file), creating it if necessary.
        std::string model_opt_cache_dir =
            argument->Has("model_dir")
                ? argument->model_dir()
                : GetDirRoot(argument->model_program_path());
        pass->Set(
            "model_opt_cache_dir",
            new std::string(GetOrCreateModelOptCacheDir(model_opt_cache_dir)));
      }
      pass->Set("gpu_device_id", new int(argument->gpu_device_id()));
      pass->Set("use_static_engine", new bool(use_static_engine));
      pass->Set("model_from_memory", new bool(argument->model_from_memory()));
    }
    if (pass_name == "ngraph_subgraph_pass") {
      pass->Set("program",
                new framework::ProgramDesc *(&argument->main_program()));
    }
    if (pass_name == "anakin_subgraph_pass") {
      pass->Set("program",
                new framework::ProgramDesc *(&argument->main_program()));
      pass->Set("use_gpu", new bool(argument->use_gpu()));
      pass->Set("gpu_device_id", new int(argument->gpu_device_id()));
      pass->Set("model_from_memory", new bool(argument->model_from_memory()));
      pass->Set("predictor_id", new int(argument->predictor_id()));
      pass->Set("max_input_shape", new std::map<std::string, std::vector<int>>(
                                       argument->anakin_max_input_shape()));
      pass->Set("max_batch_size", new int(argument->anakin_max_batch_size()));
      bool enable_int8 =
          argument->anakin_precision_mode() == AnalysisConfig::Precision::kInt8;
      pass->Set("enable_int8", new bool(enable_int8));
      pass->Set("anakin_ops_filter",
                new std::vector<std::string>(argument->anakin_ops_filter()));
      pass->Set("auto_config_layout",
                new bool(argument->anakin_auto_config_layout()));
    }
    if (pass_name == "fc_fuse_pass") {
      pass->Set("use_gpu", new bool(argument->use_gpu()));
    }

    pre_pass = pass_name;

    passes_.emplace_back(std::move(pass));
  }
}
std::unique_ptr<Graph> IRPassManager::Apply(std::unique_ptr<Graph> graph) {
  if (passes_.empty()) {
    return graph;
  }
  PADDLE_ENFORCE(graph.get());
  // Apply all the passes
  for (const auto &pass : passes_) {
168
    if (pass->Type() != "graph_viz_pass" && !disable_logs_) {
Y
Yan Chunwei 已提交
169 170
      PrettyLogEndl(Style::H2(), "--- Running IR pass [%s]", pass->Type());
    }
171
    graph.reset(pass->Apply(graph.release()));
172
  }
G
Gabor Buella 已提交
173
  return graph;
174 175 176
}

// Converts the (possibly transformed) |graph| back into a ProgramDesc proto,
// using |program| as the template whose non-graph information is preserved.
framework::proto::ProgramDesc IRPassManager::AcquireProgram(
    std::unique_ptr<Graph> *graph, ProgramDesc *program) const {
  auto pass =
      framework::ir::PassRegistry::Instance().Get("graph_to_program_pass");

  // Direct using ProgramDesc desc(argument->main_program()) may cause
  // incomplete copies of information.
  ProgramDesc desc;
  desc.CopyFrom(*program->Proto());
  // desc outlives the pass run, so SetNotOwned is safe here.
  pass->SetNotOwned("program", &desc);
  auto *the_graph = graph->release();
  graph->reset(pass->Apply(the_graph));
  return *desc.Proto();
}
}  // namespace analysis
}  // namespace inference
}  // namespace paddle