// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <algorithm>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>

#include "paddle/fluid/framework/ir/graph_pattern_detector.h"
#include "paddle/fluid/inference/analysis/helper.h"
#include "paddle/fluid/inference/analysis/ir_passes/subgraph_detector.h"
#include "paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.h"
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
#include "paddle/fluid/inference/tensorrt/engine.h"
#include "paddle/fluid/inference/tensorrt/op_teller.h"
#include "paddle/fluid/string/pretty_log.h"

namespace paddle {
namespace inference {
namespace analysis {

using framework::ir::Node;

std::unique_ptr<framework::ir::Graph> analysis::TensorRtSubgraphPass::ApplyImpl(
    std::unique_ptr<framework::ir::Graph> graph) const {
  framework::ir::FusePassBase::Init("tensorrt_subgraph_pass", graph.get());

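  // A teller that decides whether a single op node can be handed to
  // TensorRT; the per-op decision is delegated to the global OpTeller.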
  auto teller = [](const framework::ir::Node *node) {
    if (!node->IsOp() || !node->Op()) return false;
    return tensorrt::OpTeller::Global().Tell(node->Op()->Type(), *node->Op());
  };

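  // Fuse the ops accepted by the teller into subgraph nodes; subgraphs
  // smaller than min_subgraph_size are left as ordinary fluid ops.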
  SubGraphFuser fuser(graph.get(), teller,
                      Get<int>("min_subgraph_size") /*min subgraph size*/);
  fuser();

  std::vector<std::string> graph_param_names =
      ExtractParameters(graph->Nodes());
  // These parameters already exist in TRT and should not have another copy
  // in fluid.
  std::vector<std::string> repetitive_params;

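  // Replace each detected subgraph with a single tensorrt_engine op and
  // remove the original nodes it absorbed.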
  for (auto *node : graph->Nodes()) {
    if (node->IsOp() && !Agent(node).subgraph()->empty()) {
      CreateTensorRTOp(node, graph.get(), graph_param_names,
                       &repetitive_params);

      std::unordered_set<const Node *> nodes2remove(
          Agent(node).subgraph()->begin(), Agent(node).subgraph()->end());
      framework::ir::GraphSafeRemoveNodes(graph.get(), nodes2remove);
    }
  }

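  // Sweep away any remaining nodes the fuser marked as deleted.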
  std::unordered_set<const Node *> nodes2remove;
  for (auto *node : graph->Nodes()) {
    if (node->IsOp() && Agent(node).deleted()) {
      nodes2remove.insert(node);
    }
  }
  framework::ir::GraphSafeRemoveNodes(graph.get(), nodes2remove);
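  // Expose the collected parameter names on the graph so later passes can
  // drop the redundant fluid copies.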
  graph->Set(framework::ir::kRepetitiveParamAttr,
             new std::vector<std::string>(repetitive_params));

  return graph;
}

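// The engine key is a hash over the subgraph's input/output names plus the
// predictor id, so each distinct subgraph gets its own cached engine.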
std::string GenerateEngineKey(const std::set<std::string> &engine_inputs,
                              const std::set<std::string> &engine_outputs,
                              const std::string &predictor_id) {
  std::string engine_hash_key = "";
  for (const auto &name : engine_inputs) {
    engine_hash_key += name;
  }
  for (const auto &name : engine_outputs) {
    engine_hash_key += name;
  }
  engine_hash_key += predictor_id;
  auto engine_key = std::to_string(std::hash<std::string>()(engine_hash_key));
  return engine_key;
}

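// Replaces the subgraph rooted at `node` with a tensorrt_engine op: fills in
// the op's inputs, outputs and attributes, and builds (or loads from cache)
// the serialized TRT engine unless running in calibration mode.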
void TensorRtSubgraphPass::CreateTensorRTOp(
    framework::ir::Node *node, Graph *graph,
    const std::vector<std::string> &graph_params,
    std::vector<std::string> *repetitive_params) const {
  auto *op_desc = node->Op();
  auto &subgraph = *Agent(node).subgraph();
  PADDLE_ENFORCE(!subgraph.empty());

  framework::ProgramDesc *program_desc =
      Get<framework::ProgramDesc *>("program");
  // Add new block for TensorRTEngineOP
  const framework::BlockDesc &main_block =
      program_desc->Block(framework::kRootBlockIndex);
  framework::BlockDesc *new_block = program_desc->AppendBlock(main_block);

  // A fake block desc.
  framework::proto::BlockDesc block_proto;
  framework::BlockDesc block_desc(nullptr, &block_proto);
  block_desc.Proto()->set_parent_idx(-1);
  block_desc.Proto()->set_idx(0);
  string::PrettyLogDetail("---  detect a sub-graph with %d nodes",
                          subgraph.size());

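  // Copy every op of the subgraph into both the new sub_block (executed at
  // runtime) and the fake block desc (serialized into the op's attributes).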
  for (auto *node : subgraph) {
    auto *new_block_op = new_block->AppendOp();
    auto *op = block_desc.AppendOp();
    *new_block_op->Proto() = *node->Op()->Proto();
    *op->Proto() = *node->Op()->Proto();
  }

  // Then we use input_names_with_id and output_names_with_id to generate
  // the engine key.
  // We use std::set instead of std::unordered_set here so that the iteration
  // order, and therefore the engine key, is deterministic.
  std::set<std::string> input_names;
  std::set<std::string> input_names_with_id;
  std::vector<std::string> params;

  // node->inputs contains both input tensors and parameters.
  for (auto *x : node->inputs) {
    input_names.insert(x->Name());
    input_names_with_id.insert(x->Name() + std::to_string(x->id()));
    if (std::count(graph_params.begin(), graph_params.end(), x->Name()) > 0) {
      params.push_back(x->Name());
    }
  }

  std::set<std::string> output_names;
  std::set<std::string> output_names_with_id;
  for (auto *x : node->outputs) {
    output_names.insert(x->Name());
    output_names_with_id.insert(x->Name() + std::to_string(x->id()));
  }

  std::unordered_map<std::string, std::string> output_name_map;
  auto &subgraph_nodes = *Agent(node).subgraph();

  // The following procedure renames all the intermediate variables and the
  // output variables of the subgraph.
  // Why do we do this?
  // During the transition from a fluid op to a tensorrt op, we map the input
  // and output Tensors (fluid data structure) of the fluid op to the
  // corresponding ITensors (trt data structure) through the tensor names.
  // When we set up the ITensor for a variable, we must ensure that it has
  // not been set before.
  // If a variable in the fluid graph is both the input of one op and the
  // output of another, there will be problems. So we rename the variables in
  // the subgraph so that each one is either an op's input or an op's output,
  // but never both.
  RenameAndGetOutputs(subgraph_nodes, &block_desc, input_names_with_id,
                      &output_names_with_id, &output_names, &output_name_map);

  // When the tensorrt engine runs at the end of the operation,
  // output_mapping helps us copy the data from the renamed ITensors back to
  // the fluid Tensors.
  std::vector<std::string> output_mapping;
  for (const auto &name : output_names) {
    PADDLE_ENFORCE(output_name_map.count(name) != 0);
    output_mapping.push_back(output_name_map[name]);
  }
  PADDLE_ENFORCE(!output_mapping.empty());

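  // Copy all var descs of the graph into the fake block so the serialized
  // subgraph is self-contained.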
  auto *vars = block_desc.Proto()->mutable_vars();
  for (framework::ir::Node *node : graph->Nodes()) {
    if (node->IsVar() && node->Var()) {
      *vars->Add() = *node->Var()->Proto();
    }
  }

  PADDLE_ENFORCE(!block_desc.Proto()->vars().empty(),
                 "the block has no var-desc");

  // Set attrs
  op_desc->SetType("tensorrt_engine");
  op_desc->SetInput(
      "Xs", std::vector<std::string>(input_names.begin(), input_names.end()));

  op_desc->SetOutput(
      "Ys", std::vector<std::string>(output_names.begin(), output_names.end()));

  op_desc->SetBlockAttr("sub_block", new_block);
  SetAttr(op_desc->Proto(), "subgraph",
          block_desc.Proto()->SerializeAsString());
  SetAttr(op_desc->Proto(), "max_batch_size", Get<int>("max_batch_size"));
  SetAttr(op_desc->Proto(), "workspace_size", Get<int>("workspace_size"));
  SetAttr(op_desc->Proto(), "output_name_mapping", output_mapping);
  SetAttr(op_desc->Proto(), "parameters", params);

  auto enable_int8 = Get<bool>("enable_int8");
  auto use_static_engine = Get<bool>("use_static_engine");
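  // NOTE: the predictor id is fixed to 0 when composing the engine key here.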
  auto engine_key = GenerateEngineKey(input_names_with_id, output_names_with_id,
                                      std::to_string(0));

  // Get "" when there is no cached calibration table data.
  bool load_from_memory = Get<bool>("model_from_memory");
  std::string calibration_data = "";
  if (enable_int8) {
    calibration_data = GetTrtCalibTableData(
        Get<std::string>("model_opt_cache_dir"), engine_key, enable_int8);
  }
  SetAttr(op_desc->Proto(), "calibration_data", calibration_data);

  SetAttr(op_desc->Proto(), "enable_int8", enable_int8);
  SetAttr(op_desc->Proto(), "engine_key", engine_key);
  std::string trt_engine_serialized_data = "";

  SetAttr(op_desc->Proto(), "engine_serialized_data",
          trt_engine_serialized_data);

  std::unique_ptr<tensorrt::TRTInt8Calibrator> calibrator;
  if (enable_int8 && !calibration_data.empty()) {
    calibrator.reset(new tensorrt::TRTInt8Calibrator(calibration_data));
  }
  // In int8 mode without cached calibration data, the program only produces
  // the calibration table data.
  bool calibration_mode = (enable_int8 && calibration_data.empty());
  if (calibration_mode) {
    // Calibration mode: this run only generates the int8 calibration table
    // data, so there is no engine to build yet.
    return;
  }

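  // These weights will live inside the TRT engine; record them so their
  // fluid copies can be removed.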
  std::copy(params.begin(), params.end(),
            std::back_inserter(*repetitive_params));
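  // Persist the engine to disk only when a static engine is requested and
  // the model was not loaded from memory.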
  bool need_serialize = (use_static_engine && !load_from_memory);

  if (need_serialize) {
    trt_engine_serialized_data = GetTrtEngineSerializedData(
        Get<std::string>("model_opt_cache_dir"), engine_key);
    // We can load the previously serialized engine info from disk.
    if (!trt_engine_serialized_data.empty()) {
      SetAttr(op_desc->Proto(), "engine_serialized_data",
              trt_engine_serialized_data);
      LOG(INFO) << "Load TRT Optimized Info from "
                << GetTrtEngineSerializedPath(
                       Get<std::string>("model_opt_cache_dir"), engine_key);
      return;
    }
  }

  // The following code will NOT run in these situations:
  // 1. calibration mode (generating the trt int8 calibration table data);
  // 2. a previously serialized trt engine has already been loaded.
  LOG(INFO) << "Prepare TRT engine (Optimize model structure, Select OP "
               "kernel etc). This process may cost a lot of time.";
  std::unique_ptr<tensorrt::TensorRTEngine> trt_engine(
      new tensorrt::TensorRTEngine(
          Get<int>("max_batch_size"), Get<int>("workspace_size"), enable_int8,
          calibrator.get(), Get<int>("gpu_device_id")));
  auto *scope = param_scope();
  framework::BlockDesc block_desc_temp(nullptr, block_desc.Proto());
  std::unordered_set<std::string> param_set(params.begin(), params.end());
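  // Convert the fluid block into a TRT network inside trt_engine.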
  inference::Singleton<inference::tensorrt::OpConverter>::Global()
      .ConvertBlockToTRTEngine(
          &block_desc_temp, *scope,
          std::vector<std::string>(input_names.begin(), input_names.end()),
          param_set, output_mapping, trt_engine.get());
  nvinfer1::IHostMemory *serialized_engine_data = trt_engine->Serialize();
  trt_engine_serialized_data =
      std::string(static_cast<const char *>(serialized_engine_data->data()),
                  serialized_engine_data->size());

  if (need_serialize) {
    SaveTrtEngineSerializedDataToFile(
        GetTrtEngineSerializedPath(Get<std::string>("model_opt_cache_dir"),
                                   engine_key),
        trt_engine_serialized_data);
  }
  SetAttr(op_desc->Proto(), "engine_serialized_data",
          trt_engine_serialized_data);
}

}  // namespace analysis
}  // namespace inference
}  // namespace paddle

REGISTER_PASS(tensorrt_subgraph_pass,
              paddle::inference::analysis::TensorRtSubgraphPass)
    .RequirePassAttr("max_batch_size")
    .RequirePassAttr("workspace_size")
    .RequirePassAttr("min_subgraph_size");