/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <NvInfer.h>
#include <memory>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/inference/api/paddle_analysis_config.h"
#include "paddle/fluid/inference/engine.h"
#include "paddle/fluid/inference/tensorrt/helper.h"
#include "paddle/fluid/inference/tensorrt/plugin/trt_plugin.h"
#include "paddle/fluid/inference/tensorrt/plugin/trt_plugin_factory.h"
#include "paddle/fluid/inference/tensorrt/trt_int8_calibrator.h"
#include "paddle/fluid/inference/utils/singleton.h"

namespace paddle {
namespace inference {
namespace tensorrt {

class TRTInt8Calibrator;
/*
 * TensorRT Engine.
 *
 * There are two alternative ways to use it: one is to build the engine from a
 * Paddle protobuf model, the other is to manually construct the network.
 */
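/*
 * A rough sketch of the manual-construction flow (illustrative only: the
 * lowercase identifiers `weight`, `bias`, `buffers`, `stream`, `max_batch`,
 * etc. are placeholders, and TRT_ENGINE_ADD_LAYER is the macro defined at the
 * bottom of this file):
 *
 *   TensorRTEngine* engine = new TensorRTEngine(max_batch, max_workspace);
 *   engine->InitNetwork();
 *   auto* x = engine->DeclareInput("x", nvinfer1::DataType::kFLOAT,
 *                                  nvinfer1::DimsCHW(3, 224, 224));
 *   auto* fc = TRT_ENGINE_ADD_LAYER(engine, FullyConnected, *x, 10,
 *                                   weight.get(), bias.get());
 *   engine->DeclareOutput(fc, 0, "y");
 *   engine->FreezeNetwork();
 *   engine->Execute(batch_size, &buffers, stream);
 */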
class TensorRTEngine {
  using DescType = ::paddle::framework::proto::BlockDesc;

 public:
  // Weight is a model parameter.
  class Weight {
   public:
    Weight() = default;
    Weight(nvinfer1::DataType dtype, void* value, size_t num_elem) {
      w_.type = dtype;
      w_.values = value;
      w_.count = num_elem;
    }
    const nvinfer1::Weights& get() { return w_; }

    std::vector<int64_t> dims;

   private:
    nvinfer1::Weights w_;
  };
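  // For example (a sketch; `w_data` and `w_num` are hypothetical, and the
  // buffer must stay alive until the engine has been built):
  //   TensorRTEngine::Weight w(nvinfer1::DataType::kFLOAT,
  //                            static_cast<void*>(w_data), w_num);
  //   nvinfer1::Weights trt_w = w.get();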

  TensorRTEngine(
      int max_batch, int max_workspace,
      AnalysisConfig::Precision precision = AnalysisConfig::Precision::kFloat32,
      TRTInt8Calibrator* calibrator = nullptr, int device_id = 0,
      nvinfer1::ILogger& logger = NaiveLogger::Global())
      : max_batch_(max_batch),
        max_workspace_(max_workspace),
        precision_(precision),
        calibrator_(calibrator),
        device_id_(device_id),
        logger_(logger) {}

  ~TensorRTEngine() {}

  // TODO(Superjomn) implement it later when graph segmentation is supported.
  void Build(const DescType& paddle_model);

  void Execute(int batch_size, std::vector<void*>* buffers,
               cudaStream_t stream);

  // Initialize the inference network, so that TensorRT layers can be added
  // to it.
  void InitNetwork() {
    freshDeviceId();
    infer_builder_.reset(createInferBuilder(&logger_));
    infer_network_.reset(infer_builder_->createNetwork());
  }
  // After all ops have been added, freeze this network and create the
  // execution environment.
  void FreezeNetwork();

  // Add an input and set its name, data type and dimension.
  nvinfer1::ITensor* DeclareInput(const std::string& name,
                                  nvinfer1::DataType dtype,
                                  const nvinfer1::Dims& dim);
  // Set the offset-th output from a layer as the network's output, and set its
  // name.
  void DeclareOutput(const nvinfer1::ILayer* layer, int offset,
                     const std::string& name);
  // Set the itensor_map_[name] as the network's output, and set its name.
  void DeclareOutput(const std::string& name);
  // Check if the ITensor has been declared
  bool HasDeclared(const std::string& name);

  void SetITensor(const std::string& name, nvinfer1::ITensor* tensor);
  // Get an ITensor called name.
  nvinfer1::ITensor* GetITensor(const std::string& name);

  nvinfer1::ICudaEngine* engine() { return infer_engine_.get(); }
  nvinfer1::INetworkDefinition* network() { return infer_network_.get(); }

  nvinfer1::IHostMemory* Serialize() {
    PADDLE_ENFORCE(infer_engine_ != nullptr,
                   "You should build engine first and then serialize");
    ihost_memory_.reset(infer_engine_->serialize());
    return ihost_memory_.get();
  }

  void Deserialize(const std::string& engine_serialized_data) {
    freshDeviceId();
    infer_ptr<nvinfer1::IRuntime> runtime(createInferRuntime(&logger_));
    infer_engine_.reset(runtime->deserializeCudaEngine(
        engine_serialized_data.c_str(), engine_serialized_data.size(),
        &inference::Singleton<plugin::PluginFactoryTensorRT>::Global()));
    PADDLE_ENFORCE(infer_engine_ != nullptr,
                   "build cuda engine failed when deserialize engine info.!");
    infer_context_.reset(infer_engine_->createExecutionContext());
  }
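  // A minimal save/load round trip might look like this (a sketch; `engine`
  // and `other_engine` are hypothetical, and the string could equally be
  // persisted to a file):
  //   nvinfer1::IHostMemory* blob = engine->Serialize();
  //   std::string data(static_cast<const char*>(blob->data()), blob->size());
  //   other_engine->Deserialize(data);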

  void SetRuntimeBatch(size_t batch_size);
  int GetRuntimeBatch();
  int GetDeviceId() { return device_id_; }
  nvinfer1::IPluginLayer* AddPlugin(nvinfer1::ITensor* const* inputs,
                                    int num_inputs, plugin::PluginTensorRT*);
  void SetTensorDynamicRange(nvinfer1::ITensor* tensor, float range) {
    quant_dynamic_range_[tensor] = range;
  }
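  // For example (a sketch; `x` is a hypothetical ITensor*), record the
  // absolute-max value range of a tensor for INT8 quantization:
  //   engine->SetTensorDynamicRange(x, 2.5f);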

  float* GetWeightCPUData(const std::string& name,
                          framework::Tensor* weight_tensor, bool enable_int8,
                          const std::vector<float>& scale = {});

  // TensorRT needs a pointer to CPU memory for each weight.
  // Before TRT runs, fluid loads the weights into GPU storage,
  // so we need to copy them from GPU to CPU in our op converters.
  // We use a map to keep these weights alive, so that the weight memory is
  // not released prematurely, which would break the construction of the TRT
  // ops.
  std::unordered_map<std::string /*name*/, std::unique_ptr<framework::Tensor>>
      weight_map;

  void ClearWeights() {
    for (auto& weight_pair : weight_map) {
      weight_pair.second.reset(nullptr);
    }
  }

 private:
  // Each ICudaEngine object is bound to a specific GPU when it is
  // instantiated; call freshDeviceId() to ensure the calling thread is
  // associated with the correct device.
  void freshDeviceId();

  // the max batch size
  int max_batch_;
  // the runtime batch size
  static int runtime_batch_;
  // the max memory size the engine uses
  int max_workspace_;

  AnalysisConfig::Precision precision_;
  TRTInt8Calibrator* calibrator_;
  // batch size of the current data, updated on each Execute() call.
  int batch_size_{-1};

  int device_id_;
  nvinfer1::ILogger& logger_;

  // max data size for the buffers.
  std::unordered_map<std::string /*name*/, size_t /*max size*/> buffer_sizes_;
  std::unordered_map<std::string /*name*/, nvinfer1::ITensor* /*ITensor*/>
      itensor_map_;

  std::vector<std::unique_ptr<plugin::PluginTensorRT>> owned_plugin_;

  // TensorRT related internal members
  template <typename T>
  struct Destroyer {
    void operator()(T* x) {
      if (x) {
        x->destroy();
      }
    }
  };
  template <typename T>
  using infer_ptr = std::unique_ptr<T, Destroyer<T>>;
  infer_ptr<nvinfer1::IBuilder> infer_builder_;
  infer_ptr<nvinfer1::INetworkDefinition> infer_network_;
  infer_ptr<nvinfer1::ICudaEngine> infer_engine_;
  infer_ptr<nvinfer1::IExecutionContext> infer_context_;
  infer_ptr<nvinfer1::IHostMemory> ihost_memory_;
  std::unordered_map<nvinfer1::ITensor*, float> quant_dynamic_range_;
};  // class TensorRTEngine

#define IS_TRT_VERSION_GE(version)                       \
  ((NV_TENSORRT_MAJOR * 1000 + NV_TENSORRT_MINOR * 100 + \
    NV_TENSORRT_PATCH * 10 + NV_TENSORRT_BUILD) >= version)
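// For example, TensorRT 5.1.2.2 is encoded as 5122
// (5 * 1000 + 1 * 100 + 2 * 10 + 2), so code that requires TensorRT >= 5.0
// can be guarded with:
//   #if IS_TRT_VERSION_GE(5000)
//   ...
//   #endif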

// Add a layer__ into engine__ with args ARGS.
// For example, TRT_ENGINE_ADD_LAYER(engine, FullyConnected, ...) will add a
// fully connected layer into the engine.
//
// Reference:
// https://docs.nvidia.com/deeplearning/sdk/tensorrt-developer-guide/index.html#charRNN_define_network
//
// TensorRT has too many layers, so it is not wise to add a member function for
// each of them; a macro like this is more extensible when the underlying
// TensorRT library adds support for new layers.
#define TRT_ENGINE_ADD_LAYER(engine__, layer__, ...) \
  engine__->network()->add##layer__(__VA_ARGS__);
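
// For instance (a sketch; `engine_`, `x`, `n_out`, `w`, and `b` are
// hypothetical):
//   TRT_ENGINE_ADD_LAYER(engine_, FullyConnected, *x, n_out, w.get(), b.get())
// expands to
//   engine_->network()->addFullyConnected(*x, n_out, w.get(), b.get());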

class TRTEngineManager {
 public:
  bool Empty() const { return engines_.empty(); }
  bool Has(const std::string& name) const {
    if (engines_.count(name) == 0) return false;
    return engines_.at(name).get() != nullptr;
  }

  TensorRTEngine* Get(const std::string& name) const {
    return engines_.at(name).get();
  }

  TensorRTEngine* Create(
      std::string name, int max_batch, int max_workspace,
      AnalysisConfig::Precision precision = AnalysisConfig::Precision::kFloat32,
      TRTInt8Calibrator* calibrator = nullptr, int device_id = 0,
      nvinfer1::ILogger& logger = NaiveLogger::Global()) {
    auto* p = new TensorRTEngine(max_batch, max_workspace, precision,
                                 calibrator, device_id, logger);
    engines_[name].reset(p);
    return p;
  }

  void DeleteAll() {
    for (auto& item : engines_) {
      item.second.reset(nullptr);
    }
  }

 private:
  std::unordered_map<std::string, std::unique_ptr<TensorRTEngine>> engines_;
};
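// A sketch of typical manager usage (accessing the manager through
// inference::Singleton is an assumption here, not mandated by this header):
//   auto& manager = inference::Singleton<TRTEngineManager>::Global();
//   TensorRTEngine* trt =
//       manager.Create("subgraph_0", max_batch, max_workspace);
//   ...
//   TensorRTEngine* same = manager.Get("subgraph_0");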

}  // namespace tensorrt
}  // namespace inference
}  // namespace paddle