/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use
this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/inference/tensorrt/engine.h"

#include <NvInfer.h>
#include <cuda.h>
#include <glog/logging.h>
#include <string>
#include <unordered_set>
#include <vector>
#include "paddle/fluid/inference/analysis/helper.h"
#include "paddle/fluid/inference/tensorrt/helper.h"
#include "paddle/fluid/platform/enforce.h"

namespace paddle {
namespace inference {
namespace tensorrt {
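// A minimal usage sketch of this engine (the constructor arguments and GPU
// buffer preparation below are placeholders, assumed to be handled by the
// caller; op conversion is elided):
//
//   TensorRTEngine engine(/* max_batch, max_workspace, precision, ... */);
//   engine.InitNetwork();
//   // ... DeclareInput(), convert ops, DeclareOutput() ...
//   engine.FreezeNetwork();
//   std::vector<void *> buffers = {input_gpu_ptr, output_gpu_ptr};
//   engine.Execute(batch_size, &buffers, stream);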

int TensorRTEngine::runtime_batch_ = 1;

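// Creates the TRT builder and network definition. In dynamic-shape mode
// (TRT >= 6) this builds an explicit-batch network together with a builder
// config and an optimization profile; otherwise an implicit-batch network
// is created.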
void TensorRTEngine::InitNetwork() {
  freshDeviceId();
  infer_builder_.reset(createInferBuilder(&logger_));

  if (with_dynamic_shape_) {
#if IS_TRT_VERSION_GE(6000)
    infer_networkv2_.reset(infer_builder_->createNetworkV2(
        1U << static_cast<int>(
            nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH)));
    infer_builder_config_.reset(infer_builder_->createBuilderConfig());
    optim_profile_.reset(infer_builder_->createOptimizationProfile());
#endif
  } else {
    infer_network_.reset(infer_builder_->createNetwork());
  }
}

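// Runs inference on the given CUDA stream using the caller-provided device
// buffers. Static-shape engines use enqueue() with an explicit batch size;
// dynamic-shape engines (TRT >= 6) use enqueueV2().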
void TensorRTEngine::Execute(int batch_size, std::vector<void *> *buffers,
                             cudaStream_t stream) {
  freshDeviceId();
  auto infer_context = context();
  if (!with_dynamic_shape()) {
    infer_context->enqueue(batch_size, buffers->data(), stream, nullptr);
  } else {
#if IS_TRT_VERSION_GE(6000)
    infer_context->enqueueV2(buffers->data(), stream, nullptr);
#endif
  }
  SetRuntimeBatch(batch_size);
}

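// Builds the TRT engine from the populated network definition, applying the
// max batch size, workspace limit, FP16/INT8 precision settings, and, in
// dynamic-shape mode, the min/opt/max optimization profile.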
void TensorRTEngine::FreezeNetwork() {
  freshDeviceId();
  VLOG(3) << "Freezing the TRT network";
  PADDLE_ENFORCE(infer_builder_ != nullptr,
                 "Call InitNetwork first to initialize the builder.");
  PADDLE_ENFORCE_EQ(network() != nullptr, true,
                    platform::errors::InvalidArgument(
                        "Call InitNetwork first to initialize network."));
  // build engine.
  infer_builder_->setMaxBatchSize(max_batch_);
  infer_builder_->setMaxWorkspaceSize(max_workspace_);
  bool enable_fp16 = (precision_ == AnalysisConfig::Precision::kHalf);
#if IS_TRT_VERSION_GE(5000)
  if (enable_fp16) {
    bool support_fp16 = infer_builder_->platformHasFastFp16();
    infer_builder_->setFp16Mode(support_fp16);
    if (!support_fp16) {
      LOG(INFO) << "You specify FP16 mode, but the hardware do not support "
                   "FP16 speed up, use FP32 instead.";
82 83
    } else {
      LOG(INFO) << "Run Paddle-TRT FP16 mode";
    }
  }
#else
  if (enable_fp16)
    LOG(INFO) << "Using FP16 in Paddle-TRT requires a TRT version of at "
                 "least 5, so FP32 will be used instead.";
#endif
  bool enable_int8 = (precision_ == AnalysisConfig::Precision::kInt8);

  if (enable_int8) {
    infer_builder_->setInt8Mode(true);
    if (calibrator_) {
      infer_builder_->setInt8Calibrator(calibrator_);
    } else {
      infer_builder_->setInt8Calibrator(nullptr);

#if IS_TRT_VERSION_GE(5000)
      infer_builder_->setStrictTypeConstraints(true);
      for (auto &quant_range : quant_dynamic_range_) {
        auto tensor = quant_range.first;
        float range = quant_range.second;
        tensor->setDynamicRange(-range, range);
      }

      std::unordered_set<nvinfer1::ITensor *> all_t;
      for (int i = 0; i < network()->getNbLayers(); i++) {
        auto layer = network()->getLayer(i);
        for (int j = 0; j < layer->getNbOutputs(); j++) {
          all_t.insert(layer->getOutput(j));
        }
      }
      for (int i = 0; i < network()->getNbInputs(); i++) {
        all_t.insert(network()->getInput(i));
      }

      for (auto &t : all_t) {
        if (!quant_dynamic_range_.count(t)) {
          VLOG(3) << "We are in trt int8 mode(not calibration), scale not set"
                  << " for tensor " << t->getName()
                  << ", this might be ok when trt does not need this range";
        }
      }
      auto is_layer_int8 = [&](nvinfer1::ILayer *layer) -> bool {
        for (int j = 0; j < layer->getNbInputs(); j++) {
          auto *temp_in = layer->getInput(j);
          if (!temp_in->dynamicRangeIsSet()) {
            VLOG(1) << "Layer(Name: " << layer->getName()
                    << ") is set to float32 because its input("
                    << temp_in->getName() << ") doesn't have dynamic range.";
            return false;
          }
        }
        for (int j = 0; j < layer->getNbOutputs(); j++) {
          auto *temp_out = layer->getOutput(j);
          if (temp_out->isNetworkOutput()) {
            VLOG(1) << "Layer(Name: " << layer->getName()
                    << ") is set to float32 because its output("
                    << temp_out->getName() << ") is the output of the network.";
            return false;
          }
          if (!temp_out->dynamicRangeIsSet()) {
            VLOG(1) << "Layer(Name: " << layer->getName()
                    << ") is set to float32 because its output("
                    << temp_out->getName() << ") doesn't have dynamic range.";
            return false;
          }
        }
        return true;
      };
      // If a layer's output is the network's output, or if not all of its
      // inputs and outputs have dynamic ranges (scales) set, the layer's
      // precision is forced to float32. This has no effect if the layer is
      // fused away during TRT optimization.
      for (int i = 0; i < network()->getNbLayers(); i++) {
        auto layer = network()->getLayer(i);
        if (!is_layer_int8(layer)) {
          layer->setPrecision(nvinfer1::DataType::kFLOAT);
        }
      }
#endif
    }
  }

  if (with_dynamic_shape_) {
#if IS_TRT_VERSION_GE(6000)
    LOG(INFO) << "Run Paddle-TRT Dynamic Shape mode.";
    for (auto &input : min_input_shape_) {
      optim_profile_->setDimensions(
          input.first.c_str(), nvinfer1::OptProfileSelector::kMIN,
          Vec2TRT_Dims(input.second, input.first, true));
      optim_profile_->setDimensions(
          input.first.c_str(), nvinfer1::OptProfileSelector::kMAX,
          Vec2TRT_Dims(max_input_shape_[input.first], input.first, true));
      optim_profile_->setDimensions(
          input.first.c_str(), nvinfer1::OptProfileSelector::kOPT,
          Vec2TRT_Dims(optim_input_shape_[input.first], input.first, true));
    }
    infer_builder_config_->addOptimizationProfile(optim_profile_.get());
    if (WithFp16()) {
      infer_builder_config_->setFlag(nvinfer1::BuilderFlag::kFP16);
      if (disable_trt_plugin_fp16()) {
        LOG(INFO) << "NOTE: In order to achieve higher accuracy, you have "
                     "disabled the fp16 mode of TRT Plugin,\n"
                  << "you can reopen it with "
                     "'config.SetDynamicShapeInfo(min_shape, max_shape, "
                     "opt_shape, false /*disable_trt_plugin_fp16*/)'";
      }
    }
    infer_engine_.reset(infer_builder_->buildEngineWithConfig(
        *network(), *infer_builder_config_));
#endif
  } else {
    infer_engine_.reset(infer_builder_->buildCudaEngine(*network()));
  }
  PADDLE_ENFORCE(infer_engine_ != nullptr, "build cuda engine failed!");
}

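// Adds a named input of the given data type and shape to the network and
// registers it in the ITensor map.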
nvinfer1::ITensor *TensorRTEngine::DeclareInput(const std::string &name,
                                                nvinfer1::DataType dtype,
                                                const nvinfer1::Dims &dims) {
  PADDLE_ENFORCE_EQ(network() != nullptr, true,
                    platform::errors::InvalidArgument(
                        "The TRT network should be initialized first."));
  auto *input = network()->addInput(name.c_str(), dtype, dims);
  PADDLE_ENFORCE(input, "Failed to add input %s to the TRT network.", name);
  PADDLE_ENFORCE(input->isNetworkInput());
  TensorRTEngine::SetITensor(name, input);
  return input;
}

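// Marks the offset-th output of the given layer as a network output under
// the given name.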
void TensorRTEngine::DeclareOutput(const nvinfer1::ILayer *layer, int offset,
                                   const std::string &name) {
  auto *output = layer->getOutput(offset);
  SetITensor(name, output);
  PADDLE_ENFORCE(output != nullptr);
  output->setName(name.c_str());
  PADDLE_ENFORCE(!output->isNetworkInput());
  network()->markOutput(*output);
  PADDLE_ENFORCE(output->isNetworkOutput());
}

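// Marks a previously registered ITensor as a network output.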
void TensorRTEngine::DeclareOutput(const std::string &name) {
  auto *output = TensorRTEngine::GetITensor(name);
  PADDLE_ENFORCE(output != nullptr);
  output->setName(name.c_str());
  PADDLE_ENFORCE(!output->isNetworkInput());
  network()->markOutput(*output);
}

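// Registers an ITensor under a unique name so that op converters can look
// it up later.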
void TensorRTEngine::SetITensor(const std::string &name,
                                nvinfer1::ITensor *tensor) {
  PADDLE_ENFORCE(tensor != nullptr);
  PADDLE_ENFORCE_EQ(0, itensor_map_.count(name), "duplicate ITensor name %s",
                    name);
  itensor_map_[name] = tensor;
}

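// Looks up a previously registered ITensor by name.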
nvinfer1::ITensor *TensorRTEngine::GetITensor(const std::string &name) {
  PADDLE_ENFORCE(itensor_map_.count(name), "no ITensor %s", name);
  return itensor_map_[name];
}

void TensorRTEngine::SetRuntimeBatch(size_t batch_size) {
  runtime_batch_ = batch_size;
}

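// Copies a weight tensor to CPU memory and caches it in weight_map under a
// name made unique by a per-call suffix; the returned pointer stays valid
// for the lifetime of the engine.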
float *TensorRTEngine::GetWeightCPUData(const std::string &name,
                                        framework::Tensor *weight_tensor,
                                        bool enable_int8,
                                        const std::vector<float> &scale) {
  static int name_suffix_counter = 0;
  std::string name_suffix = std::to_string(name_suffix_counter);
  std::string splitter = "__";
  std::string name_with_suffix = name + splitter + name_suffix;
  platform::CPUPlace cpu_place;
  PADDLE_ENFORCE_EQ(
      weight_map.count(name_with_suffix), 0,
      "During TRT Op converter: We set weight %s with the same name "
      "twice into the weight_map",
      name_with_suffix);
  weight_map[name_with_suffix].reset(new framework::Tensor());
  weight_map[name_with_suffix]->Resize(weight_tensor->dims());
  TensorCopySync(*weight_tensor, cpu_place, weight_map[name_with_suffix].get());
  float *weight_data =
      weight_map[name_with_suffix]->mutable_data<float>(cpu_place);
  name_suffix_counter += 1;
  return weight_data;
}

int TensorRTEngine::GetRuntimeBatch() { return runtime_batch_; }

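// Takes ownership of the given plugin and adds it to the network as a
// plugin layer.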
nvinfer1::IPluginLayer *TensorRTEngine::AddPlugin(
    nvinfer1::ITensor *const *inputs, int num_inputs,
    plugin::PluginTensorRT *plugin) {
  owned_plugin_.emplace_back(plugin);
  return network()->addPluginExt(inputs, num_inputs, *plugin);
}

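// Ensures subsequent CUDA and TRT calls are issued on the device that owns
// this engine.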
void TensorRTEngine::freshDeviceId() {
  int count;
  cudaGetDeviceCount(&count);
  PADDLE_ENFORCE_LT(device_id_, count);
  cudaSetDevice(device_id_);
}

}  // namespace tensorrt
}  // namespace inference
}  // namespace paddle