// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/inference/api/onnxruntime_predictor.h"

#include <glog/logging.h>

#include <algorithm>
#include <fstream>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>

#include "paddle/fluid//platform/device/gpu/gpu_types.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/framework/version.h"
#include "paddle/fluid/inference/analysis/helper.h"
#include "paddle/fluid/inference/analysis/passes/memory_optimize_pass.h"
#include "paddle/fluid/inference/api/helper.h"
#include "paddle/fluid/inference/api/paddle_inference_api.h"
#include "paddle/fluid/inference/api/paddle_inference_pass.h"
#include "paddle/fluid/inference/utils/io_utils.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/platform/cpu_helper.h"
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/platform/profiler.h"

namespace paddle {

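// Maps an ONNX Runtime element type onto the paddle_infer::DataType enum;
// unsupported types log an error and fall back to FLOAT32.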
paddle_infer::DataType ConvertONNXType(ONNXTensorElementDataType type) {
  switch (type) {
    case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT:
      return paddle_infer::DataType::FLOAT32;
    case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT16:
      return paddle_infer::DataType::FLOAT16;
    case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT8:
      return paddle_infer::DataType::INT8;
    case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32:
      return paddle_infer::DataType::INT32;
    case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64:
      return paddle_infer::DataType::INT64;
    case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8:
      return paddle_infer::DataType::UINT8;
    default:
      LOG(ERROR) << "unsupported ONNX Tensor Type: " << static_cast<int>(type);
      return paddle_infer::DataType::FLOAT32;
  }
}

bool CheckConvertToONNX(const AnalysisConfig &config) {
  if (!config.model_dir().empty()) {
    LOG(ERROR) << "Paddle2ONNX does not support the model_dir config";
    // TODO(heliqi jiangjiajun): Paddle2ONNX does not support
    // config.model_dir() + "/__model__"
    // config.model_dir() + var_name
    return false;
  } else if (config.prog_file().empty() || config.params_file().empty()) {
    LOG(ERROR) << string::Sprintf(
        "Invalid model path '%s', program path '%s', or params path '%s'.",
        config.model_dir(),
        config.prog_file(),
        config.params_file());
    return false;
  }
  if (config.model_from_memory()) {
    return paddle2onnx::IsExportable(config.prog_file().data(),
                                     config.prog_file().size(),
                                     config.params_file().data(),
                                     config.params_file().size());
  } else {
    return paddle2onnx::IsExportable(config.prog_file().c_str(),
                                     config.params_file().c_str());
  }
}
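
// Note: when config.model_from_memory() is true, prog_file() and
// params_file() carry the serialized program and params buffers themselves
// rather than filesystem paths, which is why IsExportable() above reads them
// via data()/size() instead of treating them as file names.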

bool ONNXRuntimePredictor::Init() {
  VLOG(3) << "ONNXRuntime Predictor::init()";

  // Select the ONNX Runtime device: "Cuda" when the config enables GPU,
  // otherwise "Cpu".
  const char *device_name = config_.use_gpu() ? "Cuda" : "Cpu";
  if (config_.use_gpu()) {
    place_ = paddle::platform::CUDAPlace(config_.gpu_device_id());
  } else {
    place_ = paddle::platform::CPUPlace();
  }

  char *onnx_proto = nullptr;
  int out_size;
  if (config_.model_from_memory()) {
    paddle2onnx::Export(config_.prog_file().data(),
                        config_.prog_file().size(),
                        config_.params_file().data(),
                        config_.params_file().size(),
                        &onnx_proto,
                        &out_size);
  } else {
    paddle2onnx::Export(config_.prog_file().c_str(),
                        config_.params_file().c_str(),
                        &onnx_proto,
                        &out_size);
  }

  Ort::SessionOptions session_options;
  if (config_.ort_optimization_enabled()) {
    session_options.SetGraphOptimizationLevel(
        GraphOptimizationLevel::ORT_ENABLE_ALL);
  }
  // The options below are disabled for now; enable them once they prove
  // stable.
  // session_options.SetExecutionMode(ExecutionMode::ORT_SEQUENTIAL);
  // session_options.EnableCpuMemArena();
  // session_options.EnableMemPattern();
  // session_options.SetInterOpNumThreads(config_.cpu_math_library_num_threads());
  session_options.SetIntraOpNumThreads(config_.cpu_math_library_num_threads());
  VLOG(2) << "ONNXRuntime threads " << config_.cpu_math_library_num_threads();
  if (config_.profile_enabled()) {
    LOG(WARNING) << "ONNXRuntime Profiler is activated, which may affect "
                    "performance";
#if defined(_WIN32)
    session_options.EnableProfiling(L"ONNX");
#else
    session_options.EnableProfiling("ONNX");
#endif
  } else {
    VLOG(2) << "ONNXRuntime Profiler is deactivated, and no profiling report "
               "will be "
               "generated.";
  }
  session_ = {env_, onnx_proto, static_cast<size_t>(out_size), session_options};
  binding_ = std::make_shared<Ort::IoBinding>(session_);

  Ort::MemoryInfo memory_info(
      device_name, OrtDeviceAllocator, place_.GetDeviceId(), OrtMemTypeDefault);
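  // Cache each input/output's name, shape, and element type so the tensor
  // accessors below can validate names without re-querying the session.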
  Ort::Allocator allocator(session_, memory_info);

  size_t n_inputs = session_.GetInputCount();
  for (size_t i = 0; i < n_inputs; ++i) {
    auto input_name = session_.GetInputName(i, allocator);
    auto type_info = session_.GetInputTypeInfo(i);
    std::vector<int64_t> shape =
        type_info.GetTensorTypeAndShapeInfo().GetShape();
    ONNXTensorElementDataType data_type =
        type_info.GetTensorTypeAndShapeInfo().GetElementType();
    input_desc_.emplace_back(ONNXDesc{input_name, shape, data_type});
    allocator.Free(input_name);
  }

  size_t n_outputs = session_.GetOutputCount();
  for (size_t i = 0; i < n_outputs; ++i) {
    auto output_name = session_.GetOutputName(i, allocator);
    auto type_info = session_.GetOutputTypeInfo(i);
    std::vector<int64_t> shape =
        type_info.GetTensorTypeAndShapeInfo().GetShape();
    ONNXTensorElementDataType data_type =
        type_info.GetTensorTypeAndShapeInfo().GetElementType();
    output_desc_.emplace_back(ONNXDesc{output_name, shape, data_type});

    Ort::MemoryInfo out_memory_info(device_name,
                                    OrtDeviceAllocator,
                                    place_.GetDeviceId(),
                                    OrtMemTypeDefault);
    binding_->BindOutput(output_name, out_memory_info);
    allocator.Free(output_name);
  }
  // onnx_proto is allocated by paddle2onnx::Export(); assuming it is created
  // with new[], release it with the matching delete[].
  delete[] onnx_proto;
  onnx_proto = nullptr;
  return true;
}

template <>
std::unique_ptr<PaddlePredictor>
CreatePaddlePredictor<AnalysisConfig, PaddleEngineKind::kONNXRuntime>(
    const AnalysisConfig &config) {
  if (config.glog_info_disabled()) {
    FLAGS_logtostderr = 1;
    FLAGS_minloglevel = 2;  // GLOG_ERROR
  }

  PADDLE_ENFORCE_EQ(
      config.is_valid(),
      true,
      platform::errors::InvalidArgument(
          "Note: Each config can only be used for one predictor."));

  VLOG(3) << "create ONNXRuntimePredictor";

  std::unique_ptr<PaddlePredictor> predictor(new ONNXRuntimePredictor(config));
  // Each config can only be used for one predictor.
  config.SetInValid();
  auto predictor_p = dynamic_cast<ONNXRuntimePredictor *>(predictor.get());

  if (!predictor_p->Init()) {
    return nullptr;
  }

  return predictor;
}
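
// A minimal usage sketch (illustrative only; the model paths are
// hypothetical). The ONNXRuntime backend is selected on the regular
// AnalysisConfig before this specialization is reached:
//
//   AnalysisConfig config;
//   config.SetModel("model.pdmodel", "model.pdiparams");
//   config.EnableONNXRuntime();
//   config.EnableORTOptimization();  // toggles ort_optimization_enabled()
//   auto predictor = CreatePaddlePredictor<AnalysisConfig,
//                                          PaddleEngineKind::kONNXRuntime>(config);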

std::vector<std::string> ONNXRuntimePredictor::GetInputNames() {
  std::vector<std::string> input_names;
  for (const auto &input_desc : input_desc_) {
    input_names.push_back(input_desc.name);
  }
  return input_names;
}

std::map<std::string, std::vector<int64_t>>
ONNXRuntimePredictor::GetInputTensorShape() {
  std::map<std::string, std::vector<int64_t>> input_shapes;
  for (const auto &input_desc : input_desc_) {
    input_shapes[input_desc.name] = input_desc.shape;
  }
  return input_shapes;
}

std::vector<std::string> ONNXRuntimePredictor::GetOutputNames() {
  std::vector<std::string> output_names;
  for (const auto &output_desc : output_desc_) {
    output_names.push_back(output_desc.name);
  }
  return output_names;
}

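// Returns true when a tensor named `name` exists among the session inputs
// (is_input == true) or outputs (is_input == false).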
bool ONNXRuntimePredictor::FindONNXDesc(const std::string &name,
                                        bool is_input) {
  if (is_input) {
    for (const auto &desc : input_desc_)
      if (desc.name == name) return true;
  } else {
    for (const auto &desc : output_desc_)
      if (desc.name == name) return true;
  }
  return false;
}

std::unique_ptr<ZeroCopyTensor> ONNXRuntimePredictor::GetInputTensor(
    const std::string &name) {
  PADDLE_ENFORCE_EQ(FindONNXDesc(name, true),
                    true,
                    platform::errors::PreconditionNotMet(
                        "The input variable named %s is not found in the "
                        "ONNXRuntimePredictor.",
                        name));
  std::unique_ptr<ZeroCopyTensor> res(new ZeroCopyTensor(nullptr, this));
  res->input_or_output_ = true;
  res->SetName(name);
  if (platform::is_cpu_place(place_)) {
    res->SetPlace(PaddlePlace::kCPU);
  } else {
    auto gpu_place = place_;
    res->SetPlace(PaddlePlace::kGPU, gpu_place.GetDeviceId());
  }
  res->SetOrtMark(true);
  res->SetOrtBinding(binding_);
  // Reuse the cached host buffer for this input if one exists; otherwise
  // create it so repeated GetInputTensor() calls share the same storage.
  auto iter = input_buffers_.find(name);
  if (iter == input_buffers_.end()) {
    input_buffers_[name] = std::make_shared<std::vector<int8_t>>();
    res->SetOrtBuffer(input_buffers_[name]);
  } else {
    res->SetOrtBuffer(iter->second);
  }
  return res;
}

std::unique_ptr<ZeroCopyTensor> ONNXRuntimePredictor::GetOutputTensor(
    const std::string &name) {
  PADDLE_ENFORCE_EQ(FindONNXDesc(name, false),
                    true,
                    platform::errors::PreconditionNotMet(
                        "The output variable named %s is not found in the "
                        "ONNXRuntimePredictor.",
                        name));
  std::unique_ptr<ZeroCopyTensor> res(new ZeroCopyTensor(nullptr, this));
  res->input_or_output_ = false;
  res->SetName(name);
  if (platform::is_cpu_place(place_)) {
    res->SetPlace(PaddlePlace::kCPU);
  } else {
    auto gpu_place = place_;
    res->SetPlace(PaddlePlace::kGPU, gpu_place.GetDeviceId());
  }
  res->SetOrtMark(true);
  res->SetOrtBinding(binding_);
  // Record the output's index and element type so reads can locate the
  // bound result and convert it correctly.
  int size = static_cast<int>(output_desc_.size());
  for (int i = 0; i < size; ++i)
    if (output_desc_[i].name == name) {
      res->idx_ = i;
      res->dtype_ = ConvertONNXType(output_desc_[i].dtype);
      break;
    }
  return res;
}

bool ONNXRuntimePredictor::Run(const std::vector<PaddleTensor> &inputs,
                               std::vector<PaddleTensor> *output_data,
                               int batch_size) {
  LOG(ERROR) << "Not support Run";
  return false;
}

bool ONNXRuntimePredictor::ZeroCopyRun() {
  try {
    // Rebind every output before each run so ONNX Runtime allocates the
    // result buffers on the device this predictor was configured for.
    const char *device_name =
        platform::is_cpu_place(place_) ? "Cpu" : "Cuda";
    for (const auto &output : output_desc_) {
      Ort::MemoryInfo out_memory_info(device_name,
                                      OrtDeviceAllocator,
                                      place_.GetDeviceId(),
                                      OrtMemTypeDefault);
      binding_->BindOutput(output.name.c_str(), out_memory_info);
    }
    session_.Run({}, *(binding_.get()));
  } catch (const std::exception &e) {
    LOG(ERROR) << e.what();
    return false;
  }

  return true;
}
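
// A minimal zero-copy inference sketch (illustrative; tensor names, shapes,
// and the input_data vector are hypothetical):
//
//   auto input = predictor->GetInputTensor(predictor->GetInputNames()[0]);
//   input->Reshape({1, 3, 224, 224});       // hypothetical input shape
//   input->CopyFromCpu(input_data.data());  // input_data: std::vector<float>
//   predictor->ZeroCopyRun();
//   auto output = predictor->GetOutputTensor(predictor->GetOutputNames()[0]);
//   std::vector<float> result(/* product of output->shape() */);
//   output->CopyToCpu(result.data());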

std::unique_ptr<PaddlePredictor> ONNXRuntimePredictor::Clone(void *stream) {
  LOG(ERROR) << "Clone() is not supported, please create a new predictor.";
  return nullptr;
}

uint64_t ONNXRuntimePredictor::TryShrinkMemory() {
  return paddle::memory::Release(place_);
}

ONNXRuntimePredictor::~ONNXRuntimePredictor() {
  binding_->ClearBoundInputs();
  binding_->ClearBoundOutputs();

  memory::Release(place_);
}

const void *ONNXRuntimePredictor::GetDeviceContexts() const {
  // TODO(inference): Support private device contexts.
  paddle::platform::DeviceContextPool &pool =
      paddle::platform::DeviceContextPool::Instance();
  const auto &dev_ctxs = pool.device_contexts();
  return &const_cast<
      std::map<phi::Place,
               std::shared_future<std::unique_ptr<phi::DeviceContext>>> &>(
      dev_ctxs);
}

}  // namespace paddle