// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/inference/api/onnxruntime_predictor.h"

#include <glog/logging.h>

#include <algorithm>
#include <fstream>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>

#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/framework/var_type_traits.h"
#include "paddle/fluid/framework/variable_helper.h"
#include "paddle/fluid/inference/analysis/helper.h"
#include "paddle/fluid/inference/api/helper.h"
#include "paddle/fluid/inference/api/paddle_inference_api.h"
#include "paddle/fluid/inference/api/paddle_inference_pass.h"
#include "paddle/fluid/inference/utils/io_utils.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/platform/cpu_helper.h"
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/platform/profiler.h"

namespace paddle {

paddle_infer::DataType ConvertONNXType(ONNXTensorElementDataType type) {
  switch (type) {
    case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT:
      return paddle_infer::DataType::FLOAT32;
    case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT16:
      return paddle_infer::DataType::FLOAT16;
    case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT8:
      return paddle_infer::DataType::INT8;
    case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32:
      return paddle_infer::DataType::INT32;
    case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64:
      return paddle_infer::DataType::INT64;
    case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8:
      return paddle_infer::DataType::UINT8;
    default:
      LOG(ERROR) << "unsupported ONNX Tensor Type: " << static_cast<int>(type);
      return paddle_infer::DataType::FLOAT32;
  }
}
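
// Paddle2ONNX conversion needs an explicit program file and params file (or
// the equivalent in-memory buffers); the combined model_dir layout is
// rejected up front.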

bool CheckConvertToONNX(const AnalysisConfig &config) {
  if (!config.model_dir().empty()) {
    LOG(ERROR) << "Paddle2ONNX does not support the model_dir config.";
    // TODO(heliqi, jiangjiajun): Paddle2ONNX does not yet support
    // config.model_dir() + "/__model__"
    // config.model_dir() + var_name
    return false;
  } else if (config.prog_file().empty() || config.params_file().empty()) {
    LOG(ERROR) << string::Sprintf(
        "Invalid model path '%s', program path '%s', or params path '%s'.",
        config.model_dir(),
        config.prog_file(),
        config.params_file());
    return false;
  }
  if (config.model_from_memory()) {
    return paddle2onnx::IsExportable(config.prog_file().data(),
                                     config.prog_file().size(),
                                     config.params_file().data(),
                                     config.params_file().size());
  } else {
    return paddle2onnx::IsExportable(config.prog_file().c_str(),
                                     config.params_file().c_str());
  }
}

bool ONNXRuntimePredictor::Init() {
  VLOG(3) << "ONNXRuntime Predictor::init()";

  // Select the device for ORT buffers: CUDA when GPU is enabled, otherwise CPU.
  const char *device_name = config_.use_gpu() ? "Cuda" : "Cpu";
  if (config_.use_gpu()) {
    place_ = paddle::platform::CUDAPlace(config_.gpu_device_id());
  } else {
    place_ = paddle::platform::CPUPlace();
  }
  scope_.reset(new paddle::framework::Scope());

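  // Convert the Paddle program/params into an in-memory ONNX proto via
  // Paddle2ONNX; the resulting buffer is consumed by the ORT session below
  // and released once the session has been built.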
  char *onnx_proto = nullptr;
  int out_size;
  if (config_.model_from_memory()) {
    paddle2onnx::Export(config_.prog_file().data(),
                        config_.prog_file().size(),
                        config_.params_file().data(),
                        config_.params_file().size(),
                        &onnx_proto,
                        &out_size);
  } else {
    paddle2onnx::Export(config_.prog_file().c_str(),
                        config_.params_file().c_str(),
                        &onnx_proto,
                        &out_size);
  }

  Ort::SessionOptions session_options;
  if (config_.ort_optimization_enabled()) {
    session_options.SetGraphOptimizationLevel(
        GraphOptimizationLevel::ORT_ENABLE_ALL);
  }
  // The options below are kept disabled for now; enable them once they prove
  // stable.
  // session_options.SetExecutionMode(ExecutionMode::ORT_SEQUENTIAL);
  // session_options.EnableCpuMemArena();
  // session_options.EnableMemPattern();
  // session_options.SetInterOpNumThreads(config_.cpu_math_library_num_threads());
  session_options.SetIntraOpNumThreads(config_.cpu_math_library_num_threads());
  VLOG(2) << "ONNXRuntime threads " << config_.cpu_math_library_num_threads();
  if (config_.profile_enabled()) {
    LOG(WARNING) << "The ONNXRuntime Profiler is enabled, which may affect "
                    "performance.";
#if defined(_WIN32)
    session_options.EnableProfiling(L"ONNX");
#else
    session_options.EnableProfiling("ONNX");
#endif
  } else {
    VLOG(2) << "ONNXRuntime Profiler is deactivated, and no profiling report "
               "will be "
               "generated.";
  }
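  // Build the ORT session directly from the in-memory ONNX proto, then create
  // an IoBinding so inputs/outputs can be bound to existing buffers without
  // extra copies.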
  session_ = {env_, onnx_proto, static_cast<size_t>(out_size), session_options};
  binding_ = std::make_shared<Ort::IoBinding>(session_);

  Ort::MemoryInfo memory_info(
      device_name, OrtDeviceAllocator, place_.GetDeviceId(), OrtMemTypeDefault);
  Ort::Allocator allocator(session_, memory_info);

  size_t n_inputs = session_.GetInputCount();
  framework::proto::VarType::Type proto_type =
      framework::proto::VarType::LOD_TENSOR;
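  // For every graph input, record its name/shape/dtype and create a matching
  // LoDTensor variable in the scope; GetInputTensor() later hands that
  // variable back to the caller for zero-copy feeding.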
  for (size_t i = 0; i < n_inputs; ++i) {
    auto input_name = session_.GetInputName(i, allocator);
    auto type_info = session_.GetInputTypeInfo(i);
    std::vector<int64_t> shape =
        type_info.GetTensorTypeAndShapeInfo().GetShape();
    ONNXTensorElementDataType data_type =
        type_info.GetTensorTypeAndShapeInfo().GetElementType();
    input_desc_.emplace_back(ONNXDesc{input_name, shape, data_type});

    auto *ptr = scope_->Var(input_name);
    framework::InitializeVariable(ptr, proto_type);

    allocator.Free(input_name);
  }

  size_t n_outputs = session_.GetOutputCount();
  for (size_t i = 0; i < n_outputs; ++i) {
    auto output_name = session_.GetOutputName(i, allocator);
    auto type_info = session_.GetOutputTypeInfo(i);
    std::vector<int64_t> shape =
        type_info.GetTensorTypeAndShapeInfo().GetShape();
    ONNXTensorElementDataType data_type =
        type_info.GetTensorTypeAndShapeInfo().GetElementType();
    output_desc_.emplace_back(ONNXDesc{output_name, shape, data_type});

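    // Pre-bind each output to ORT-managed memory on the target device;
    // ZeroCopyRun() re-binds the outputs before every run, and
    // GetOutputTensor() reads back from the bound buffers.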
    Ort::MemoryInfo out_memory_info(device_name,
                                    OrtDeviceAllocator,
                                    place_.GetDeviceId(),
                                    OrtMemTypeDefault);
    binding_->BindOutput(output_name, out_memory_info);

    allocator.Free(output_name);
  }
  delete onnx_proto;
  onnx_proto = nullptr;
  return true;
}

template <>
std::unique_ptr<PaddlePredictor>
CreatePaddlePredictor<AnalysisConfig, PaddleEngineKind::kONNXRuntime>(
    const AnalysisConfig &config) {
  if (config.glog_info_disabled()) {
    FLAGS_logtostderr = 1;
    FLAGS_minloglevel = 2;  // GLOG_ERROR
  }

  PADDLE_ENFORCE_EQ(
      config.is_valid(),
      true,
      platform::errors::InvalidArgument(
          "Note: Each config can only be used for one predictor."));

  VLOG(3) << "create ONNXRuntimePredictor";

  std::unique_ptr<PaddlePredictor> predictor(new ONNXRuntimePredictor(config));
  // Each config can only be used for one predictor.
  config.SetInValid();
  auto predictor_p = dynamic_cast<ONNXRuntimePredictor *>(predictor.get());

  if (!predictor_p->Init()) {
    return nullptr;
  }

  return predictor;
}
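
// A rough usage sketch (not part of this file's API surface): an
// ONNXRuntime-backed predictor is created through AnalysisConfig with the
// ONNXRuntime backend enabled. The paths below are placeholders.
//
//   AnalysisConfig config;
//   config.SetModel("/path/to/model.pdmodel", "/path/to/model.pdiparams");
//   config.EnableONNXRuntime();      // switch the backend to ONNXRuntime
//   config.EnableORTOptimization();  // optional: ORT graph optimizations
//   auto predictor =
//       CreatePaddlePredictor<AnalysisConfig, PaddleEngineKind::kONNXRuntime>(
//           config);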

std::vector<std::string> ONNXRuntimePredictor::GetInputNames() {
  std::vector<std::string> input_names;
  for (auto input_desc : input_desc_) {
    input_names.push_back(input_desc.name);
  }
  return input_names;
}

std::map<std::string, std::vector<int64_t>>
ONNXRuntimePredictor::GetInputTensorShape() {
  std::map<std::string, std::vector<int64_t>> input_shapes;
  for (auto input_desc : input_desc_) {
    input_shapes[input_desc.name] = input_desc.shape;
  }
  return input_shapes;
}

std::vector<std::string> ONNXRuntimePredictor::GetOutputNames() {
  std::vector<std::string> output_names;
  for (auto output_desc : output_desc_) {
    output_names.push_back(output_desc.name);
  }
  return output_names;
}

bool ONNXRuntimePredictor::FindONNXDesc(const std::string &name,
                                        bool is_input) {
  if (is_input) {
    for (auto i : input_desc_)
      if (i.name == name) return true;
  } else {
    for (auto i : output_desc_)
      if (i.name == name) return true;
  }
  return false;
}

std::unique_ptr<ZeroCopyTensor> ONNXRuntimePredictor::GetInputTensor(
    const std::string &name) {
  PADDLE_ENFORCE_NOT_NULL(scope_->FindVar(name),
                          platform::errors::PreconditionNotMet(
                              "The input variable named %s is not found in "
                              "the ONNXRuntimePredictor.",
                              name));
  std::unique_ptr<ZeroCopyTensor> res(
      new ZeroCopyTensor(static_cast<void *>(scope_.get()), this));
  res->input_or_output_ = true;
  res->SetName(name);
  if (platform::is_cpu_place(place_)) {
    res->SetPlace(PaddlePlace::kCPU);
  } else {
    auto gpu_place = place_;
    res->SetPlace(PaddlePlace::kGPU, gpu_place.GetDeviceId());
  }
  return res;
}

std::unique_ptr<ZeroCopyTensor> ONNXRuntimePredictor::GetOutputTensor(
    const std::string &name) {
  PADDLE_ENFORCE_EQ(FindONNXDesc(name, false),
                    true,
                    platform::errors::PreconditionNotMet(
                        "The output variable named %s is not found in the "
                        "ONNXRuntimePredictor.",
                        name));
  std::unique_ptr<ZeroCopyTensor> res(new ZeroCopyTensor(nullptr, this));
  res->input_or_output_ = false;
  res->SetName(name);
  if (platform::is_cpu_place(place_)) {
    res->SetPlace(PaddlePlace::kCPU);
  } else {
    auto gpu_place = place_;
    res->SetPlace(PaddlePlace::kGPU, gpu_place.GetDeviceId());
  }
  res->SetOrtMark(true);
  res->SetOrtBinding(binding_);
  int size = output_desc_.size();
  for (int i = 0; i < size; ++i)
    if (output_desc_[i].name == name) {
      res->idx_ = i;
      res->dtype_ = ConvertONNXType(output_desc_[i].dtype);
      break;
    }
  return res;
}

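// Wrap the LoDTensor that backs desc.name in an Ort::Value without copying:
// the tensor's existing buffer, shape, and dtype are handed to ORT as-is.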
Ort::Value ONNXRuntimePredictor::GetOrtValue(const ONNXDesc &desc,
                                             const char *device_name) {
  Ort::MemoryInfo memory_info(
      device_name, OrtDeviceAllocator, place_.GetDeviceId(), OrtMemTypeDefault);
  auto *var = scope_->FindVar(desc.name);
  auto *tensor = var->GetMutable<framework::LoDTensor>();
  size_t size =
      tensor->numel() *
      framework::SizeOfType(framework::TransToProtoVarType(tensor->dtype()));
  std::vector<int64_t> shape = phi::vectorize<int64_t>(tensor->dims());
  return Ort::Value::CreateTensor(memory_info,
                                  static_cast<void *>(tensor->data()),
                                  size,
                                  shape.data(),
                                  shape.size(),
                                  desc.dtype);
}

bool ONNXRuntimePredictor::Run(const std::vector<PaddleTensor> &inputs,
                               std::vector<PaddleTensor> *output_data,
                               int batch_size) {
  LOG(ERROR) << "ONNXRuntimePredictor does not support Run(); use "
                "ZeroCopyRun() instead.";
  return false;
}

bool ONNXRuntimePredictor::ZeroCopyRun() {
  try {
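    // Bind every input tensor's existing buffer to ORT, refresh the output
    // bindings, and run the session through the IoBinding interface.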
    const char *device_name = platform::is_cpu_place(place_) ? "Cpu" : "Cuda";
    std::vector<Ort::Value> inputs;
    inputs.reserve(input_desc_.size());
    for (auto desc : input_desc_) {
      inputs.push_back(GetOrtValue(desc, device_name));
      binding_->BindInput(desc.name.c_str(), inputs.back());
    }
    for (auto output : output_desc_) {
      Ort::MemoryInfo out_memory_info(device_name,
                                      OrtDeviceAllocator,
                                      place_.GetDeviceId(),
                                      OrtMemTypeDefault);
      binding_->BindOutput(output.name.c_str(), out_memory_info);
    }
    session_.Run({}, *(binding_.get()));
  } catch (const std::exception &e) {
    LOG(ERROR) << e.what();
    return false;
  }

  return true;
}
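
// A rough per-inference sketch, assuming the usual ZeroCopyTensor interface
// (Reshape / copy_from_cpu / copy_to_cpu); names and shapes are placeholders.
//
//   auto input = predictor->GetInputTensor(predictor->GetInputNames()[0]);
//   input->Reshape({1, 3, 224, 224});
//   input->copy_from_cpu(input_data.data());   // host -> bound input buffer
//   predictor->ZeroCopyRun();
//   auto output = predictor->GetOutputTensor(predictor->GetOutputNames()[0]);
//   std::vector<float> result(/*numel from output->shape()*/);
//   output->copy_to_cpu(result.data());        // bound output buffer -> host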

std::unique_ptr<PaddlePredictor> ONNXRuntimePredictor::Clone(void *stream) {
  std::lock_guard<std::mutex> lk(clone_mutex_);
  auto *x = new ONNXRuntimePredictor(config_);
  x->Init();
  return std::unique_ptr<PaddlePredictor>(x);
}

uint64_t ONNXRuntimePredictor::TryShrinkMemory() {
  return paddle::memory::Release(place_);
}

ONNXRuntimePredictor::~ONNXRuntimePredictor() {
  binding_->ClearBoundInputs();
  binding_->ClearBoundOutputs();

  memory::Release(place_);
}

const void *ONNXRuntimePredictor::GetDeviceContexts() const {
  // TODO(inference): Support private device contexts.
  paddle::platform::DeviceContextPool &pool =
      paddle::platform::DeviceContextPool::Instance();
  const auto &dev_ctxs = pool.device_contexts();
  return &const_cast<
      std::map<phi::Place,
               std::shared_future<std::unique_ptr<phi::DeviceContext>>> &>(
      dev_ctxs);
}

}  // namespace paddle