diff --git a/paddle/fluid/inference/api/details/zero_copy_tensor.cc b/paddle/fluid/inference/api/details/zero_copy_tensor.cc
index 0f26a1076a68ce9c5154bf0dfb1a6559024876c5..c38088a2b800218b4afe03d341499edc7913ea39 100644
--- a/paddle/fluid/inference/api/details/zero_copy_tensor.cc
+++ b/paddle/fluid/inference/api/details/zero_copy_tensor.cc
@@ -693,10 +693,9 @@ void Tensor::ORTCopyToCpu(T *data) const {
   if (place_ == PlaceType::kCPU) {
     std::memcpy(static_cast<void *>(data), value.GetTensorData<T>(), size);
   } else {
-    paddle::memory::Copy(paddle::platform::CPUPlace(),
-                         static_cast<void *>(data),
-                         paddle::platform::CUDAPlace(device_),
-                         value.GetTensorData<T>(), size, nullptr);
+    PADDLE_THROW(paddle::platform::errors::Unavailable(
+        "CopyToCpu error.The current ONNXRuntime backend doesn't support "
+        "GPU."));
   }
 }
diff --git a/paddle/fluid/inference/api/onnxruntime_predictor.cc b/paddle/fluid/inference/api/onnxruntime_predictor.cc
index eb561667fe1f3d1f624dd928fc53780bb5e63cf2..e42e395ce90f8b003ef9b7b46c4bf500bcbbe325 100644
--- a/paddle/fluid/inference/api/onnxruntime_predictor.cc
+++ b/paddle/fluid/inference/api/onnxruntime_predictor.cc
@@ -279,6 +279,12 @@ bool ONNXRuntimePredictor::Run(const std::vector<PaddleTensor> &inputs,
 bool ONNXRuntimePredictor::ZeroCopyRun() {
   try {
+    const char *device_name = place_ == PlaceType::kCPU ? "Cpu" : "Cuda";
+    for (auto output : output_desc_) {
+      Ort::MemoryInfo out_memory_info(device_name, OrtDeviceAllocator,
+                                      place_.GetDeviceId(), OrtMemTypeDefault);
+      binding_->BindOutput(output.name.c_str(), out_memory_info);
+    }
     session_.Run({}, *(binding_.get()));
   } catch (const std::exception &e) {
     LOG(ERROR) << e.what();
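For context (not part of the patch): the second hunk re-binds every output to an `Ort::MemoryInfo` on each `ZeroCopyRun()` call, so ONNX Runtime allocates fresh output buffers on the right device before `session_.Run`. Below is a minimal standalone sketch of that IoBinding pattern against the public ONNX Runtime C++ API; the model path `model.onnx`, the tensor names `"x"`/`"y"`, and the shapes are placeholders of my own, not taken from the Paddle sources.

```cpp
// Minimal IoBinding sketch, independent of Paddle. Assumes a model
// "model.onnx" with a float input "x" of shape {1, 4} and one output "y".
#include <onnxruntime_cxx_api.h>

#include <array>
#include <vector>

int main() {
  Ort::Env env(ORT_LOGGING_LEVEL_WARNING, "iobinding-demo");
  Ort::SessionOptions opts;
  Ort::Session session(env, "model.onnx", opts);  // placeholder model path

  // CPU memory, mirroring the "Cpu" device name used in the patch above.
  Ort::MemoryInfo memory_info("Cpu", OrtDeviceAllocator, /*device_id=*/0,
                              OrtMemTypeDefault);

  // Wrap an existing host buffer as the input tensor (zero copy).
  std::array<float, 4> input{1.f, 2.f, 3.f, 4.f};
  std::array<int64_t, 2> shape{1, 4};
  Ort::Value input_tensor = Ort::Value::CreateTensor<float>(
      memory_info, input.data(), input.size(), shape.data(), shape.size());

  Ort::IoBinding binding(session);
  binding.BindInput("x", input_tensor);  // placeholder input name
  // Binding an output to a MemoryInfo (rather than a concrete tensor) lets
  // ONNX Runtime allocate the output on that device, as the patch does.
  binding.BindOutput("y", memory_info);  // placeholder output name

  session.Run(Ort::RunOptions{nullptr}, binding);

  // Outputs allocated by ONNX Runtime are retrieved from the binding.
  std::vector<Ort::Value> outputs = binding.GetOutputValues();
  return outputs.empty() ? 1 : 0;
}
```

Binding outputs to a `MemoryInfo` each run (instead of reusing previously bound tensors) trades a small allocation cost for correctness when output shapes can change between runs, which appears to be the motivation for doing it inside `ZeroCopyRun()`.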