// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/framework/data_layout_transform.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/inference/api/paddle_inference_api.h"
#include "paddle/fluid/inference/api/paddle_tensor.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/float16.h"

namespace paddle_infer {

using float16 = paddle::platform::float16;

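// Reshape sets the shape of the underlying LoDTensor in the runtime scope.
// It is only valid on input tensors and must precede mutable_data() or
// CopyFromCpu(), both of which size their work by numel().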
void Tensor::Reshape(const std::vector<int> &shape) {
  PADDLE_ENFORCE_EQ(
      name_.empty(), false,
      paddle::platform::errors::PreconditionNotMet(
          "Need to SetName first, so that the corresponding tensor can "
          "be retrieved."));
  PADDLE_ENFORCE_EQ(input_or_output_, true,
                    paddle::platform::errors::PermissionDenied(
                        "Can't reshape the output tensor; it is read-only."));
  auto *scope = static_cast<paddle::framework::Scope *>(scope_);
  auto *var = scope->FindVar(name_);
  PADDLE_ENFORCE_NOT_NULL(
      var, paddle::platform::errors::PreconditionNotMet(
               "No tensor called [%s] in the runtime scope", name_));
  auto *tensor = var->GetMutable<paddle::framework::LoDTensor>();
  tensor->Resize(paddle::framework::make_ddim(shape));
}

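// ReshapeStrings resizes the paddle_infer::Strings variable in the runtime
// scope to hold `shape` strings. Like Reshape, it is only valid on inputs.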
void Tensor::ReshapeStrings(const size_t &shape) {
  PADDLE_ENFORCE_EQ(
      name_.empty(), false,
      paddle::platform::errors::PreconditionNotMet(
          "Need to SetName first, so that the corresponding tensor can "
          "be retrieved."));
  PADDLE_ENFORCE_EQ(input_or_output_, true,
                    paddle::platform::errors::PermissionDenied(
                        "Can't reshape the output tensor, it is readonly"));
  auto *scope = static_cast<paddle::framework::Scope *>(scope_);
  auto *var = scope->FindVar(name_);
  PADDLE_ENFORCE_NOT_NULL(
      var, paddle::platform::errors::PreconditionNotMet(
               "No tensor called [%s] in the runtime scope", name_));
  paddle_infer::Strings *tensor = var->GetMutable<paddle_infer::Strings>();
  tensor->resize(shape);
}

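// EAGER_GET_TENSOR resolves the underlying tensor on first use, caches the
// untyped pointer in tensor_, and exposes it as a typed local `tensor`.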
#define EAGER_GET_TENSOR(tensor_type)    \
  if (!tensor_) {                        \
    tensor_ = FindTensor<tensor_type>(); \
  }                                      \
  auto *tensor = static_cast<tensor_type *>(tensor_);

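// mutable_data allocates (or reuses) memory for the tensor on the requested
// place and returns a typed pointer to it. Reshape() must have been called
// first so that numel() is positive.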
template <typename T>
T *Tensor::mutable_data(PlaceType place) {
  EAGER_GET_TENSOR(paddle::framework::LoDTensor);
  PADDLE_ENFORCE_GT(
      tensor->numel(), 0,
      paddle::platform::errors::PreconditionNotMet(
          "You should call Tensor::Reshape(const std::vector<int> &shape) "
          "function before retrieving mutable_data from input tensor."));
  switch (static_cast<int>(place)) {
    case static_cast<int>(PlaceType::kCPU): {
      return tensor->mutable_data<T>(paddle::platform::CPUPlace());
    }
    case static_cast<int>(PlaceType::kGPU): {
      return tensor->mutable_data<T>(paddle::platform::CUDAPlace(device_));
    }
    case static_cast<int>(PlaceType::kXPU): {
      return tensor->mutable_data<T>(paddle::platform::XPUPlace(device_));
    }
    case static_cast<int>(PlaceType::kNPU): {
      return tensor->mutable_data<T>(paddle::platform::NPUPlace(device_));
    }
    default:
      PADDLE_THROW(paddle::platform::errors::Unavailable(
          "Only CPU / CUDA / XPU / NPU places are supported. The place `%d` is "
          "not supported.",
          static_cast<int>(place)));
      break;
  }
  return nullptr;
}

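// data returns the tensor's existing buffer without allocating or copying,
// reporting where it lives in *place and its element count in *size.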
template <typename T>
T *Tensor::data(PlaceType *place, int *size) const {
  EAGER_GET_TENSOR(paddle::framework::LoDTensor);
  auto *res = tensor->data<T>();

  if (paddle::platform::is_cpu_place(tensor->place())) {
    *place = PlaceType::kCPU;
  } else if (paddle::platform::is_gpu_place(tensor->place())) {
    *place = PlaceType::kGPU;
  } else if (paddle::platform::is_xpu_place(tensor->place())) {
    *place = PlaceType::kXPU;
  } else if (paddle::platform::is_npu_place(tensor->place())) {
    *place = PlaceType::kNPU;
  } else {
    *place = PlaceType::kUNK;
  }

  *size = tensor->numel();
  return res;
}

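// type maps the framework's internal proto::VarType onto the public DataType
// enum; types the API does not expose fall back to FLOAT32.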
DataType Tensor::type() const {
  EAGER_GET_TENSOR(paddle::framework::LoDTensor);
  auto type = tensor->type();
  if (type == paddle::framework::proto::VarType::FP32) {
    return DataType::FLOAT32;
  } else if (type == paddle::framework::proto::VarType::FP16) {
    return DataType::FLOAT16;
  } else if (type == paddle::framework::proto::VarType::INT64) {
    return DataType::INT64;
  } else if (type == paddle::framework::proto::VarType::INT32) {
    return DataType::INT32;
  } else if (type == paddle::framework::proto::VarType::UINT8) {
    return DataType::UINT8;
  } else if (type == paddle::framework::proto::VarType::INT8) {
    return DataType::INT8;
  }
  return DataType::FLOAT32;
}

PlaceType Tensor::place() const { return place_; }

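// CopyFromCpu copies numel() elements of T from a host buffer into the
// tensor, allocating on the tensor's place (CPU, GPU, XPU, or NPU) first;
// device copies are issued on that place's device-context stream.
//
// Typical input flow, as a minimal sketch (`predictor` and `host_data`, a
// float buffer already holding 1*3*224*224 values, are assumed):
//
//   auto input = predictor->GetInputHandle("x");
//   input->Reshape({1, 3, 224, 224});
//   input->CopyFromCpu(host_data.data());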
template <typename T>
void Tensor::CopyFromCpu(const T *data) {
  EAGER_GET_TENSOR(paddle::framework::LoDTensor);
  PADDLE_ENFORCE_GE(tensor->numel(), 0,
                    paddle::platform::errors::PreconditionNotMet(
                        "You should call Tensor::Reshape(const "
                        "std::vector<int> &shape) "
                        "function before copying data from CPU."));
  size_t ele_size = tensor->numel() * sizeof(T);

  if (place_ == PlaceType::kCPU) {
    auto *t_data = tensor->mutable_data<T>(paddle::platform::CPUPlace());
    std::memcpy(static_cast<void *>(t_data), data, ele_size);
  } else if (place_ == PlaceType::kGPU) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    paddle::platform::DeviceContextPool &pool =
        paddle::platform::DeviceContextPool::Instance();
    paddle::platform::CUDAPlace gpu_place(device_);
    auto *t_data = tensor->mutable_data<T>(gpu_place);
    auto *dev_ctx = static_cast<const paddle::platform::CUDADeviceContext *>(
        pool.Get(gpu_place));

    paddle::memory::Copy(gpu_place, static_cast<void *>(t_data),
                         paddle::platform::CPUPlace(), data, ele_size,
                         dev_ctx->stream());
#else
    PADDLE_THROW(paddle::platform::errors::Unavailable(
        "Can not create tensor with CUDA place because paddle is not compiled "
        "with CUDA."));
#endif
  } else if (place_ == PlaceType::kXPU) {
#ifdef PADDLE_WITH_XPU
    paddle::platform::XPUPlace xpu_place(device_);
    auto *t_data = tensor->mutable_data<T>(xpu_place);
    paddle::memory::Copy(xpu_place, static_cast<void *>(t_data),
                         paddle::platform::CPUPlace(), data, ele_size);
#else
    PADDLE_THROW(paddle::platform::errors::Unavailable(
        "Can not create tensor with XPU place because paddle is not compiled "
        "with XPU."));
#endif
  } else if (place_ == PlaceType::kNPU) {
#ifdef PADDLE_WITH_ASCEND_CL
    paddle::platform::DeviceContextPool &pool =
        paddle::platform::DeviceContextPool::Instance();
    paddle::platform::NPUPlace npu_place(device_);
    auto *t_data = tensor->mutable_data<T>(npu_place);
    auto *dev_ctx = static_cast<const paddle::platform::NPUDeviceContext *>(
        pool.Get(npu_place));
    paddle::memory::Copy(npu_place, static_cast<void *>(t_data),
                         paddle::platform::CPUPlace(), data, ele_size,
                         dev_ctx->stream());
#else
    PADDLE_THROW(paddle::platform::errors::Unavailable(
        "Can not create tensor with NPU place because paddle is not compiled "
        "with NPU."));
#endif
  } else {
    PADDLE_THROW(paddle::platform::errors::InvalidArgument(
        "The analysis predictor supports CPU, GPU, NPU and XPU now."));
  }
}

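// CopyStringsFromCpu replaces the string tensor's contents with a copy of
// *data; call ReshapeStrings first so the variable exists in the scope.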
void Tensor::CopyStringsFromCpu(const paddle_infer::Strings *data) {
  EAGER_GET_TENSOR(paddle_infer::Strings);
  PADDLE_ENFORCE_GE(tensor->size(), 0,
                    paddle::platform::errors::PreconditionNotMet(
                        "You should call Tensor::Reshape(const "
                        "std::size_t &shape)function before copying"
                        "the string data from cpu."));
  *tensor = *data;
}

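// CopyToCpuImpl copies the tensor's contents into a pre-sized host buffer.
// With no extra arguments it synchronizes before returning; if exec_stream is
// given it hands back the device stream instead, and if cb is given it
// enqueues cb(cb_params) on the stream after the copy (CUDA path only).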
template <typename T>
void Tensor::CopyToCpuImpl(T *data, void *exec_stream, CallbackFunc cb,
                           void *cb_params) const {
  EAGER_GET_TENSOR(paddle::framework::LoDTensor);
  auto ele_num = tensor->numel();
  auto *t_data = tensor->data<T>();
  auto t_place = tensor->place();

  paddle::framework::Tensor out;
  auto mem_allocation = std::make_shared<paddle::memory::Allocation>(
      static_cast<void *>(data), ele_num * sizeof(T),
      paddle::platform::CPUPlace());
  out.ResetHolder(mem_allocation);

  if (paddle::platform::is_cpu_place(t_place)) {
#ifdef PADDLE_WITH_MKLDNN
    if (tensor->layout() == paddle::framework::DataLayout::kMKLDNN)
      paddle::framework::innerTransDataLayoutFromMKLDNN(
          tensor->layout(), paddle::platform::MKLDNNDeviceContext::tls()
                                .get_cur_paddle_data_layout(),
          *tensor, &out, paddle::platform::CPUPlace(), true);
    else
      std::memcpy(static_cast<void *>(data), t_data, ele_num * sizeof(T));
#else
    std::memcpy(static_cast<void *>(data), t_data, ele_num * sizeof(T));
#endif
  } else if (place_ == PlaceType::kGPU) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    paddle::platform::DeviceContextPool &pool =
        paddle::platform::DeviceContextPool::Instance();
    auto gpu_place = BOOST_GET_CONST(paddle::platform::CUDAPlace, t_place);
    auto *dev_ctx = static_cast<const paddle::platform::CUDADeviceContext *>(
        pool.Get(gpu_place));
    paddle::memory::Copy(paddle::platform::CPUPlace(),
                         static_cast<void *>(data), gpu_place, t_data,
                         ele_num * sizeof(T), dev_ctx->stream());
#ifdef PADDLE_WITH_HIP
    hipStreamSynchronize(dev_ctx->stream());
#else
    // async, return stream
    if (nullptr != exec_stream) {
      *(static_cast<cudaStream_t *>(exec_stream)) = dev_ctx->stream();
      // async with callback
    } else if (cb) {
      cudaLaunchHostFunc(dev_ctx->stream(), cb, cb_params);
      // sync
    } else {
      cudaStreamSynchronize(dev_ctx->stream());
    }
#endif
#else
    PADDLE_THROW(paddle::platform::errors::Unavailable(
        "Can not create tensor with CUDA place because paddle is not compiled "
        "with CUDA."));
#endif
  } else if (place_ == PlaceType::kXPU) {
#ifdef PADDLE_WITH_XPU
    auto xpu_place = BOOST_GET_CONST(paddle::platform::XPUPlace, t_place);
    paddle::memory::Copy(paddle::platform::CPUPlace(),
                         static_cast<void *>(data), xpu_place, t_data,
                         ele_num * sizeof(T));
#else
    PADDLE_THROW(paddle::platform::errors::Unavailable(
        "Can not create tensor with XPU place because paddle is not compiled "
        "with XPU."));
#endif
  } else if (place_ == PlaceType::kNPU) {
#ifdef PADDLE_WITH_ASCEND_CL
    paddle::platform::DeviceContextPool &pool =
        paddle::platform::DeviceContextPool::Instance();
    auto npu_place = BOOST_GET_CONST(paddle::platform::NPUPlace, t_place);
    auto *dev_ctx = static_cast<const paddle::platform::NPUDeviceContext *>(
        pool.Get(npu_place));
    paddle::memory::Copy(paddle::platform::CPUPlace(),
                         static_cast<void *>(data), npu_place, t_data,
                         ele_num * sizeof(T), dev_ctx->stream());
    aclrtSynchronizeStream(dev_ctx->stream());
#else
    PADDLE_THROW(paddle::platform::errors::Unavailable(
        "Can not create tensor with NPU place because paddle is not compiled "
        "with NPU."));
#endif
  } else {
    PADDLE_THROW(paddle::platform::errors::InvalidArgument(
        "The analysis predictor supports CPU, GPU, NPU and XPU now."));
  }
}

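// CopyToCpu blocks until the copy finishes. The CopyToCpuAsync overloads
// either return the device stream through exec_stream or fire a callback
// when the copy completes; backends other than CUDA still run synchronously.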
template <typename T>
void Tensor::CopyToCpu(T *data) const {
  CopyToCpuImpl<T>(data, nullptr, nullptr, nullptr);
}

template <typename T>
void Tensor::CopyToCpuAsync(T *data, void *exec_stream) const {
  CopyToCpuImpl<T>(data, exec_stream, nullptr, nullptr);
}

template <typename T>
void Tensor::CopyToCpuAsync(T *data, CallbackFunc cb, void *cb_params) const {
  CopyToCpuImpl<T>(data, nullptr, cb, cb_params);
}

template PD_INFER_DECL void Tensor::CopyFromCpu<float>(const float *data);
template PD_INFER_DECL void Tensor::CopyFromCpu<int64_t>(const int64_t *data);
template PD_INFER_DECL void Tensor::CopyFromCpu<int32_t>(const int32_t *data);
template PD_INFER_DECL void Tensor::CopyFromCpu<uint8_t>(const uint8_t *data);
template PD_INFER_DECL void Tensor::CopyFromCpu<int8_t>(const int8_t *data);
template PD_INFER_DECL void Tensor::CopyFromCpu<float16>(const float16 *data);

template PD_INFER_DECL void Tensor::CopyToCpu<float>(float *data) const;
template PD_INFER_DECL void Tensor::CopyToCpu<int64_t>(int64_t *data) const;
template PD_INFER_DECL void Tensor::CopyToCpu<int32_t>(int32_t *data) const;
template PD_INFER_DECL void Tensor::CopyToCpu<uint8_t>(uint8_t *data) const;
template PD_INFER_DECL void Tensor::CopyToCpu<int8_t>(int8_t *data) const;
template PD_INFER_DECL void Tensor::CopyToCpu<float16>(float16 *data) const;

template PD_INFER_DECL void Tensor::CopyToCpuImpl<float>(float *data,
                                                         void *exec_stream,
                                                         CallbackFunc cb,
                                                         void *cb_params) const;
template PD_INFER_DECL void Tensor::CopyToCpuImpl<int64_t>(
    int64_t *data, void *exec_stream, CallbackFunc cb, void *cb_params) const;
template PD_INFER_DECL void Tensor::CopyToCpuImpl<int32_t>(
    int32_t *data, void *exec_stream, CallbackFunc cb, void *cb_params) const;
template PD_INFER_DECL void Tensor::CopyToCpuImpl<uint8_t>(
    uint8_t *data, void *exec_stream, CallbackFunc cb, void *cb_params) const;
template PD_INFER_DECL void Tensor::CopyToCpuImpl<int8_t>(
    int8_t *data, void *exec_stream, CallbackFunc cb, void *cb_params) const;
template PD_INFER_DECL void Tensor::CopyToCpuImpl<float16>(
    float16 *data, void *exec_stream, CallbackFunc cb, void *cb_params) const;

template PD_INFER_DECL void Tensor::CopyToCpuAsync<float>(
    float *data, void *exec_stream) const;
template PD_INFER_DECL void Tensor::CopyToCpuAsync<int64_t>(
    int64_t *data, void *exec_stream) const;
template PD_INFER_DECL void Tensor::CopyToCpuAsync<int32_t>(
    int32_t *data, void *exec_stream) const;
template PD_INFER_DECL void Tensor::CopyToCpuAsync<uint8_t>(
    uint8_t *data, void *exec_stream) const;
template PD_INFER_DECL void Tensor::CopyToCpuAsync<int8_t>(
    int8_t *data, void *exec_stream) const;
template PD_INFER_DECL void Tensor::CopyToCpuAsync<float16>(
    float16 *data, void *exec_stream) const;

template PD_INFER_DECL void Tensor::CopyToCpuAsync<float>(
    float *data, CallbackFunc cb, void *cb_params) const;
template PD_INFER_DECL void Tensor::CopyToCpuAsync<int64_t>(
    int64_t *data, CallbackFunc cb, void *cb_params) const;
template PD_INFER_DECL void Tensor::CopyToCpuAsync<int32_t>(
    int32_t *data, CallbackFunc cb, void *cb_params) const;
template PD_INFER_DECL void Tensor::CopyToCpuAsync<uint8_t>(
    uint8_t *data, CallbackFunc cb, void *cb_params) const;
template PD_INFER_DECL void Tensor::CopyToCpuAsync<int8_t>(
    int8_t *data, CallbackFunc cb, void *cb_params) const;
template PD_INFER_DECL void Tensor::CopyToCpuAsync<float16>(
    float16 *data, CallbackFunc cb, void *cb_params) const;

template PD_INFER_DECL float *Tensor::data<float>(PlaceType *place,
                                                  int *size) const;
template PD_INFER_DECL int64_t *Tensor::data<int64_t>(PlaceType *place,
                                                      int *size) const;
template PD_INFER_DECL int32_t *Tensor::data<int32_t>(PlaceType *place,
                                                      int *size) const;
template PD_INFER_DECL uint8_t *Tensor::data<uint8_t>(PlaceType *place,
                                                      int *size) const;
template PD_INFER_DECL int8_t *Tensor::data<int8_t>(PlaceType *place,
                                                    int *size) const;
template PD_INFER_DECL float16 *Tensor::data<float16>(PlaceType *place,
                                                      int *size) const;

template PD_INFER_DECL float *Tensor::mutable_data<float>(PlaceType place);
template PD_INFER_DECL int64_t *Tensor::mutable_data<int64_t>(PlaceType place);
template PD_INFER_DECL int32_t *Tensor::mutable_data<int32_t>(PlaceType place);
template PD_INFER_DECL uint8_t *Tensor::mutable_data<uint8_t>(PlaceType place);
template PD_INFER_DECL int8_t *Tensor::mutable_data<int8_t>(PlaceType place);
template PD_INFER_DECL float16 *Tensor::mutable_data<float16>(PlaceType place);

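// A Tensor is a thin handle over a variable in an externally owned scope;
// the constructor only records the scope pointer and takes no ownership.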
Tensor::Tensor(void *scope) : scope_{scope} {
  PADDLE_ENFORCE_NOT_NULL(scope_,
                          paddle::platform::errors::PreconditionNotMet(
                              "The `scope` can not be nullptr. It should be "
                              "set to the pointer of scope."));
}

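// FindTensor resolves name_ to a variable in the scope and returns its
// payload (e.g. a LoDTensor or Strings) as an untyped pointer for
// EAGER_GET_TENSOR to cache.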
template <typename T>
void *Tensor::FindTensor() const {
  PADDLE_ENFORCE_EQ(
      name_.empty(), false,
      paddle::platform::errors::PreconditionNotMet(
          "Need to SetName first, so that the corresponding tensor can "
          "be retrieved."));
  auto *scope = static_cast<paddle::framework::Scope *>(scope_);
  auto *var = scope->FindVar(name_);
  PADDLE_ENFORCE_NOT_NULL(
      var, paddle::platform::errors::PreconditionNotMet(
               "No tensor called [%s] in the runtime scope", name_));
  auto *tensor = var->GetMutable<T>();
  return tensor;
}

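// shape returns the tensor's current dimensions as a vector of ints.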
std::vector<int> Tensor::shape() const {
  EAGER_GET_TENSOR(paddle::framework::LoDTensor);
  PADDLE_ENFORCE_NOT_NULL(
      tensor_, paddle::platform::errors::PreconditionNotMet(
                   "No tensor called %s found in the scope", name_));
  return paddle::framework::vectorize<int>(tensor->dims());
}

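// SetLoD and lod convert between the public nested-vector form and the
// framework's LoD type, which records variable-length sequence boundaries.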
void Tensor::SetLoD(const std::vector<std::vector<size_t>> &x) {
  EAGER_GET_TENSOR(paddle::framework::LoDTensor);
  paddle::framework::LoD lod;
  for (auto &level : x) {
    lod.emplace_back(level);
  }
  tensor->set_lod(lod);
}

std::vector<std::vector<size_t>> Tensor::lod() const {
  EAGER_GET_TENSOR(paddle::framework::LoDTensor);
  std::vector<std::vector<size_t>> res;
  for (auto &level : tensor->lod()) {
    res.emplace_back(level);
  }
  return res;
}

void Tensor::SetName(const std::string &name) { name_ = name; }

const std::string &Tensor::name() const { return name_; }

void Tensor::SetPlace(PlaceType place, int device) {
  place_ = place;
  device_ = device;
}

}  // namespace paddle_infer