// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/framework/data_layout_transform.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/inference/api/paddle_inference_api.h"
#include "paddle/fluid/inference/api/paddle_tensor.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/float16.h"

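// Implementation of the paddle_infer::Tensor zero-copy inference API:
// reshaping, typed data access, and host <-> device copies for model
// inputs and outputs.
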
namespace paddle_infer {

using float16 = paddle::platform::float16;

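// Resizes the underlying LoDTensor (looked up by name in the runtime scope)
// to `shape`. Only input tensors may be reshaped; outputs are read-only.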
void Tensor::Reshape(const std::vector<int> &shape) {
  PADDLE_ENFORCE_EQ(
      name_.empty(), false,
      paddle::platform::errors::PreconditionNotMet(
          "Need to SetName first, so that the corresponding tensor can "
          "be retrieved."));
  PADDLE_ENFORCE_EQ(input_or_output_, true,
                    paddle::platform::errors::PermissionDenied(
                        "Can't reshape the output tensor; it is read-only."));
  auto *scope = static_cast<paddle::framework::Scope *>(scope_);
  auto *var = scope->FindVar(name_);
  PADDLE_ENFORCE_NOT_NULL(
      var, paddle::platform::errors::PreconditionNotMet(
               "No tensor called [%s] in the runtime scope", name_));
  auto *tensor = var->GetMutable<paddle::framework::LoDTensor>();
  tensor->Resize(paddle::framework::make_ddim(shape));
}

void Tensor::ReshapeStrings(const size_t &shape) {
  PADDLE_ENFORCE_EQ(
      name_.empty(), false,
      paddle::platform::errors::PreconditionNotMet(
          "Need to SetName first, so that the corresponding tensor can "
          "be retrieved."));
  PADDLE_ENFORCE_EQ(input_or_output_, true,
                    paddle::platform::errors::PermissionDenied(
                        "Can't reshape the output tensor, it is readonly"));
  auto *scope = static_cast<paddle::framework::Scope *>(scope_);
  auto *var = scope->FindVar(name_);
  PADDLE_ENFORCE_NOT_NULL(
      var, paddle::platform::errors::PreconditionNotMet(
               "No tensor called [%s] in the runtime scope", name_));
  paddle_infer::Strings *tensor = var->GetMutable<paddle_infer::Strings>();
  tensor->resize(shape);
}

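// Lazily resolves the underlying tensor on first use, caches the pointer in
// tensor_, and exposes it through a local `tensor` variable.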
#define EAGER_GET_TENSOR(tensor_type)    \
  if (!tensor_) {                        \
    tensor_ = FindTensor<tensor_type>(); \
  }                                      \
  auto *tensor = static_cast<tensor_type *>(tensor_);

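// Allocates (if needed) and returns a mutable buffer for this tensor on the
// requested place. Reshape() must have been called first so that numel() is
// positive.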
template <typename T>
T *Tensor::mutable_data(PlaceType place) {
  EAGER_GET_TENSOR(paddle::framework::LoDTensor);
  PADDLE_ENFORCE_GT(
      tensor->numel(), 0,
      paddle::platform::errors::PreconditionNotMet(
          "You should call Tensor::Reshape(const std::vector<int> "
          "&shape) "
          "function before retrieving mutable_data from the input tensor."));
  switch (static_cast<int>(place)) {
    case static_cast<int>(PlaceType::kCPU): {
      return tensor->mutable_data<T>(paddle::platform::CPUPlace());
    }
    case static_cast<int>(PlaceType::kGPU): {
      return tensor->mutable_data<T>(paddle::platform::CUDAPlace(device_));
    }
    case static_cast<int>(PlaceType::kXPU): {
      return tensor->mutable_data<T>(paddle::platform::XPUPlace(device_));
    }
    case static_cast<int>(PlaceType::kNPU): {
      return tensor->mutable_data<T>(paddle::platform::NPUPlace(device_));
    }
    default:
      PADDLE_THROW(paddle::platform::errors::Unavailable(
          "Only CPU / CUDA / XPU / NPU places are supported. The place `%d` is "
          "not supported.",
          static_cast<int>(place)));
      break;
  }
  return nullptr;
}

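// Returns the raw data pointer without any copy, reporting where the data
// lives and how many elements it holds through the out-parameters.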
template <typename T>
T *Tensor::data(PlaceType *place, int *size) const {
  EAGER_GET_TENSOR(paddle::framework::LoDTensor);
  auto *res = tensor->data<T>();

  if (paddle::platform::is_cpu_place(tensor->place())) {
    *place = PlaceType::kCPU;
  } else if (paddle::platform::is_gpu_place(tensor->place())) {
    *place = PlaceType::kGPU;
  } else if (paddle::platform::is_xpu_place(tensor->place())) {
    *place = PlaceType::kXPU;
  } else if (paddle::platform::is_npu_place(tensor->place())) {
    *place = PlaceType::kNPU;
  } else {
    *place = PlaceType::kUNK;
  }

  *size = tensor->numel();
  return res;
}

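// Maps the framework's proto::VarType to the public DataType enum,
// defaulting to FLOAT32 for any type without a mapping.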
DataType Tensor::type() const {
  EAGER_GET_TENSOR(paddle::framework::LoDTensor);
  auto type = tensor->type();
  if (type == paddle::framework::proto::VarType::FP32) {
    return DataType::FLOAT32;
  } else if (type == paddle::framework::proto::VarType::FP16) {
    return DataType::FLOAT16;
  } else if (type == paddle::framework::proto::VarType::INT64) {
    return DataType::INT64;
  } else if (type == paddle::framework::proto::VarType::INT32) {
    return DataType::INT32;
  } else if (type == paddle::framework::proto::VarType::UINT8) {
    return DataType::UINT8;
  } else if (type == paddle::framework::proto::VarType::INT8) {
    return DataType::INT8;
  }
  return DataType::FLOAT32;
}

PlaceType Tensor::place() const { return place_; }

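// Copies numel() elements from a host buffer into this tensor, dispatching
// on place_: a plain memcpy for CPU, or a device-specific
// paddle::memory::Copy for GPU / XPU / NPU builds.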
template <typename T>
void Tensor::CopyFromCpu(const T *data) {
  EAGER_GET_TENSOR(paddle::framework::LoDTensor);
  PADDLE_ENFORCE_GE(tensor->numel(), 0,
                    paddle::platform::errors::PreconditionNotMet(
                        "You should call Tensor::Reshape(const "
                        "std::vector<int> &shape) "
                        "function before copying data from CPU."));
  size_t ele_size = tensor->numel() * sizeof(T);

  if (place_ == PlaceType::kCPU) {
    auto *t_data = tensor->mutable_data<T>(paddle::platform::CPUPlace());
    std::memcpy(static_cast<void *>(t_data), data, ele_size);
  } else if (place_ == PlaceType::kGPU) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    paddle::platform::DeviceContextPool &pool =
        paddle::platform::DeviceContextPool::Instance();
    paddle::platform::CUDAPlace gpu_place(device_);
    auto *t_data = tensor->mutable_data<T>(gpu_place);
    auto *dev_ctx = static_cast<const paddle::platform::CUDADeviceContext *>(
        pool.Get(gpu_place));

    paddle::memory::Copy(gpu_place, static_cast<void *>(t_data),
                         paddle::platform::CPUPlace(), data, ele_size,
                         dev_ctx->stream());
#else
    PADDLE_THROW(paddle::platform::errors::Unavailable(
        "Can not create tensor with CUDA place because paddle is not compiled "
        "with CUDA."));
#endif
  } else if (place_ == PlaceType::kXPU) {
#ifdef PADDLE_WITH_XPU
    paddle::platform::XPUPlace xpu_place(device_);
    auto *t_data = tensor->mutable_data<T>(xpu_place);
    paddle::memory::Copy(xpu_place, static_cast<void *>(t_data),
                         paddle::platform::CPUPlace(), data, ele_size);
#else
    PADDLE_THROW(paddle::platform::errors::Unavailable(
        "Can not create tensor with XPU place because paddle is not compiled "
        "with XPU."));
#endif
  } else if (place_ == PlaceType::kNPU) {
#ifdef PADDLE_WITH_ASCEND_CL
    paddle::platform::DeviceContextPool &pool =
        paddle::platform::DeviceContextPool::Instance();
    paddle::platform::NPUPlace npu_place(device_);
    auto *t_data = tensor->mutable_data<T>(npu_place);
    auto *dev_ctx = static_cast<const paddle::platform::NPUDeviceContext *>(
        pool.Get(npu_place));
    paddle::memory::Copy(npu_place, static_cast<void *>(t_data),
                         paddle::platform::CPUPlace(), data, ele_size,
                         dev_ctx->stream());
#else
    PADDLE_THROW(paddle::platform::errors::Unavailable(
        "Can not create tensor with NPU place because paddle is not compiled "
        "with NPU."));
#endif
  } else {
    PADDLE_THROW(paddle::platform::errors::InvalidArgument(
        "The analysis predictor supports CPU, GPU, NPU and XPU now."));
  }
}

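// Copies a batch of strings from the host by assigning the whole
// paddle_infer::Strings container; ReshapeStrings() must be called first.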
void Tensor::CopyStringsFromCpu(const paddle_infer::Strings *data) {
  EAGER_GET_TENSOR(paddle_infer::Strings);
  PADDLE_ENFORCE_GE(tensor->size(), 0,
                    paddle::platform::errors::PreconditionNotMet(
                        "You should call Tensor::ReshapeStrings(const "
                        "size_t &shape) function before copying "
                        "the string data from CPU."));
  *tensor = *data;
}

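// Copies tensor data back to a host buffer. On CUDA the copy can run in one
// of three modes: synchronous (default), asynchronous returning the stream
// through exec_stream, or asynchronous with a host callback `cb`. On HIP
// the copy is always synchronized.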
template <typename T>
void Tensor::CopyToCpuImpl(T *data, void *exec_stream, CallbackFunc cb,
                           void *cb_params) const {
  EAGER_GET_TENSOR(paddle::framework::LoDTensor);
  auto ele_num = tensor->numel();
  auto *t_data = tensor->data<T>();
  auto t_place = tensor->place();

  paddle::framework::Tensor out;
  auto mem_allocation =
      std::make_shared<paddle::memory::allocation::Allocation>(
          static_cast<void *>(data), ele_num * sizeof(T),
          paddle::platform::CPUPlace());
  out.ResetHolder(mem_allocation);

  if (paddle::platform::is_cpu_place(t_place)) {
#ifdef PADDLE_WITH_MKLDNN
    if (tensor->layout() == paddle::framework::DataLayout::kMKLDNN)
      paddle::framework::innerTransDataLayoutFromMKLDNN(
          tensor->layout(), paddle::platform::MKLDNNDeviceContext::tls()
                                .get_cur_paddle_data_layout(),
          *tensor, &out, paddle::platform::CPUPlace(), true);
    else
      std::memcpy(static_cast<void *>(data), t_data, ele_num * sizeof(T));
#else
    std::memcpy(static_cast<void *>(data), t_data, ele_num * sizeof(T));
#endif
  } else if (paddle::platform::is_ipu_place(t_place)) {
#ifdef PADDLE_WITH_IPU
    std::memcpy(static_cast<void *>(data), t_data, ele_num * sizeof(T));
#else
    PADDLE_THROW(paddle::platform::errors::Unavailable(
        "Can not create tensor with IPU place because paddle is not compiled "
        "with IPU."));
#endif
  } else if (place_ == PlaceType::kGPU) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    paddle::platform::DeviceContextPool &pool =
        paddle::platform::DeviceContextPool::Instance();
    auto gpu_place = t_place;
    auto *dev_ctx = static_cast<const paddle::platform::CUDADeviceContext *>(
        pool.Get(gpu_place));
    paddle::memory::Copy(paddle::platform::CPUPlace(),
                         static_cast<void *>(data), gpu_place, t_data,
                         ele_num * sizeof(T), dev_ctx->stream());
#ifdef PADDLE_WITH_HIP
    hipStreamSynchronize(dev_ctx->stream());
#else
    // async, return stream
    if (nullptr != exec_stream) {
      *(static_cast<cudaStream_t *>(exec_stream)) = dev_ctx->stream();
      // async with callback
    } else if (cb) {
      cudaLaunchHostFunc(dev_ctx->stream(), cb, cb_params);
      // sync
    } else {
      cudaStreamSynchronize(dev_ctx->stream());
    }
#endif
#else
    PADDLE_THROW(paddle::platform::errors::Unavailable(
        "Can not create tensor with CUDA place because paddle is not compiled "
        "with CUDA."));
#endif
  } else if (place_ == PlaceType::kXPU) {
#ifdef PADDLE_WITH_XPU
    auto xpu_place = t_place;
    paddle::memory::Copy(paddle::platform::CPUPlace(),
                         static_cast<void *>(data), xpu_place, t_data,
                         ele_num * sizeof(T));
#else
    PADDLE_THROW(paddle::platform::errors::Unavailable(
        "Can not create tensor with XPU place because paddle is not compiled "
        "with XPU."));
#endif
  } else if (place_ == PlaceType::kNPU) {
#ifdef PADDLE_WITH_ASCEND_CL
    paddle::platform::DeviceContextPool &pool =
        paddle::platform::DeviceContextPool::Instance();
    auto npu_place = t_place;
    auto *dev_ctx = static_cast<const paddle::platform::NPUDeviceContext *>(
        pool.Get(npu_place));
    paddle::memory::Copy(paddle::platform::CPUPlace(),
                         static_cast<void *>(data), npu_place, t_data,
                         ele_num * sizeof(T), dev_ctx->stream());
    paddle::platform::NPUStreamSync(dev_ctx->stream());
#else
    PADDLE_THROW(paddle::platform::errors::Unavailable(
        "Can not create tensor with NPU place because paddle is not compiled "
        "with NPU."));
#endif
  } else {
    PADDLE_THROW(paddle::platform::errors::InvalidArgument(
        "The analysis predictor supports CPU, GPU, NPU and XPU now."));
  }
}

template <typename T>
void Tensor::CopyToCpu(T *data) const {
  CopyToCpuImpl<T>(data, nullptr, nullptr, nullptr);
}

template <typename T>
void Tensor::CopyToCpuAsync(T *data, void *exec_stream) const {
  CopyToCpuImpl<T>(data, exec_stream, nullptr, nullptr);
}

template <typename T>
void Tensor::CopyToCpuAsync(T *data, CallbackFunc cb, void *cb_params) const {
  CopyToCpuImpl<T>(data, nullptr, cb, cb_params);
}

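// A minimal end-to-end usage sketch (hypothetical, for illustration only:
// `predictor` is assumed to be a paddle_infer::Predictor created elsewhere,
// and error handling is omitted):
//
//   auto input = predictor->GetInputHandle("x");
//   input->Reshape({1, 3, 224, 224});
//   input->CopyFromCpu(host_input.data());   // host -> device
//   predictor->Run();
//   auto output = predictor->GetOutputHandle("out");
//   auto shape = output->shape();
//   int numel = std::accumulate(shape.begin(), shape.end(), 1,
//                               std::multiplies<int>());
//   std::vector<float> host_output(numel);
//   output->CopyToCpu(host_output.data());   // device -> host, synchronous
//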
template PD_INFER_DECL void Tensor::CopyFromCpu<float>(const float *data);
template PD_INFER_DECL void Tensor::CopyFromCpu<int64_t>(const int64_t *data);
template PD_INFER_DECL void Tensor::CopyFromCpu<int32_t>(const int32_t *data);
template PD_INFER_DECL void Tensor::CopyFromCpu<uint8_t>(const uint8_t *data);
template PD_INFER_DECL void Tensor::CopyFromCpu<int8_t>(const int8_t *data);
template PD_INFER_DECL void Tensor::CopyFromCpu<float16>(const float16 *data);

template PD_INFER_DECL void Tensor::CopyToCpu<float>(float *data) const;
template PD_INFER_DECL void Tensor::CopyToCpu<int64_t>(int64_t *data) const;
template PD_INFER_DECL void Tensor::CopyToCpu<int32_t>(int32_t *data) const;
template PD_INFER_DECL void Tensor::CopyToCpu<uint8_t>(uint8_t *data) const;
template PD_INFER_DECL void Tensor::CopyToCpu<int8_t>(int8_t *data) const;
template PD_INFER_DECL void Tensor::CopyToCpu<float16>(float16 *data) const;

template PD_INFER_DECL void Tensor::CopyToCpuImpl<float>(float *data,
                                                         void *exec_stream,
                                                         CallbackFunc cb,
                                                         void *cb_params) const;
template PD_INFER_DECL void Tensor::CopyToCpuImpl<int64_t>(
    int64_t *data, void *exec_stream, CallbackFunc cb, void *cb_params) const;
template PD_INFER_DECL void Tensor::CopyToCpuImpl<int32_t>(
    int32_t *data, void *exec_stream, CallbackFunc cb, void *cb_params) const;
template PD_INFER_DECL void Tensor::CopyToCpuImpl<uint8_t>(
    uint8_t *data, void *exec_stream, CallbackFunc cb, void *cb_params) const;
template PD_INFER_DECL void Tensor::CopyToCpuImpl<int8_t>(
    int8_t *data, void *exec_stream, CallbackFunc cb, void *cb_params) const;
template PD_INFER_DECL void Tensor::CopyToCpuImpl<float16>(
    float16 *data, void *exec_stream, CallbackFunc cb, void *cb_params) const;

template PD_INFER_DECL void Tensor::CopyToCpuAsync<float>(
    float *data, void *exec_stream) const;
template PD_INFER_DECL void Tensor::CopyToCpuAsync<int64_t>(
    int64_t *data, void *exec_stream) const;
template PD_INFER_DECL void Tensor::CopyToCpuAsync<int32_t>(
    int32_t *data, void *exec_stream) const;
template PD_INFER_DECL void Tensor::CopyToCpuAsync<uint8_t>(
    uint8_t *data, void *exec_stream) const;
template PD_INFER_DECL void Tensor::CopyToCpuAsync<int8_t>(
    int8_t *data, void *exec_stream) const;
template PD_INFER_DECL void Tensor::CopyToCpuAsync<float16>(
    float16 *data, void *exec_stream) const;

template PD_INFER_DECL void Tensor::CopyToCpuAsync<float>(
    float *data, CallbackFunc cb, void *cb_params) const;
template PD_INFER_DECL void Tensor::CopyToCpuAsync<int64_t>(
    int64_t *data, CallbackFunc cb, void *cb_params) const;
template PD_INFER_DECL void Tensor::CopyToCpuAsync<int32_t>(
    int32_t *data, CallbackFunc cb, void *cb_params) const;
template PD_INFER_DECL void Tensor::CopyToCpuAsync<uint8_t>(
    uint8_t *data, CallbackFunc cb, void *cb_params) const;
template PD_INFER_DECL void Tensor::CopyToCpuAsync<int8_t>(
    int8_t *data, CallbackFunc cb, void *cb_params) const;
template PD_INFER_DECL void Tensor::CopyToCpuAsync<float16>(
    float16 *data, CallbackFunc cb, void *cb_params) const;

template PD_INFER_DECL float *Tensor::data<float>(PlaceType *place,
                                                  int *size) const;
template PD_INFER_DECL int64_t *Tensor::data<int64_t>(PlaceType *place,
                                                      int *size) const;
template PD_INFER_DECL int32_t *Tensor::data<int32_t>(PlaceType *place,
                                                      int *size) const;
template PD_INFER_DECL uint8_t *Tensor::data<uint8_t>(PlaceType *place,
                                                      int *size) const;
template PD_INFER_DECL int8_t *Tensor::data<int8_t>(PlaceType *place,
                                                    int *size) const;
template PD_INFER_DECL float16 *Tensor::data<float16>(PlaceType *place,
                                                      int *size) const;

template PD_INFER_DECL float *Tensor::mutable_data<float>(PlaceType place);
template PD_INFER_DECL int64_t *Tensor::mutable_data<int64_t>(PlaceType place);
template PD_INFER_DECL int32_t *Tensor::mutable_data<int32_t>(PlaceType place);
template PD_INFER_DECL uint8_t *Tensor::mutable_data<uint8_t>(PlaceType place);
template PD_INFER_DECL int8_t *Tensor::mutable_data<int8_t>(PlaceType place);
template PD_INFER_DECL float16 *Tensor::mutable_data<float16>(PlaceType place);

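// A Tensor is only a named view into the predictor's scope; construction
// just records the scope pointer, which must be non-null.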
Tensor::Tensor(void *scope) : scope_{scope} {
  PADDLE_ENFORCE_NOT_NULL(scope_,
                          paddle::platform::errors::PreconditionNotMet(
                              "The `scope` can not be nullptr. It should be "
                              "set to the pointer of scope."));
}

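// Looks up the variable `name_` in the runtime scope and returns a
// type-erased pointer to its payload of type T; callers (via
// EAGER_GET_TENSOR) cache the result in tensor_.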
template <typename T>
void *Tensor::FindTensor() const {
  PADDLE_ENFORCE_EQ(
      name_.empty(), false,
      paddle::platform::errors::PreconditionNotMet(
          "Need to SetName first, so that the corresponding tensor can "
          "be retrieved."));
  auto *scope = static_cast<paddle::framework::Scope *>(scope_);
  auto *var = scope->FindVar(name_);
  PADDLE_ENFORCE_NOT_NULL(
      var, paddle::platform::errors::PreconditionNotMet(
               "No tensor called [%s] in the runtime scope", name_));
  auto *tensor = var->GetMutable<T>();
  return tensor;
}

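// Returns the tensor's dims as a vector<int>. Under MKL-DNN the internal
// layout may differ from the paddle layout, so the dims are reordered to
// the expected order before being returned.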
std::vector<int> Tensor::shape() const {
  EAGER_GET_TENSOR(paddle::framework::LoDTensor);
  PADDLE_ENFORCE_NOT_NULL(
      tensor_, paddle::platform::errors::PreconditionNotMet(
                   "No tensor called [%s] found in the scope", name_));
// MKL-DNN may do a layout transform internally, so the dims may need to be
// reordered before they are returned.
#ifdef PADDLE_WITH_MKLDNN
  if (tensor->layout() == paddle::framework::DataLayout::kMKLDNN) {
    paddle::framework::DataLayout out_layout =
        paddle::platform::MKLDNNDeviceContext::tls()
            .get_cur_paddle_data_layout();
    // Set default as NCHW in case not specified
    out_layout = out_layout == paddle::framework::DataLayout::kAnyLayout
                     ? paddle::framework::DataLayout::kNCHW
                     : out_layout;
    // In these data layouts the channel dimension is either in the 2nd
    // position (nChw) or in the last one (nhwC). For dim == 2 the layouts
    // coincide, and for dim == 1 there is only one possible layout, so in
    // both cases nothing needs to be done.
    if (tensor->dims().size() < 3)
      return paddle::framework::vectorize<int>(tensor->dims());
    if (out_layout == paddle::framework::DataLayout::kNHWC) {
      auto dims = paddle::framework::vectorize<int>(tensor->dims());
      std::rotate(dims.begin() + 1, dims.begin() + 2, dims.end());
      return dims;
    } else {
      return paddle::framework::vectorize<int>(tensor->dims());
    }
  }
#endif
  return paddle::framework::vectorize<int>(tensor->dims());
}

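// Sets the LoD (level-of-detail) information, which records how
// variable-length sequences are laid out within this tensor.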
void Tensor::SetLoD(const std::vector<std::vector<size_t>> &x) {
  EAGER_GET_TENSOR(paddle::framework::LoDTensor);
  paddle::framework::LoD lod;
  for (auto &level : x) {
    lod.emplace_back(level);
  }
  tensor->set_lod(lod);
}

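// Returns a copy of the tensor's LoD as nested vectors of size_t offsets.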
std::vector<std::vector<size_t>> Tensor::lod() const {
  EAGER_GET_TENSOR(paddle::framework::LoDTensor);
  std::vector<std::vector<size_t>> res;
  for (auto &level : tensor->lod()) {
    res.emplace_back(level);
  }
  return res;
}

void Tensor::SetName(const std::string &name) { name_ = name; }

const std::string &Tensor::name() const { return name_; }

void Tensor::SetPlace(PlaceType place, int device) {
  place_ = place;
  device_ = device;
}

}  // namespace paddle_infer