/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <Python.h>
#include <algorithm>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/operators/math/concat_and_split.h"
#include "paddle/fluid/operators/strided_memcpy.h"
#include "paddle/fluid/platform/bfloat16.h"
#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/platform/cuda_device_guard.h"
#endif
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/float16.h"
#include "paddle/fluid/platform/profiler.h"
#include "pybind11/numpy.h"
#include "pybind11/pybind11.h"

namespace py = pybind11;

namespace pybind11 {
namespace detail {

// Note: use the same enum number as numpy's float16:
// import numpy as np
// print(np.dtype(np.float16).num)  # 23
constexpr int NPY_FLOAT16_ = 23;
constexpr int NPY_UINT16_ = 4;
constexpr int NPY_COMPLEX64 = 14;
constexpr int NPY_COMPLEX128 = 15;

// Note: Since float16 is not a builtin type in C++, we register
// paddle::platform::float16 as numpy.float16.
// Ref: https://github.com/pybind/pybind11/issues/1776
template <>
struct npy_format_descriptor<paddle::platform::float16> {
  static py::dtype dtype() {
    handle ptr = npy_api::get().PyArray_DescrFromType_(NPY_FLOAT16_);
    return reinterpret_borrow<py::dtype>(ptr);
  }
  static std::string format() {
    // Note: "e" represents float16.
    // Details at:
    // https://docs.python.org/3/library/struct.html#format-characters.
    return "e";
  }
  static constexpr auto name = _("float16");
};

// Note: Since bfloat16 is not a builtin type in C++ and in numpy,
// we register paddle::platform::bfloat16 as numpy.uint16.
template <>
struct npy_format_descriptor<paddle::platform::bfloat16> {
  static py::dtype dtype() {
    handle ptr = npy_api::get().PyArray_DescrFromType_(NPY_UINT16_);
    return reinterpret_borrow<py::dtype>(ptr);
  }
  static std::string format() {
    // Note: "H" represents UINT16.
    // Details at:
    // https://docs.python.org/3/library/struct.html#format-characters.
    return "H";
  }
  static constexpr auto name = _("bfloat16");
};

// Note: we register paddle::platform::complex64 as numpy.complex64.
template <>
struct npy_format_descriptor<paddle::platform::complex64> {
  static py::dtype dtype() {
    handle ptr = npy_api::get().PyArray_DescrFromType_(NPY_COMPLEX64);
    return reinterpret_borrow<py::dtype>(ptr);
  }

  static std::string format() {
    // Note: "F" represents complex64.
    // Details at:
    // https://stackoverflow.com/questions/13997087/what-are-the-available-datatypes-for-dtype-with-numpys-loadtxt-an-genfromtx
    // for k, v in np.sctypeDict.items():
    //     print('{0:14s} : {1:40s}'.format(str(k), v))
    return "F";
  }
  static constexpr auto name = _("complex64");
};

// Note: we register paddle::platform::complex128 as numpy.complex128.
template <>
struct npy_format_descriptor<paddle::platform::complex128> {
  static py::dtype dtype() {
    handle ptr = npy_api::get().PyArray_DescrFromType_(NPY_COMPLEX128);
    return reinterpret_borrow<py::dtype>(ptr);
  }

  static std::string format() {
    // Note: "D" represents complex128.
    // Details at:
    // https://stackoverflow.com/questions/13997087/what-are-the-available-datatypes-for-dtype-with-numpys-loadtxt-an-genfromtx
    // for k, v in np.sctypeDict.items():
    //     print('{0:14s} : {1:40s}'.format(str(k), v))
    return "D";
  }
  static constexpr auto name = _("complex128");
};
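
// Illustrative note (not part of the original file): with the descriptors
// above registered, pybind11 can create and consume numpy arrays typed as
// Paddle's custom scalar types, e.g.:
//
//   py::array_t<paddle::platform::float16> arr({2, 3});
//   // arr.dtype() is numpy.float16 (typestr "e", type number 23)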

}  // namespace detail
}  // namespace pybind11

namespace paddle {
namespace pybind {

namespace details {

template <typename T>
class PYBIND11_HIDDEN NumpyAllocation : public memory::Allocation {
 public:
  explicit NumpyAllocation(const py::array &arr)
      : Allocation(const_cast<void *>(arr.data()), sizeof(T) * (arr.size()),
                   paddle::platform::CPUPlace()),
        arr_(arr.ptr()) {
    PADDLE_ENFORCE_NOT_NULL(arr_, platform::errors::InvalidArgument(
                                      "The underlying PyObject pointer of "
                                      "numpy array cannot be nullptr"));
    PADDLE_ENFORCE_NE(
        arr_, Py_None,
        platform::errors::PreconditionNotMet(
            "The underlying PyObject pointer of numpy array cannot be None"));
    Py_INCREF(arr_);
  }
  ~NumpyAllocation() override {
    py::gil_scoped_acquire gil;
    Py_DECREF(arr_);
  }

 private:
  PyObject *arr_;
};

template <typename T>
struct ValidDTypeToPyArrayChecker {
  static constexpr bool kValue = false;
};

#define DECLARE_VALID_DTYPE_TO_PY_ARRAY(type) \
  template <>                                 \
  struct ValidDTypeToPyArrayChecker<type> {   \
    static constexpr bool kValue = true;      \
  }

DECLARE_VALID_DTYPE_TO_PY_ARRAY(platform::float16);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(platform::bfloat16);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(platform::complex64);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(platform::complex128);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(float);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(double);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(bool);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(int8_t);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(int16_t);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(int);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(int64_t);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(uint8_t);

inline std::string TensorDTypeToPyDTypeStr(
    framework::proto::VarType::Type type) {
#define TENSOR_DTYPE_TO_PY_DTYPE(T, proto_type)                             \
  if (type == proto_type) {                                                 \
    if (std::is_same<T, platform::float16>::value) {                        \
      return "e";                                                           \
    } else if (std::is_same<T, platform::bfloat16>::value) {                \
      /* NumPy character code of uint16 due to no support for bfloat16 */   \
      return "H";                                                           \
    } else if (std::is_same<T, platform::complex64>::value) {               \
      return "F";                                                           \
    } else if (std::is_same<T, platform::complex128>::value) {              \
      return "D";                                                           \
    } else {                                                                \
      constexpr auto kIsValidDType = ValidDTypeToPyArrayChecker<T>::kValue; \
      PADDLE_ENFORCE_EQ(                                                    \
          kIsValidDType, true,                                              \
          platform::errors::Unimplemented(                                  \
              "This type [%s] of tensor cannot be expose to Python",        \
              typeid(T).name()));                                           \
      return py::format_descriptor<T>::format();                            \
    }                                                                       \
  }

  _ForEachDataType_(TENSOR_DTYPE_TO_PY_DTYPE);
#undef TENSOR_DTYPE_TO_PY_DTYPE
  PADDLE_THROW(platform::errors::Unimplemented(
      "Unsupported tensor data type: %s", framework::DataTypeToString(type)));
}
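
// Example (illustrative, not in the original file): the returned strings are
// single-character numpy typestr codes, e.g.
//   TensorDTypeToPyDTypeStr(framework::proto::VarType::FP32)  ->  "f"
//   TensorDTypeToPyDTypeStr(framework::proto::VarType::FP16)  ->  "e"
//   TensorDTypeToPyDTypeStr(framework::proto::VarType::BF16)  ->  "H"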

}  // namespace details

template <typename T>
T TensorGetElement(const framework::Tensor &self, size_t offset) {
  PADDLE_ENFORCE_LT(offset, self.numel(),
                    platform::errors::InvalidArgument(
                        "The offset exceeds the size of tensor."));
  T b = static_cast<T>(0);
  if (platform::is_cpu_place(self.place())) {
    b = self.data<T>()[offset];
  } else if (platform::is_xpu_place(self.place())) {
#ifdef PADDLE_WITH_XPU
    const T *a = self.data<T>();
    auto p = BOOST_GET_CONST(platform::XPUPlace, self.place());
    paddle::memory::Copy(platform::CPUPlace(), &b, p, a + offset, sizeof(T));
#endif
  } else if (platform::is_gpu_place(self.place())) {
#ifdef PADDLE_WITH_CUDA
    const T *a = self.data<T>();
    auto p = BOOST_GET_CONST(platform::CUDAPlace, self.place());
    paddle::memory::Copy(platform::CPUPlace(), &b, p, a + offset, sizeof(T),
                         nullptr);
#endif
  }
  return b;
}
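
// Usage sketch (illustrative, not part of the original file), assuming an
// initialized CPU float32 tensor:
//
//   framework::Tensor t;
//   float *buf = t.mutable_data<float>(framework::make_ddim({2, 3}),
//                                      platform::CPUPlace());
//   buf[5] = 3.14f;
//   float v = TensorGetElement<float>(t, 5);  // v == 3.14f
//
// `offset` is a linear index into the underlying buffer; for device tensors
// each call issues a single sizeof(T)-byte copy back to the host.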

template <typename T>
void TensorSetElement(framework::Tensor *self, size_t offset, T elem) {
  PADDLE_ENFORCE_LT(offset, self->numel(),
                    platform::errors::InvalidArgument(
                        "The offset exceeds the size of tensor."));
  if (platform::is_cpu_place(self->place())) {
    self->mutable_data<T>(self->place())[offset] = elem;
  } else if (platform::is_xpu_place(self->place())) {
#ifdef PADDLE_WITH_XPU
    auto p = BOOST_GET_CONST(platform::XPUPlace, self->place());
    T *a = self->mutable_data<T>(p);
    paddle::memory::Copy(p, a + offset, platform::CPUPlace(), &elem, sizeof(T));
#endif
  } else if (platform::is_gpu_place(self->place())) {
#ifdef PADDLE_WITH_CUDA
    auto p = BOOST_GET_CONST(platform::CUDAPlace, self->place());
    T *a = self->mutable_data<T>(p);
    paddle::memory::Copy(p, a + offset, platform::CPUPlace(), &elem, sizeof(T),
                         nullptr);
#endif
  }
}
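
// Usage sketch (illustrative, not part of the original file): the mirror of
// TensorGetElement. For GPU/XPU places each call issues one sizeof(T)-byte
// host-to-device copy, so element-wise writes are costly and best batched.
//
//   TensorSetElement<float>(&t, /*offset=*/5, 2.0f);
//   // TensorGetElement<float>(t, 5) now returns 2.0f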

// NOTE(wangxi): When copying data to an accelerator card, we need to call
// set_device(dev_id) first.
template <typename P>
static int GetDeviceId(const P &place) {
  // for CPUPlace and CUDAPinnedPlace.
  PADDLE_THROW(platform::errors::PermissionDenied(
      "Paddle can't Get CPUPlace or CUDAPinnedPlace Device Id."));
}

template <>
int GetDeviceId<platform::CUDAPlace>(const platform::CUDAPlace &place) {
  return place.GetDeviceId();
}

template <>
int GetDeviceId<platform::XPUPlace>(const platform::XPUPlace &place) {
  return place.GetDeviceId();
}

// NOTE(wangxi16): Used by VarBase __setitem__
template <>
int GetDeviceId<platform::Place>(const platform::Place &place) {
  if (paddle::platform::is_gpu_place(place)) {
    return GetDeviceId(BOOST_GET_CONST(platform::CUDAPlace, place));
  } else if (paddle::platform::is_xpu_place(place)) {
    return GetDeviceId(BOOST_GET_CONST(platform::XPUPlace, place));
  }
  // for CPUPlace and CUDAPinnedPlace.
  PADDLE_THROW(platform::errors::PermissionDenied(
      "Paddle can't Get CPUPlace or CUDAPinnedPlace Device Id."));
}

template <typename T, typename P>
void SetTensorFromPyArrayT(
    framework::Tensor *self,
    const py::array_t<T, py::array::c_style | py::array::forcecast> &array,
    const P &place, bool zero_copy) {
  std::vector<int64_t> dims;
  dims.reserve(array.ndim());
  for (decltype(array.ndim()) i = 0; i < array.ndim(); ++i) {
    dims.push_back(static_cast<int>(array.shape()[i]));
  }
  self->Resize(framework::make_ddim(dims));

  if (paddle::platform::is_cpu_place(place)) {
    if (zero_copy) {
      auto holder = std::make_shared<details::NumpyAllocation<T>>(array);
      auto type = framework::ToDataType(std::type_index(typeid(T)));
      self->ResetHolderWithType(holder, type);
    } else {
      auto dst = self->mutable_data<T>(place);
      std::memcpy(dst, array.data(), array.nbytes());
    }
  } else if (paddle::platform::is_xpu_place(place)) {
#ifdef PADDLE_WITH_XPU
    platform::XPUDeviceGuard guard(GetDeviceId(place));
    auto dst = self->mutable_data<T>(place);
    xpu_memcpy(dst, array.data(), array.nbytes(),
               XPUMemcpyKind::XPU_HOST_TO_DEVICE);
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Cannot use XPUPlace in CPU/GPU version, "
        "Please recompile or reinstall Paddle with XPU support."));
#endif
  } else {
#ifdef PADDLE_WITH_CUDA
    if (paddle::platform::is_gpu_place(place)) {
      platform::CUDADeviceGuard guard(GetDeviceId(place));
      auto dst = self->mutable_data<T>(place);
      paddle::platform::GpuMemcpySync(dst, array.data(), array.nbytes(),
                                      cudaMemcpyHostToDevice);

    } else if (paddle::platform::is_cuda_pinned_place(place)) {
      auto dst = self->mutable_data<T>(place);
      std::memcpy(dst, array.data(), array.nbytes());
    } else {
      PADDLE_THROW(platform::errors::InvalidArgument(
          "Incompatible place type: Tensor.set() supports "
          "CPUPlace, XPUPlace, CUDAPlace "
          "and CUDAPinnedPlace, but got %s!",
          place));
    }
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Cannot use CUDAPlace or CUDAPinnedPlace in CPU only version, "
        "Please recompile or reinstall Paddle with CUDA support."));
#endif
  }
}
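
// Note on zero_copy (a summary of the code above; CPU place only): when
// zero_copy is true the tensor does not allocate. It wraps the numpy buffer
// in a details::NumpyAllocation<T>, which Py_INCREFs the array on
// construction and Py_DECREFs it under the GIL on destruction, so the buffer
// stays alive for as long as the tensor holds it. When zero_copy is false,
// the data is memcpy'd into a fresh allocation and the buffers diverge.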

template <typename P>
void SetTensorFromPyArray(framework::Tensor *self, const py::object &obj,
                          const P &place, bool zero_copy) {
  auto array = obj.cast<py::array>();
  if (py::isinstance<py::array_t<float>>(array)) {
    SetTensorFromPyArrayT<float, P>(self, array, place, zero_copy);
  } else if (py::isinstance<py::array_t<int>>(array)) {
    SetTensorFromPyArrayT<int, P>(self, array, place, zero_copy);
  } else if (py::isinstance<py::array_t<int64_t>>(array)) {
    SetTensorFromPyArrayT<int64_t, P>(self, array, place, zero_copy);
  } else if (py::isinstance<py::array_t<double>>(array)) {
    SetTensorFromPyArrayT<double, P>(self, array, place, zero_copy);
  } else if (py::isinstance<py::array_t<int8_t>>(array)) {
    SetTensorFromPyArrayT<int8_t, P>(self, array, place, zero_copy);
  } else if (py::isinstance<py::array_t<int16_t>>(array)) {
    SetTensorFromPyArrayT<int16_t, P>(self, array, place, zero_copy);
  } else if (py::isinstance<py::array_t<uint8_t>>(array)) {
    SetTensorFromPyArrayT<uint8_t, P>(self, array, place, zero_copy);
  } else if (py::isinstance<py::array_t<paddle::platform::float16>>(array)) {
    SetTensorFromPyArrayT<paddle::platform::float16, P>(self, array, place,
                                                        zero_copy);
  } else if (py::isinstance<py::array_t<paddle::platform::complex64>>(array)) {
    SetTensorFromPyArrayT<paddle::platform::complex64, P>(self, array, place,
                                                          zero_copy);
  } else if (py::isinstance<py::array_t<paddle::platform::complex128>>(array)) {
    SetTensorFromPyArrayT<paddle::platform::complex128, P>(self, array, place,
                                                           zero_copy);
  } else if (py::isinstance<py::array_t<uint16_t>>(array)) {
    // Since NumPy does not support bfloat16 yet, uint16 is used here to
    // carry bfloat16 values.
    SetTensorFromPyArrayT<paddle::platform::bfloat16, P>(self, array, place,
                                                         zero_copy);
  } else if (py::isinstance<py::array_t<bool>>(array)) {
    SetTensorFromPyArrayT<bool, P>(self, array, place, zero_copy);
  } else {
    // obj may be any type, obj.cast<py::array>() may be failed,
    // then the array.dtype will be string of unknown meaning,
389
    PADDLE_THROW(platform::errors::InvalidArgument(
390 391 392 393
        "Input object type error or incompatible array data type. "
        "tensor.set() supports array with bool, float16, float32, "
        "float64, int8, int16, int32, int64, uint8 or uint16, "
        "please check your input or input array data type."));
  }
}

template <typename T, size_t D>
void _sliceCompute(const framework::Tensor *in, framework::Tensor *out,
                   const platform::CPUDeviceContext &ctx,
                   const std::vector<int> &axes,
                   const std::vector<int> &starts) {
  auto &eigen_place = *ctx.eigen_device();
  auto place = in->place();
  auto out_dims = out->dims();
  auto in_dims = in->dims();

  auto offsets = Eigen::array<int, D>();
  auto extents = Eigen::array<int, D>();
  for (size_t i = 0; i < D; ++i) {
    offsets[i] = 0;
    extents[i] = out_dims[i];
  }
  int start;
  for (size_t i = 0; i < axes.size(); ++i) {
    start = starts[i];
    if (start < 0) {
      start = (start + in_dims[axes[i]]);
    }
    start = std::max(start, 0);
    offsets[axes[i]] = start;
  }
  auto in_t =
      framework::EigenTensor<T, D, Eigen::RowMajor, Eigen::DenseIndex>::From(
          *in);
  auto out_t =
      framework::EigenTensor<T, D, Eigen::RowMajor, Eigen::DenseIndex>::From(
          *out);
  out_t.device(eigen_place) = in_t.slice(offsets, extents);
}
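
// Example (illustrative, not part of the original file): taking rows 2..4 of
// a 6x4 tensor corresponds to
//   axes = {0}, starts = {2}  =>  offsets = {2, 0}, extents = {3, 4}
// where out->dims() must already be {3, 4}; Eigen's slice() takes
// per-dimension start offsets and extents.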

template <typename T>
void _concatCompute(const std::vector<paddle::framework::Tensor> &ins,
                    paddle::framework::Tensor *out,
                    const platform::CPUDeviceContext &ctx, int64_t axis) {
  if (axis == 0 && ins.size() < 10) {
    size_t output_offset = 0;
    for (auto &in : ins) {
      auto in_stride = framework::stride_numel(in.dims());
      auto out_stride = framework::stride_numel(out->dims());
      paddle::operators::StridedNumelCopyWithAxis<T>(
          ctx, axis, out->data<T>() + output_offset, out_stride, in.data<T>(),
          in_stride, in_stride[axis]);
      output_offset += in_stride[axis];
    }
  } else {
    paddle::operators::math::ConcatFunctor<platform::CPUDeviceContext, T>
        concat_functor;
    concat_functor(ctx, ins, static_cast<int>(axis), out);
  }
}

inline void _getSliceinfo(const framework::Tensor &self, py::object obj,
                          const int64_t dim, int64_t *pstart, int64_t *pstop,
                          int64_t *pstep, int64_t *pslicelength) {
  auto &start = *pstart;
  auto &stop = *pstop;
  auto &step = *pstep;
  auto &slicelength = *pslicelength;
  const framework::DDim &srcDDim = self.dims();
  if (dim < 0 || dim >= srcDDim.size()) {
    throw py::index_error();
  }
  if (py::isinstance<py::slice>(obj)) {
    size_t lstart, lstop, lstep, lslicelength;
    py::slice s = static_cast<py::slice>(obj);
    if (!s.compute(srcDDim[dim], &lstart, &lstop, &lstep, &lslicelength)) {
      throw py::index_error();
    }
    start = static_cast<int64_t>(lstart);
    stop = static_cast<int64_t>(lstop);
    step = static_cast<int64_t>(lstep);
    slicelength = static_cast<int64_t>(lslicelength);
  } else if (py::isinstance<py::int_>(obj)) {
    start = static_cast<int64_t>(static_cast<py::int_>(obj));
    if (std::abs(start) >= srcDDim[dim]) {
      throw py::index_error();
    }
    start = (start >= 0) ? start : srcDDim[dim] - start;
    stop = start + 1;
    step = 1;
    slicelength = 1;
  } else {
    throw py::index_error();
  }
}
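
// Example (illustrative, not part of the original file): for a dimension of
// size 10,
//   obj = slice(1, 7, 2)  ->  start=1, stop=7,  step=2, slicelength=3
//   obj = 3               ->  start=3, stop=4,  step=1, slicelength=1
//   obj = -1              ->  start=9, stop=10, step=1, slicelength=1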

inline framework::Tensor *_getTensor(const framework::Tensor &self,
                                     const framework::DDim &ddim) {
  framework::Tensor *output = new framework::Tensor();
  output->Resize(ddim);
  auto place = self.place();
  if (platform::is_cpu_place(place)) {
    output->mutable_data(BOOST_GET_CONST(platform::CPUPlace, place),
                         self.type());
  } else if (platform::is_xpu_place(place)) {
#ifdef PADDLE_WITH_XPU
    output->mutable_data(BOOST_GET_CONST(platform::XPUPlace, place),
                         self.type());
#endif
  } else {
#ifdef PADDLE_WITH_CUDA
    if (platform::is_cuda_pinned_place(place)) {
      output->mutable_data(BOOST_GET_CONST(platform::CUDAPinnedPlace, place),
                           self.type());
    } else if ((platform::is_gpu_place(place))) {
      output->mutable_data(BOOST_GET_CONST(platform::CUDAPlace, place),
                           self.type());
    }
#endif
  }
  return output;
}

template <typename T>
void _sliceDapper(const framework::Tensor *in, framework::Tensor *out,
                  const platform::CPUDeviceContext &ctx,
                  const std::vector<int> &axes, const std::vector<int> &starts,
                  int size) {
  switch (size) {
    case 1:
      _sliceCompute<T, 1>(in, out, ctx, axes, starts);
      break;
    case 2:
      _sliceCompute<T, 2>(in, out, ctx, axes, starts);
      break;
    case 3:
      _sliceCompute<T, 3>(in, out, ctx, axes, starts);
      break;
    case 4:
      _sliceCompute<T, 4>(in, out, ctx, axes, starts);
      break;
    case 5:
      _sliceCompute<T, 5>(in, out, ctx, axes, starts);
      break;
    case 6:
      _sliceCompute<T, 6>(in, out, ctx, axes, starts);
      break;
    case 7:
      _sliceCompute<T, 7>(in, out, ctx, axes, starts);
      break;
    case 8:
      _sliceCompute<T, 8>(in, out, ctx, axes, starts);
      break;
    case 9:
      _sliceCompute<T, 9>(in, out, ctx, axes, starts);
      break;
    default:
      PADDLE_THROW(platform::errors::InvalidArgument(
          "The dim size should be 1 to 9, current is %d", size));
      break;
  }
}

template <typename T>
inline framework::Tensor *_sliceWrapper(const framework::Tensor &self,
                                        const platform::CPUDeviceContext &ctx,
                                        py::object obj, int dim, int64_t start,
                                        int64_t slicelength) {
  framework::DDim dstDDim = self.dims();
  dstDDim[dim] = static_cast<int64_t>(slicelength);
  std::vector<int> axes({dim});
  std::vector<int> starts({static_cast<int>(start)});
  framework::Tensor *output = _getTensor(self, dstDDim);
  _sliceDapper<T>(&self, output, ctx, axes, starts, dstDDim.size());
  return output;
}

template <typename T>
inline framework::Tensor *_sliceAndConcat(const framework::Tensor &self,
                                          py::object obj, int dim) {
  platform::CPUDeviceContext ctx;
  int64_t start, stop, step, slicelength;
  _getSliceinfo(self, obj, dim, &start, &stop, &step, &slicelength);
  if (step == 1 || slicelength == 1) {
    return _sliceWrapper<T>(self, ctx, obj, dim, start, slicelength);
  } else {
    std::vector<framework::Tensor> ins;
    for (auto i = 0; i < slicelength; ++i, start += step) {
      ins.emplace_back(*_sliceWrapper<T>(self, ctx, obj, dim, start, 1));
    }

    // do the concat operation
    framework::DDim dstDDim = self.dims();
    dstDDim[dim] = static_cast<int64_t>(slicelength);
    framework::Tensor *output1 = _getTensor(self, dstDDim);
    _concatCompute<T>(ins, output1, ctx, dim);
    return output1;
  }
}

inline framework::Tensor *_sliceTensor(const framework::Tensor &self,
                                       py::object obj, int dim) {
  auto src_type = self.type();
  switch (src_type) {
    case framework::proto::VarType::FP16:
      return _sliceAndConcat<paddle::platform::float16>(self, obj, dim);
    case framework::proto::VarType::BF16:
      return _sliceAndConcat<paddle::platform::bfloat16>(self, obj, dim);
    case framework::proto::VarType::COMPLEX64:
      return _sliceAndConcat<paddle::platform::complex64>(self, obj, dim);
    case framework::proto::VarType::COMPLEX128:
      return _sliceAndConcat<paddle::platform::complex128>(self, obj, dim);
    case framework::proto::VarType::FP32:
      return _sliceAndConcat<float>(self, obj, dim);
    case framework::proto::VarType::FP64:
      return _sliceAndConcat<double>(self, obj, dim);
    case framework::proto::VarType::INT8:
      return _sliceAndConcat<int8_t>(self, obj, dim);
    case framework::proto::VarType::INT16:
      return _sliceAndConcat<int16_t>(self, obj, dim);
    case framework::proto::VarType::INT32:
      return _sliceAndConcat<int>(self, obj, dim);
    case framework::proto::VarType::INT64:
      return _sliceAndConcat<int64_t>(self, obj, dim);
    case framework::proto::VarType::BOOL:
      return _sliceAndConcat<bool>(self, obj, dim);
    case framework::proto::VarType::UINT8:
      return _sliceAndConcat<uint8_t>(self, obj, dim);
    default:
      PADDLE_THROW(platform::errors::InvalidArgument(
          "Not support tensor type: %s",
          framework::DataTypeToString(src_type)));
  }
}

inline framework::Tensor *_pySliceTensor(const framework::Tensor &self,
                                         py::object obj) {
  if (py::isinstance<py::tuple>(obj)) {
    py::list l = static_cast<py::list>(obj);
    std::unique_ptr<framework::Tensor> target;
    framework::Tensor *src = const_cast<framework::Tensor *>(&self);
    for (auto i = 0; i < static_cast<int>(l.size()); ++i) {
      src = _sliceTensor(*src, l[i], i);
      if (i + 1 == static_cast<int>(l.size())) {
        return src;
      } else {
        target.reset(src);
      }
    }
    return nullptr;
  } else {
    return _sliceTensor(self, obj, 0);
  }
}

inline framework::Tensor *PySliceTensor(const framework::Tensor &self,
                                        py::object obj) {
  if (platform::is_gpu_place(self.place())) {
    std::unique_ptr<framework::Tensor> holder;
    framework::Tensor src;
    framework::TensorCopySync(self, platform::CPUPlace(), &src);
    framework::Tensor *output = _pySliceTensor(src, obj);
    holder.reset(output);
    framework::Tensor *dst = _getTensor(*output, output->dims());
    framework::TensorCopySync(*output, self.place(), dst);
    return dst;
  } else {
    return _pySliceTensor(self, obj);
  }
}
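
// Example (illustrative, not part of the original file): for a CPU tensor t
// with dims {4, 5},
//   PySliceTensor(t, py::int_(1))   -> new tensor with dims {1, 5}
//   PySliceTensor(t, some_py_tuple) -> applies tuple[i] to dimension i in turn
// For GPU tensors the data is copied to host, sliced there, and the result is
// copied back to the original device. The caller owns the returned pointer.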

inline py::array TensorToPyArray(const framework::Tensor &tensor,
                                 bool need_deep_copy = false) {
  if (!tensor.IsInitialized()) {
    return py::array();
  }
  bool is_gpu_tensor = platform::is_gpu_place(tensor.place());
  bool is_xpu_tensor = platform::is_xpu_place(tensor.place());
  const auto &tensor_dims = tensor.dims();
  auto tensor_dtype = tensor.type();
  size_t sizeof_dtype = framework::SizeOfType(tensor_dtype);

  std::vector<size_t> py_dims(tensor_dims.size());
  std::vector<size_t> py_strides(tensor_dims.size());

  size_t numel = 1;
  for (int i = tensor_dims.size() - 1; i >= 0; --i) {
    py_dims[i] = (size_t)tensor_dims[i];
    py_strides[i] = sizeof_dtype * numel;
    numel *= py_dims[i];
  }

  const void *tensor_buf_ptr = tensor.data<void>();

  std::string py_dtype_str = details::TensorDTypeToPyDTypeStr(tensor.type());

  if (!is_gpu_tensor && !is_xpu_tensor) {
    if (!need_deep_copy) {
      auto base = py::cast(std::move(tensor));
      return py::array(py::dtype(py_dtype_str.c_str()), py_dims, py_strides,
                       const_cast<void *>(tensor_buf_ptr), base);
    } else {
      py::array py_arr(py::dtype(py_dtype_str.c_str()), py_dims, py_strides);
      PADDLE_ENFORCE_EQ(
          py_arr.writeable(), true,
          platform::errors::InvalidArgument(
              "PyArray is not writable, in which case memory leak "
              "or double free would occur"));
      PADDLE_ENFORCE_EQ(
          py_arr.owndata(), true,
          platform::errors::InvalidArgument(
              "PyArray does not own data, in which case  memory leak "
              "or double free would occur"));
      platform::CPUPlace place;
      size_t copy_bytes = sizeof_dtype * numel;
      paddle::memory::Copy(place, py_arr.mutable_data(), place, tensor_buf_ptr,
                           copy_bytes);
      return py_arr;
    }
  } else if (is_xpu_tensor) {
#ifdef PADDLE_WITH_XPU
    py::array py_arr(py::dtype(py_dtype_str.c_str()), py_dims, py_strides);
    PADDLE_ENFORCE_EQ(py_arr.writeable(), true,
                      platform::errors::InvalidArgument(
                          "PyArray is not writable, in which case memory leak "
                          "or double free would occur"));
    PADDLE_ENFORCE_EQ(
        py_arr.owndata(), true,
        platform::errors::InvalidArgument(
            "PyArray does not own data, in which case  memory leak "
            "or double free would occur"));

    size_t copy_bytes = sizeof_dtype * numel;
    auto p = BOOST_GET_CONST(platform::XPUPlace, tensor.place());
    paddle::memory::Copy(platform::CPUPlace(), py_arr.mutable_data(), p,
                         tensor_buf_ptr, copy_bytes);
    return py_arr;
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Cannot use XPUPlace in CPU/GPU version, "
        "Please recompile or reinstall Paddle with XPU support."));
#endif
  } else if (is_gpu_tensor) {
#ifdef PADDLE_WITH_CUDA
    py::array py_arr(py::dtype(py_dtype_str.c_str()), py_dims, py_strides);
    PADDLE_ENFORCE_EQ(py_arr.writeable(), true,
                      platform::errors::InvalidArgument(
                          "PyArray is not writable, in which case memory leak "
                          "or double free would occur"));
    PADDLE_ENFORCE_EQ(
        py_arr.owndata(), true,
        platform::errors::InvalidArgument(
            "PyArray does not own data, in which case  memory leak "
            "or double free would occur"));

    size_t copy_bytes = sizeof_dtype * numel;
747 748 749
    auto p = BOOST_GET_CONST(platform::CUDAPlace, tensor.place());
    paddle::memory::Copy(platform::CPUPlace(), py_arr.mutable_data(), p,
                         tensor_buf_ptr, copy_bytes, nullptr);
    return py_arr;
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Cannot use CUDAPlace in CPU only version, "
        "Please recompile or reinstall Paddle with CUDA support."));
#endif
  }
  PADDLE_THROW(platform::errors::Unimplemented("Place is not supported"));
  return py::array();
}
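
// Example (illustrative, not part of the original file): for a float32 tensor
// with dims {2, 3},
//   py_dims    = {2, 3}
//   py_strides = {12, 4}  // row-major byte strides
// With need_deep_copy=false on a CPU tensor, the returned array aliases the
// tensor buffer and holds `base` to keep it alive; otherwise the data is
// copied into a numpy array that owns its memory.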

}  // namespace pybind
}  // namespace paddle