/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <Python.h>
#include <algorithm>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/operators/eigen/eigen_function.h"
#include "paddle/fluid/operators/math/concat_and_split.h"
#include "paddle/fluid/operators/strided_memcpy.h"
#include "paddle/fluid/platform/bfloat16.h"
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
#include "paddle/fluid/platform/cuda_device_guard.h"
#endif
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/float16.h"
#include "paddle/fluid/platform/profiler.h"
#include "pybind11/numpy.h"
#include "pybind11/pybind11.h"

namespace py = pybind11;

namespace pybind11 {
namespace detail {

// Note: use the same enum number as numpy's float16.
// import numpy as np
// print(np.dtype(np.float16).num)  # 23
constexpr int NPY_FLOAT16_ = 23;
constexpr int NPY_UINT16_ = 4;
constexpr int NPY_COMPLEX64 = 14;
constexpr int NPY_COMPLEX128 = 15;

// Note: Since float16 is not a builtin type in C++, we register
// paddle::platform::float16 as numpy.float16.
// Ref: https://github.com/pybind/pybind11/issues/1776
template <>
struct npy_format_descriptor<paddle::platform::float16> {
  static py::dtype dtype() {
    handle ptr = npy_api::get().PyArray_DescrFromType_(NPY_FLOAT16_);
    return reinterpret_borrow<py::dtype>(ptr);
  }
  static std::string format() {
    // Note: "e" represents float16.
    // Details at:
    // https://docs.python.org/3/library/struct.html#format-characters.
    return "e";
  }
  static constexpr auto name = _("float16");
};

// Note: Since bfloat16 is not a builtin type in either C++ or numpy,
// we register paddle::platform::bfloat16 as numpy.uint16.
template <>
struct npy_format_descriptor<paddle::platform::bfloat16> {
  static py::dtype dtype() {
    handle ptr = npy_api::get().PyArray_DescrFromType_(NPY_UINT16_);
    return reinterpret_borrow<py::dtype>(ptr);
  }
  static std::string format() {
    // Note: "H" represents UINT16.
    // Details at:
    // https://docs.python.org/3/library/struct.html#format-characters.
    return "H";
  }
  static constexpr auto name = _("bfloat16");
};

// we register paddle::platform::complex<float> as numpy.complex64.
template <>
struct npy_format_descriptor<paddle::platform::complex<float>> {
  static py::dtype dtype() {
    handle ptr = npy_api::get().PyArray_DescrFromType_(NPY_COMPLEX64);
    return reinterpret_borrow<py::dtype>(ptr);
  }

  static std::string format() {
    // Note: "F" represents complex64.
    // Details at:
    // https://stackoverflow.com/questions/13997087/what-are-the-available-datatypes-for-dtype-with-numpys-loadtxt-an-genfromtx
    // for k, v in np.sctypeDict.iteritems():
    //     print '{0:14s} : {1:40s}'.format(str(k), v)
    return "F";
  }
  static constexpr auto name = _("complex64");
};

template <>
struct npy_format_descriptor<paddle::platform::complex<double>> {
  static py::dtype dtype() {
    handle ptr = npy_api::get().PyArray_DescrFromType_(NPY_COMPLEX128);
    return reinterpret_borrow<py::dtype>(ptr);
  }

  static std::string format() {
    // Note: "D" represents complex128.
    // Details at:
    // https://stackoverflow.com/questions/13997087/what-are-the-available-datatypes-for-dtype-with-numpys-loadtxt-an-genfromtx
    // for k, v in np.sctypeDict.iteritems():
    //     print '{0:14s} : {1:40s}'.format(str(k), v)
    return "D";
  }
  static constexpr auto name = _("complex128");
};

}  // namespace detail
}  // namespace pybind11

namespace paddle {
namespace pybind {

namespace details {

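// Allocation that aliases the buffer of a borrowed numpy array instead of
// copying it: the constructor takes a reference on the underlying PyObject
// so the buffer outlives the tensor, and the destructor re-acquires the GIL
// before releasing that reference. Used by the zero-copy paths below.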
template <typename T>
class PYBIND11_HIDDEN NumpyAllocation : public memory::Allocation {
 public:
  explicit NumpyAllocation(const py::array &arr)
      : Allocation(const_cast<void *>(arr.data()), sizeof(T) * (arr.size()),
                   paddle::platform::CPUPlace()),
        arr_(arr.ptr()) {
    PADDLE_ENFORCE_NOT_NULL(arr_, platform::errors::InvalidArgument(
                                      "The underlying PyObject pointer of "
                                      "numpy array cannot be nullptr"));
    PADDLE_ENFORCE_NE(
        arr_, Py_None,
        platform::errors::PreconditionNotMet(
            "The underlying PyObject pointer of numpy array cannot be None"));
    Py_INCREF(arr_);
  }
  ~NumpyAllocation() override {
    py::gil_scoped_acquire gil;
    Py_DECREF(arr_);
  }

 private:
  PyObject *arr_;
};

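// Compile-time whitelist of element types that may be exposed to Python as
// numpy arrays; DECLARE_VALID_DTYPE_TO_PY_ARRAY adds one specialization per
// supported type.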
template <typename T>
struct ValidDTypeToPyArrayChecker {
  static constexpr bool kValue = false;
};

#define DECLARE_VALID_DTYPE_TO_PY_ARRAY(type) \
  template <>                                 \
  struct ValidDTypeToPyArrayChecker<type> {   \
    static constexpr bool kValue = true;      \
  }

DECLARE_VALID_DTYPE_TO_PY_ARRAY(platform::float16);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(platform::bfloat16);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(platform::complex<float>);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(platform::complex<double>);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(float);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(double);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(bool);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(int8_t);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(int16_t);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(int);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(int64_t);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(uint8_t);

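// Maps a tensor's proto data type to the numpy/struct format character used
// when building py::array objects (e.g. "e" for float16, "H" for
// bfloat16-as-uint16), falling back to pybind11's format_descriptor for
// ordinary C++ types.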
inline std::string TensorDTypeToPyDTypeStr(
    framework::proto::VarType::Type type) {
#define TENSOR_DTYPE_TO_PY_DTYPE(T, proto_type)                             \
  if (type == proto_type) {                                                 \
    if (std::is_same<T, platform::float16>::value) {                        \
      return "e";                                                           \
    } else if (std::is_same<T, platform::bfloat16>::value) {                \
      /* use uint16's NumPy character code since NumPy has no bfloat16 */    \
      return "H";                                                           \
    } else if (std::is_same<T, platform::complex<float>>::value) {          \
      return "F";                                                           \
    } else if (std::is_same<T, platform::complex<double>>::value) {         \
      return "D";                                                           \
    } else {                                                                \
      constexpr auto kIsValidDType = ValidDTypeToPyArrayChecker<T>::kValue; \
      PADDLE_ENFORCE_EQ(                                                    \
          kIsValidDType, true,                                              \
          platform::errors::Unimplemented(                                  \
              "This type [%s] of tensor cannot be expose to Python",        \
              typeid(T).name()));                                           \
      return py::format_descriptor<T>::format();                            \
    }                                                                       \
  }

  _ForEachDataType_(TENSOR_DTYPE_TO_PY_DTYPE);
#undef TENSOR_DTYPE_TO_PY_DTYPE
  PADDLE_THROW(platform::errors::Unimplemented(
      "Unsupported tensor data type: %s", framework::DataTypeToString(type)));
}

}  // namespace details

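// Reads a single element at a flat offset, copying it back to the host first
// when the tensor lives on a device (XPU/GPU/MLU/NPU). TensorSetElement
// below is the symmetric write path.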
template <typename T>
T TensorGetElement(const framework::Tensor &self, size_t offset) {
  PADDLE_ENFORCE_LT(offset, self.numel(),
                    platform::errors::InvalidArgument(
                        "The offset exceeds the size of the tensor."));

  T b = static_cast<T>(0);
  if (platform::is_cpu_place(self.place())) {
    b = self.data<T>()[offset];
  } else if (platform::is_xpu_place(self.place())) {
#ifdef PADDLE_WITH_XPU
    const T *a = self.data<T>();
    auto p = self.place();
    paddle::memory::Copy(platform::CPUPlace(), &b, p, a + offset, sizeof(T));
#endif
  } else if (platform::is_gpu_place(self.place())) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    const T *a = self.data<T>();
    auto p = self.place();
    paddle::memory::Copy(platform::CPUPlace(), &b, p, a + offset, sizeof(T),
                         nullptr);
#endif
  } else if (platform::is_mlu_place(self.place())) {
#ifdef PADDLE_WITH_MLU
    const T *a = self.data<T>();
    auto p = self.place();
    paddle::memory::Copy(platform::CPUPlace(), &b, p, a + offset, sizeof(T),
                         nullptr);
#endif
  } else if (platform::is_npu_place(self.place())) {
#if defined(PADDLE_WITH_ASCEND_CL)
    const T *a = self.data<T>();
    auto p = self.place();
    paddle::memory::Copy(platform::CPUPlace(), &b, p, a + offset, sizeof(T),
                         nullptr);
#endif
  }
  VLOG(10) << "TensorGetElement, place: " << self.place()
           << ", offset: " << offset << ", element: " << b;
  return b;
}

template <typename T>
void TensorSetElement(framework::Tensor *self, size_t offset, T elem) {
  PADDLE_ENFORCE_LT(offset, self->numel(),
                    platform::errors::InvalidArgument(
                        "The offset exceeds the size of the tensor."));
  VLOG(10) << "TensorSetElement, place: " << self->place()
           << ", offset: " << offset << ", element: " << elem;
  if (platform::is_cpu_place(self->place())) {
    self->mutable_data<T>(self->place())[offset] = elem;
  } else if (platform::is_xpu_place(self->place())) {
#ifdef PADDLE_WITH_XPU
    auto p = self->place();
    T *a = self->mutable_data<T>(p);
    paddle::memory::Copy(p, a + offset, platform::CPUPlace(), &elem, sizeof(T));
#endif
  } else if (platform::is_gpu_place(self->place())) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    auto p = self->place();
    T *a = self->mutable_data<T>(p);
    paddle::memory::Copy(p, a + offset, platform::CPUPlace(), &elem, sizeof(T),
                         nullptr);
#endif
  } else if (platform::is_mlu_place(self->place())) {
#ifdef PADDLE_WITH_MLU
    auto p = self->place();
    T *a = self->mutable_data<T>(p);
    paddle::memory::Copy(p, a + offset, platform::CPUPlace(), &elem, sizeof(T),
                         nullptr);
#endif
  } else if (platform::is_npu_place(self->place())) {
#if defined(PADDLE_WITH_ASCEND_CL)
    auto p = self->place();
    T *a = self->mutable_data<T>(p);
    paddle::memory::Copy(p, a + offset, platform::CPUPlace(), &elem, sizeof(T),
                         nullptr);
#endif
  }
}

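// Copies (or, on CPU/IPU with zero_copy, aliases) the contents of a numpy
// array into `self`, resizing the tensor to the array's shape and routing
// the copy through the memcpy primitive of the target place.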
template <typename T, typename P>
void SetTensorFromPyArrayT(
    framework::Tensor *self,
    const py::array_t<T, py::array::c_style | py::array::forcecast> &array,
    const P &place, bool zero_copy) {
  std::vector<int64_t> dims;
  dims.reserve(array.ndim());
  for (decltype(array.ndim()) i = 0; i < array.ndim(); ++i) {
    dims.push_back(static_cast<int>(array.shape()[i]));
  }
  self->Resize(framework::make_ddim(dims));

  if (paddle::platform::is_cpu_place(place)) {
    if (zero_copy) {
      auto holder = std::make_shared<details::NumpyAllocation<T>>(array);
      auto type = framework::ToDataType(std::type_index(typeid(T)));
      self->ResetHolderWithType(holder, type);
    } else {
      auto dst = self->mutable_data<T>(place);
      std::memcpy(dst, array.data(), array.nbytes());
    }
  } else if (paddle::platform::is_xpu_place(place)) {
#ifdef PADDLE_WITH_XPU
    // NOTE(wangxi): When copying data to the accelerator card,
    // we need to call set_device(dev_id) first.
    platform::Place tmp_place = place;
    platform::XPUDeviceGuard guard(tmp_place.device);
    auto dst = self->mutable_data<T>(place);
    memory::Copy(tmp_place, static_cast<void *>(dst), platform::CPUPlace(),
                 static_cast<const void *>(array.data()), array.nbytes());
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Cannot use XPUPlace in CPU/GPU version, "
        "Please recompile or reinstall Paddle with XPU support."));
#endif
  } else if (paddle::platform::is_ipu_place(place)) {
#ifdef PADDLE_WITH_IPU
    if (zero_copy) {
      auto holder = std::make_shared<details::NumpyAllocation<T>>(array);
      auto type = framework::ToDataType(std::type_index(typeid(T)));
      self->ResetHolderWithType(holder, type);
    } else {
      auto dst = self->mutable_data<T>(place);
      std::memcpy(dst, array.data(), array.nbytes());
    }
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Cannot use IPUPlace in CPU/GPU/XPU/NPU version, "
        "Please recompile or reinstall Paddle with IPU support."));
#endif
  } else if (paddle::platform::is_npu_place(place)) {
#ifdef PADDLE_WITH_ASCEND_CL
    platform::Place tmp_place = place;
    platform::NPUDeviceGuard guard(tmp_place.device);
    auto dst = self->mutable_data<T>(place);
    platform::NPUMemcpySync(dst, array.data(), array.nbytes(),
                            ACL_MEMCPY_HOST_TO_DEVICE);
    platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
    auto &ctx = *pool.Get(place);
    ctx.Wait();
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Cannot use NPUPlace in CPU/GPU/XPU version. "
        "Please recompile or reinstall Paddle with NPU support."));
#endif
  } else if (paddle::platform::is_mlu_place(place)) {
#ifdef PADDLE_WITH_MLU
    platform::Place tmp_place = place;
    platform::MLUDeviceGuard guard(tmp_place.device);
    auto dst = self->mutable_data<T>(place);
    paddle::platform::MLUMemcpyH2DSync(dst, array.data(), array.nbytes());
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Cannot use MLUPlace in CPU/GPU version, "
        "Please recompile or reinstall Paddle with MLU support."));
#endif
  } else {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    if (paddle::platform::is_gpu_place(place)) {
      // NOTE(wangxi): When copying data to the accelerator card,
      // we need to call set_device(dev_id) first.
      platform::CUDADeviceGuard guard(place.device);
      auto dst = self->mutable_data<T>(place);
#ifdef PADDLE_WITH_HIP
      paddle::platform::GpuMemcpySync(dst, array.data(), array.nbytes(),
                                      hipMemcpyHostToDevice);
#else
      paddle::platform::GpuMemcpySync(dst, array.data(), array.nbytes(),
                                      cudaMemcpyHostToDevice);
#endif

    } else if (paddle::platform::is_cuda_pinned_place(place)) {
      auto dst = self->mutable_data<T>(place);
      std::memcpy(dst, array.data(), array.nbytes());
    } else {
      PADDLE_THROW(platform::errors::InvalidArgument(
          "Incompatible place type: Tensor.set() supports "
          "CPUPlace, CUDAPlace "
          "and CUDAPinnedPlace, but got %s!",
          place));
    }
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Cannot use CUDAPlace or CUDAPinnedPlace in CPU only version, "
        "Please recompile or reinstall Paddle with CUDA support."));
#endif
  }
}

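// Dtype-dispatch wrapper: inspects the numpy array's element type and
// forwards to the matching SetTensorFromPyArrayT instantiation (uint16
// arrays are interpreted as bfloat16, see the note below).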
template <typename P>
void SetTensorFromPyArray(framework::Tensor *self, const py::object &obj,
                          const P &place, bool zero_copy) {
  auto array = obj.cast<py::array>();
  if (py::isinstance<py::array_t<float>>(array)) {
    SetTensorFromPyArrayT<float, P>(self, array, place, zero_copy);
  } else if (py::isinstance<py::array_t<int>>(array)) {
    SetTensorFromPyArrayT<int, P>(self, array, place, zero_copy);
  } else if (py::isinstance<py::array_t<int64_t>>(array)) {
    SetTensorFromPyArrayT<int64_t, P>(self, array, place, zero_copy);
  } else if (py::isinstance<py::array_t<double>>(array)) {
    SetTensorFromPyArrayT<double, P>(self, array, place, zero_copy);
  } else if (py::isinstance<py::array_t<int8_t>>(array)) {
    SetTensorFromPyArrayT<int8_t, P>(self, array, place, zero_copy);
  } else if (py::isinstance<py::array_t<int16_t>>(array)) {
    SetTensorFromPyArrayT<int16_t, P>(self, array, place, zero_copy);
  } else if (py::isinstance<py::array_t<uint8_t>>(array)) {
    SetTensorFromPyArrayT<uint8_t, P>(self, array, place, zero_copy);
  } else if (py::isinstance<py::array_t<paddle::platform::float16>>(array)) {
    SetTensorFromPyArrayT<paddle::platform::float16, P>(self, array, place,
                                                        zero_copy);
  } else if (py::isinstance<py::array_t<paddle::platform::complex<float>>>(
                 array)) {
    SetTensorFromPyArrayT<paddle::platform::complex<float>, P>(
        self, array, place, zero_copy);
  } else if (py::isinstance<py::array_t<paddle::platform::complex<double>>>(
                 array)) {
    SetTensorFromPyArrayT<paddle::platform::complex<double>, P>(
        self, array, place, zero_copy);
  } else if (py::isinstance<py::array_t<uint16_t>>(array)) {
    // since there is still no support for bfloat16 in NumPy,
    // uint16 is used for casting bfloat16
    SetTensorFromPyArrayT<paddle::platform::bfloat16, P>(self, array, place,
                                                         zero_copy);
  } else if (py::isinstance<py::array_t<bool>>(array)) {
    SetTensorFromPyArrayT<bool, P>(self, array, place, zero_copy);
  } else {
    // obj may be of any type; obj.cast<py::array>() may fail, and then
    // array.dtype will be a string of unknown meaning.
    PADDLE_THROW(platform::errors::InvalidArgument(
        "Input object type error or incompatible array data type. "
        "tensor.set() supports arrays of bool, float16, float32, "
        "float64, int8, int16, int32, int64, uint8, uint16, "
        "complex64 and complex128, "
        "please check your input or input array data type."));
  }
}

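// Builds a tensor backed by CUDA unified (host-mapped) memory: the numpy
// data is copied into memory obtained from cudaHostAlloc with
// cudaHostAllocMapped, and the matching device pointer is installed as the
// tensor's allocation on CUDAPlace(device_id). Only compiled with
// PADDLE_WITH_CUDA.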
template <typename T>
void SetUVATensorFromPyArray(
    const std::shared_ptr<paddle::imperative::VarBase> &self,
    const py::array_t<T> &array, int device_id) {
#if defined(PADDLE_WITH_CUDA)
  auto *self_tensor = self->MutableVar()->GetMutable<framework::LoDTensor>();
  std::vector<int64_t> dims;
  dims.reserve(array.ndim());
  int64_t numel = 1;
  for (decltype(array.ndim()) i = 0; i < array.ndim(); ++i) {
    dims.emplace_back(static_cast<int>(array.shape()[i]));
    numel *= static_cast<int>(array.shape()[i]);
  }
  self_tensor->Resize(framework::make_ddim(dims));

  auto data_type = framework::ToDataType(std::type_index(typeid(T)));
  const auto &need_allocate_size = numel * framework::SizeOfType(data_type);
  T *data_ptr;
  cudaHostAlloc(reinterpret_cast<void **>(&data_ptr), need_allocate_size,
                cudaHostAllocWriteCombined | cudaHostAllocMapped);
  std::memcpy(data_ptr, array.data(), array.nbytes());

  void *cuda_device_pointer = nullptr;
  cudaHostGetDevicePointer(reinterpret_cast<void **>(&cuda_device_pointer),
                           reinterpret_cast<void *>(data_ptr), 0);
  std::shared_ptr<memory::allocation::Allocation> holder =
      std::make_shared<memory::allocation::Allocation>(
          cuda_device_pointer, need_allocate_size,
          platform::CUDAPlace(device_id));
  self_tensor->ResetHolderWithType(holder, data_type);
#endif
}

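// Extracts a contiguous slice of a rank-D CPU tensor with Eigen, given the
// axes to slice and their (already normalized) start offsets; the extent
// taken along each axis is determined by the output tensor's dims.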
template <typename T, size_t D>
void _sliceCompute(const framework::Tensor *in, framework::Tensor *out,
                   const platform::CPUDeviceContext &ctx,
                   const std::vector<int> &axes,
                   const std::vector<int> &starts) {
  auto &eigen_place = *ctx.eigen_device();
  auto out_dims = out->dims();
  auto in_dims = in->dims();

  auto offsets = Eigen::DSizes<Eigen::DenseIndex, D>();
  auto extents = Eigen::DSizes<Eigen::DenseIndex, D>();
  for (size_t i = 0; i < D; ++i) {
    offsets[i] = 0;
    extents[i] = out_dims[i];
  }
  int start;
  for (size_t i = 0; i < axes.size(); ++i) {
    start = starts[i];
    if (start < 0) {
      start = (start + in_dims[axes[i]]);
    }
    start = std::max(start, 0);
    offsets[axes[i]] = start;
  }
  auto in_t =
      framework::EigenTensor<T, D, Eigen::RowMajor, Eigen::DenseIndex>::From(
          *in);
  auto out_t =
      framework::EigenTensor<T, D, Eigen::RowMajor, Eigen::DenseIndex>::From(
          *out);
  operators::EigenSlice<std::decay_t<decltype(eigen_place)>, T, D>::Eval(
      eigen_place, out_t, in_t, offsets, extents);
}

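// Concatenates the given tensors along `axis` on CPU: the axis-0 case with
// few inputs takes a strided-copy fast path, otherwise the generic
// ConcatFunctor is used.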
template <typename T>
void _concatCompute(const std::vector<paddle::framework::Tensor> &ins,
                    paddle::framework::Tensor *out,
                    const platform::CPUDeviceContext &ctx, int64_t axis) {
  if (axis == 0 && ins.size() < 10) {
    size_t output_offset = 0;
    for (auto &in : ins) {
      auto in_stride = framework::stride_numel(in.dims());
      auto out_stride = framework::stride_numel(out->dims());
      paddle::operators::StridedNumelCopyWithAxis<T>(
          ctx, axis, out->data<T>() + output_offset, out_stride, in.data<T>(),
          in_stride, in_stride[axis]);
      output_offset += in_stride[axis];
    }
  } else {
    paddle::operators::math::ConcatFunctor<platform::CPUDeviceContext, T>
        concat_functor;
    concat_functor(ctx, ins, static_cast<int>(axis), out);
  }
}

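// Interprets a Python index (a py::slice or a single py::int_) applied to
// dimension `dim` of `self`, producing normalized start/stop/step values and
// the slice length; anything else, or an out-of-range index, raises
// py::index_error.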
inline void _getSliceinfo(const framework::Tensor &self, py::object obj,
                          const int64_t dim, int64_t *pstart, int64_t *pstop,
                          int64_t *pstep, int64_t *pslicelength) {
  auto &start = *pstart;
  auto &stop = *pstop;
  auto &step = *pstep;
  auto &slicelength = *pslicelength;
  const framework::DDim &srcDDim = self.dims();
  if (dim < 0 || dim >= srcDDim.size()) {
    throw py::index_error();
  }
  if (py::isinstance<py::slice>(obj)) {
    size_t lstart, lstop, lstep, lslicelength;
    py::slice s = static_cast<py::slice>(obj);
    if (!s.compute(srcDDim[dim], &lstart, &lstop, &lstep, &lslicelength)) {
      throw py::index_error();
    }
    start = static_cast<int64_t>(lstart);
    stop = static_cast<int64_t>(lstop);
    step = static_cast<int64_t>(lstep);
    slicelength = static_cast<int64_t>(lslicelength);
  } else if (py::isinstance<py::int_>(obj)) {
    start = static_cast<int64_t>(static_cast<py::int_>(obj));
    if (std::abs(start) >= srcDDim[dim]) {
      throw py::index_error();
    }
    start = (start >= 0) ? start : srcDDim[dim] + start;  // wrap negative index
    stop = start + 1;
    step = 1;
    slicelength = 1;
  } else {
    throw py::index_error();
  }
}

inline framework::Tensor *_getTensor(const framework::Tensor &self,
                                     const framework::DDim &ddim) {
  framework::Tensor *output = new framework::Tensor();
  output->Resize(ddim);
  auto place = self.place();
  if (platform::is_cpu_place(place)) {
    output->mutable_data(place, self.type());
  } else if (platform::is_xpu_place(place)) {
#ifdef PADDLE_WITH_XPU
    output->mutable_data(place, self.type());
#endif
  } else if (platform::is_mlu_place(place)) {
#ifdef PADDLE_WITH_MLU
    output->mutable_data(place, self.type());
#endif
  } else {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    if (platform::is_cuda_pinned_place(place)) {
      output->mutable_data(place, self.type());
    } else if ((platform::is_gpu_place(place))) {
      output->mutable_data(place, self.type());
    }
#endif
  }
  return output;
}

template <typename T>
void _sliceDapper(const framework::Tensor *in, framework::Tensor *out,
                  const platform::CPUDeviceContext &ctx,
                  const std::vector<int> &axes, const std::vector<int> &starts,
                  int size) {
  switch (size) {
    case 1:
      _sliceCompute<T, 1>(in, out, ctx, axes, starts);
      break;
    case 2:
      _sliceCompute<T, 2>(in, out, ctx, axes, starts);
      break;
    case 3:
      _sliceCompute<T, 3>(in, out, ctx, axes, starts);
      break;
    case 4:
      _sliceCompute<T, 4>(in, out, ctx, axes, starts);
      break;
    case 5:
      _sliceCompute<T, 5>(in, out, ctx, axes, starts);
      break;
    case 6:
      _sliceCompute<T, 6>(in, out, ctx, axes, starts);
      break;
    case 7:
      _sliceCompute<T, 7>(in, out, ctx, axes, starts);
      break;
    case 8:
      _sliceCompute<T, 8>(in, out, ctx, axes, starts);
      break;
    case 9:
      _sliceCompute<T, 9>(in, out, ctx, axes, starts);
      break;
    default:
      PADDLE_THROW(platform::errors::InvalidArgument(
          "The dim size should be 1 to 9, current is %d", size));
      break;
  }
}

template <typename T>
inline framework::Tensor *_sliceWrapper(const framework::Tensor &self,
                                        const platform::CPUDeviceContext &ctx,
                                        py::object obj, int dim, int64_t start,
                                        int64_t slicelength) {
  framework::DDim dstDDim = self.dims();
  dstDDim[dim] = static_cast<int64_t>(slicelength);
  std::vector<int> axes({dim});
  std::vector<int> starts({static_cast<int>(start)});
  framework::Tensor *output = _getTensor(self, dstDDim);
  _sliceDapper<T>(&self, output, ctx, axes, starts, dstDDim.size());
  return output;
}

template <typename T>
inline framework::Tensor *_sliceAndConcat(const framework::Tensor &self,
                                          py::object obj, int dim) {
  platform::CPUDeviceContext ctx;
  int64_t start, stop, step, slicelength;
  _getSliceinfo(self, obj, dim, &start, &stop, &step, &slicelength);
  if (step == 1 || slicelength == 1) {
    return _sliceWrapper<T>(self, ctx, obj, dim, start, slicelength);
  } else {
    std::vector<framework::Tensor> ins;
    for (auto i = 0; i < slicelength; ++i, start += step) {
      ins.emplace_back(*_sliceWrapper<T>(self, ctx, obj, dim, start, 1));
    }

    // do the concat operation
    framework::DDim dstDDim = self.dims();
    dstDDim[dim] = static_cast<int64_t>(slicelength);
    framework::Tensor *output1 = _getTensor(self, dstDDim);
    _concatCompute<T>(ins, output1, ctx, dim);
    return output1;
  }
}

inline framework::Tensor *_sliceTensor(const framework::Tensor &self,
                                       py::object obj, int dim) {
  auto src_type = self.type();
  switch (src_type) {
    case framework::proto::VarType::FP16:
      return _sliceAndConcat<paddle::platform::float16>(self, obj, dim);
    case framework::proto::VarType::BF16:
      return _sliceAndConcat<paddle::platform::bfloat16>(self, obj, dim);
    case framework::proto::VarType::COMPLEX64:
      return _sliceAndConcat<paddle::platform::complex<float>>(self, obj, dim);
    case framework::proto::VarType::COMPLEX128:
      return _sliceAndConcat<paddle::platform::complex<double>>(self, obj, dim);
    case framework::proto::VarType::FP32:
      return _sliceAndConcat<float>(self, obj, dim);
    case framework::proto::VarType::FP64:
      return _sliceAndConcat<double>(self, obj, dim);
    case framework::proto::VarType::INT8:
      return _sliceAndConcat<int8_t>(self, obj, dim);
    case framework::proto::VarType::INT16:
      return _sliceAndConcat<int16_t>(self, obj, dim);
    case framework::proto::VarType::INT32:
      return _sliceAndConcat<int>(self, obj, dim);
    case framework::proto::VarType::INT64:
      return _sliceAndConcat<int64_t>(self, obj, dim);
    case framework::proto::VarType::BOOL:
      return _sliceAndConcat<bool>(self, obj, dim);
    case framework::proto::VarType::UINT8:
      return _sliceAndConcat<uint8_t>(self, obj, dim);
    default:
      PADDLE_THROW(platform::errors::InvalidArgument(
          "Not support tensor type: %s",
          framework::DataTypeToString(src_type)));
  }
}

inline framework::Tensor *_pySliceTensor(const framework::Tensor &self,
                                         py::object obj) {
  if (py::isinstance<py::tuple>(obj)) {
    py::list l = static_cast<py::list>(obj);
    std::unique_ptr<framework::Tensor> target;
    framework::Tensor *src = const_cast<framework::Tensor *>(&self);
    for (auto i = 0; i < static_cast<int>(l.size()); ++i) {
      src = _sliceTensor(*src, l[i], i);
      if (i + 1 == static_cast<int>(l.size())) {
        return src;
      } else {
        target.reset(src);
      }
    }
    return nullptr;
  } else {
    return _sliceTensor(self, obj, 0);
  }
}

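// Entry point used for slicing a Tensor from Python: GPU tensors are first
// copied to the host, sliced there via _pySliceTensor, and the result is
// copied back to the original place.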
inline framework::Tensor *PySliceTensor(const framework::Tensor &self,
                                        py::object obj) {
  if (platform::is_gpu_place(self.place())) {
    std::unique_ptr<framework::Tensor> holder;
    framework::Tensor src;
    framework::TensorCopySync(self, platform::CPUPlace(), &src);
    framework::Tensor *output = _pySliceTensor(src, obj);
    holder.reset(output);
    framework::Tensor *dst = _getTensor(*output, output->dims());
    framework::TensorCopySync(*output, self.place(), dst);
    return dst;
  } else {
    return _pySliceTensor(self, obj);
  }
}

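// Converts a Tensor to a numpy array. A CPU tensor can share its buffer with
// the returned array (when need_deep_copy is false), with the tensor kept
// alive as the array's base object; device tensors (XPU/GPU/NPU/MLU) are
// always copied back to host memory first.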
inline py::array TensorToPyArray(const framework::Tensor &tensor,
                                 bool need_deep_copy = false) {
  if (!tensor.IsInitialized()) {
    return py::array();
  }
  bool is_gpu_tensor = platform::is_gpu_place(tensor.place());
  bool is_xpu_tensor = platform::is_xpu_place(tensor.place());
  bool is_npu_tensor = platform::is_npu_place(tensor.place());
  bool is_mlu_tensor = platform::is_mlu_place(tensor.place());
  const auto &tensor_dims = tensor.dims();
  auto tensor_dtype = tensor.type();
  size_t sizeof_dtype = framework::SizeOfType(tensor_dtype);

  std::vector<size_t> py_dims(tensor_dims.size());
  std::vector<size_t> py_strides(tensor_dims.size());

  size_t numel = 1;
  for (int i = tensor_dims.size() - 1; i >= 0; --i) {
    py_dims[i] = static_cast<size_t>(tensor_dims[i]);
    py_strides[i] = sizeof_dtype * numel;
    numel *= py_dims[i];
  }

  const void *tensor_buf_ptr = tensor.data();

  std::string py_dtype_str = details::TensorDTypeToPyDTypeStr(tensor.type());

  if (!is_gpu_tensor && !is_xpu_tensor && !is_npu_tensor && !is_mlu_tensor) {
    if (!need_deep_copy) {
      auto base = py::cast(std::move(tensor));
      return py::array(py::dtype(py_dtype_str.c_str()), py_dims, py_strides,
                       const_cast<void *>(tensor_buf_ptr), base);
    } else {
      py::array py_arr(py::dtype(py_dtype_str.c_str()), py_dims, py_strides);
      PADDLE_ENFORCE_EQ(
          py_arr.writeable(), true,
          platform::errors::InvalidArgument(
              "PyArray is not writable, in which case memory leak "
              "or double free would occur"));
      PADDLE_ENFORCE_EQ(
          py_arr.owndata(), true,
          platform::errors::InvalidArgument(
              "PyArray does not own data, in which case  memory leak "
              "or double free would occur"));
      platform::CPUPlace place;
      size_t copy_bytes = sizeof_dtype * numel;
      paddle::memory::Copy(place, py_arr.mutable_data(), place, tensor_buf_ptr,
                           copy_bytes);
      return py_arr;
    }
  } else if (is_xpu_tensor) {
#ifdef PADDLE_WITH_XPU
    py::array py_arr(py::dtype(py_dtype_str.c_str()), py_dims, py_strides);
    PADDLE_ENFORCE_EQ(py_arr.writeable(), true,
                      platform::errors::InvalidArgument(
                          "PyArray is not writable, in which case memory leak "
                          "or double free would occur"));
    PADDLE_ENFORCE_EQ(
        py_arr.owndata(), true,
        platform::errors::InvalidArgument(
            "PyArray does not own data, in which case  memory leak "
            "or double free would occur"));

    size_t copy_bytes = sizeof_dtype * numel;
    auto p = tensor.place();
    paddle::memory::Copy(platform::CPUPlace(), py_arr.mutable_data(), p,
                         tensor_buf_ptr, copy_bytes);
    return py_arr;
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Cannot use XPUPlace in CPU/GPU version, "
        "Please recompile or reinstall Paddle with XPU support."));
#endif
  } else if (is_gpu_tensor) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    py::array py_arr(py::dtype(py_dtype_str.c_str()), py_dims, py_strides);
    PADDLE_ENFORCE_EQ(py_arr.writeable(), true,
                      platform::errors::InvalidArgument(
                          "PyArray is not writable, in which case memory leak "
                          "or double free would occur"));
    PADDLE_ENFORCE_EQ(
        py_arr.owndata(), true,
        platform::errors::InvalidArgument(
            "PyArray does not own data, in which case  memory leak "
            "or double free would occur"));

    size_t copy_bytes = sizeof_dtype * numel;
    auto p = tensor.place();
    paddle::memory::Copy(platform::CPUPlace(), py_arr.mutable_data(), p,
                         tensor_buf_ptr, copy_bytes, nullptr);
    return py_arr;
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Cannot use CUDAPlace in CPU only version, "
        "Please recompile or reinstall Paddle with CUDA support."));
#endif
  } else if (is_npu_tensor) {
#ifdef PADDLE_WITH_ASCEND_CL
    py::array py_arr(py::dtype(py_dtype_str.c_str()), py_dims, py_strides);
    PADDLE_ENFORCE_EQ(py_arr.writeable(), true,
                      platform::errors::InvalidArgument(
                          "PyArray is not writable, in which case memory leak "
                          "or double free would occur"));
    PADDLE_ENFORCE_EQ(
        py_arr.owndata(), true,
        platform::errors::InvalidArgument(
            "PyArray does not own data, in which case  memory leak "
            "or double free would occur"));

    size_t copy_bytes = sizeof_dtype * numel;
    auto p = tensor.place();
    platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
    auto &ctx = *pool.Get(tensor.place());
    paddle::memory::Copy(
        platform::CPUPlace(), py_arr.mutable_data(), p, tensor_buf_ptr,
        copy_bytes,
        reinterpret_cast<const platform::NPUDeviceContext &>(ctx).stream());
    ctx.Wait();
    return py_arr;
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Cannot use NPUPlace in CPU/GPU/XPU version, "
        "Please recompile or reinstall Paddle with NPU support."));
#endif
  } else if (is_mlu_tensor) {
#ifdef PADDLE_WITH_MLU
    py::array py_arr(py::dtype(py_dtype_str.c_str()), py_dims, py_strides);
    PADDLE_ENFORCE_EQ(py_arr.writeable(), true,
                      platform::errors::InvalidArgument(
                          "PyArray is not writable, in which case memory leak "
                          "or double free would occur"));
    PADDLE_ENFORCE_EQ(
        py_arr.owndata(), true,
        platform::errors::InvalidArgument(
            "PyArray does not own data, in which case  memory leak "
            "or double free would occur"));

    size_t copy_bytes = sizeof_dtype * numel;
    auto p = tensor.place();
    platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
    auto &ctx = *pool.Get(tensor.place());
    paddle::memory::Copy(
        platform::CPUPlace(), py_arr.mutable_data(), p, tensor_buf_ptr,
        copy_bytes,
        reinterpret_cast<const platform::MLUDeviceContext &>(ctx).stream());
    ctx.Wait();
    return py_arr;
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Cannot use MLUPlace in CPU/GPU/XPU/NPU version, "
        "Please recompile or reinstall Paddle with MLU support."));
#endif
  }
  PADDLE_THROW(platform::errors::Unimplemented("Place is not supported"));
  return py::array();
}

}  // namespace pybind
}  // namespace paddle