/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <Python.h>

#include <algorithm>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/operators/eigen/eigen_function.h"
#include "paddle/fluid/operators/math/concat_and_split.h"
#include "paddle/fluid/operators/strided_memcpy.h"
#include "paddle/fluid/platform/bfloat16.h"
#include "paddle/fluid/platform/device/device_wrapper.h"
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
#include "paddle/fluid/platform/cuda_device_guard.h"
#endif
#include "paddle/fluid/framework/convert_utils.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/float16.h"
#include "paddle/fluid/platform/profiler/event_tracing.h"
#include "paddle/phi/common/pstring.h"
#include "paddle/phi/core/string_tensor.h"
#include "paddle/phi/kernels/strings/unicode.h"
#include "pybind11/numpy.h"
#include "pybind11/pybind11.h"

namespace py = pybind11;

namespace pybind11 {
namespace detail {

// Note: use the same enum number as numpy.float16.
// import numpy as np
// print(np.dtype(np.float16).num)  # 23
constexpr int NPY_FLOAT16_ = 23;
constexpr int NPY_UINT16_ = 4;
constexpr int NPY_COMPLEX64 = 14;
constexpr int NPY_COMPLEX128 = 15;

// Cast a numpy array from type S to T; this may allocate new memory.
template <class T, class S>
static py::array_t<T> CastNumpyType(py::array_t<S> array) {
  if (std::is_same<T, S>::value) {
    return array;
  }
  // py::vectorize allocates the result array with the input's shape.
  return py::vectorize([](S s) { return static_cast<T>(s); })(array);
}

template <class T>
static py::array_t<T> CastNumpyArray(const py::object &array) {
  if (py::isinstance<py::array_t<float>>(array)) {
    return CastNumpyType<T>(array.cast<py::array_t<float>>());
  } else if (py::isinstance<py::array_t<double>>(array)) {
    return CastNumpyType<T>(array.cast<py::array_t<double>>());
  } else if (py::isinstance<py::array_t<int32_t>>(array)) {
    return CastNumpyType<T>(array.cast<py::array_t<int32_t>>());
  } else if (py::isinstance<py::array_t<int64_t>>(array)) {
    return CastNumpyType<T>(array.cast<py::array_t<int64_t>>());
  } else if (py::isinstance<py::array_t<bool>>(array)) {
    return CastNumpyType<T>(array.cast<py::array_t<bool>>());
  } else {
    PADDLE_THROW(paddle::platform::errors::InvalidArgument(
        "Value type error. The assign numpy value allows integer, float, "
        "double and bool, "
        "but received %s.",
        Py_TYPE(array.ptr())->tp_name));
  }
  // can't reach here
  return py::array_t<T>();
}
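
// Example (illustrative sketch, not part of the public API): normalize an
// incoming float64 numpy array to float32 before assigning it to a tensor.
//   py::array_t<double> src({2, 3});
//   py::array_t<float> dst = CastNumpyArray<float>(src);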

// Note: Since float16 is not a builtin type in C++, we register
// paddle::platform::float16 as numpy.float16.
// Ref: https://github.com/pybind/pybind11/issues/1776
template <>
struct npy_format_descriptor<paddle::platform::float16> {
  static py::dtype dtype() {
    handle ptr = npy_api::get().PyArray_DescrFromType_(NPY_FLOAT16_);
    return reinterpret_borrow<py::dtype>(ptr);
  }
  static std::string format() {
    // Note: "e" represents float16.
    // Details at:
    // https://docs.python.org/3/library/struct.html#format-characters.
    return "e";
  }
  static constexpr auto name = _("float16");
};

// Note: Since bfloat16 is not a builtin type in C++ and in numpy,
// we register paddle::platform::bfloat16 as numpy.uint16.
template <>
struct npy_format_descriptor<paddle::platform::bfloat16> {
  static py::dtype dtype() {
    handle ptr = npy_api::get().PyArray_DescrFromType_(NPY_UINT16_);
    return reinterpret_borrow<py::dtype>(ptr);
  }
  static std::string format() {
    // Note: "H" represents UINT16.
    // Details at:
    // https://docs.python.org/3/library/struct.html#format-characters.
    return "H";
  }
  static constexpr auto name = _("bfloat16");
};

// we register paddle::platform::complex<float> as numpy.complex64.
template <>
struct npy_format_descriptor<paddle::platform::complex<float>> {
  static py::dtype dtype() {
    handle ptr = npy_api::get().PyArray_DescrFromType_(NPY_COMPLEX64);
    return reinterpret_borrow<py::dtype>(ptr);
  }

  static std::string format() {
    // Note: "F" represents complex64.
    // Details at:
    // https://stackoverflow.com/questions/13997087/what-are-the-available-datatypes-for-dtype-with-numpys-loadtxt-an-genfromtx
    // for k, v in np.sctypeDict.items():
    //     print('{0:14s} : {1:40s}'.format(str(k), v))
    return "F";
  }
  static constexpr auto name = _("complext64");
};

template <>
struct npy_format_descriptor<paddle::platform::complex<double>> {
  static py::dtype dtype() {
    handle ptr = npy_api::get().PyArray_DescrFromType_(NPY_COMPLEX128);
    return reinterpret_borrow<py::dtype>(ptr);
  }

  static std::string format() {
    // Note: "D" represents complex128.
    // Details at:
    // https://stackoverflow.com/questions/13997087/what-are-the-available-datatypes-for-dtype-with-numpys-loadtxt-an-genfromtx
    // for k, v in np.sctypeDict.items():
    //     print('{0:14s} : {1:40s}'.format(str(k), v))
    return "D";
  }
  static constexpr auto name = _("complext128");
};
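
// Example (illustrative): with the descriptors above registered, pybind11
// maps paddle::platform::float16 buffers to numpy.float16 arrays, e.g.
//   py::array_t<paddle::platform::float16> half_arr({8});
//   // half_arr.dtype() appears as numpy.float16 on the Python side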

}  // namespace detail
}  // namespace pybind11

namespace paddle {
namespace pybind {

namespace details {

template <typename T>
class PYBIND11_HIDDEN NumpyAllocation : public memory::Allocation {
 public:
  explicit NumpyAllocation(const py::array &arr)
184 185
      : Allocation(const_cast<void *>(arr.data()),
                   sizeof(T) * (arr.size()),
186 187
                   paddle::platform::CPUPlace()),
        arr_(arr.ptr()) {
188 189 190 191
    PADDLE_ENFORCE_NOT_NULL(
        arr_,
        platform::errors::InvalidArgument("The underlying PyObject pointer of "
                                          "numpy array cannot be nullptr"));
192
    PADDLE_ENFORCE_NE(
193 194
        arr_,
        Py_None,
195 196 197 198 199 200 201 202 203 204 205 206 207
        platform::errors::PreconditionNotMet(
            "The underlying PyObject pointer of numpy array cannot be None"));
    Py_INCREF(arr_);
  }
  ~NumpyAllocation() override {
    py::gil_scoped_acquire gil;
    Py_DECREF(arr_);
  }

 private:
  PyObject *arr_;
};

template <typename T>
struct ValidDTypeToPyArrayChecker {
  static constexpr bool kValue = false;
};

#define DECLARE_VALID_DTYPE_TO_PY_ARRAY(type) \
  template <>                                 \
  struct ValidDTypeToPyArrayChecker<type> {   \
    static constexpr bool kValue = true;      \
  }

DECLARE_VALID_DTYPE_TO_PY_ARRAY(platform::float16);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(platform::bfloat16);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(platform::complex<float>);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(platform::complex<double>);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(float);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(double);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(bool);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(int8_t);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(int16_t);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(int);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(int64_t);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(uint8_t);

inline std::string TensorDTypeToPyDTypeStr(
    framework::proto::VarType::Type type) {
#define TENSOR_DTYPE_TO_PY_DTYPE(T, proto_type)                             \
  if (type == proto_type) {                                                 \
    if (std::is_same<T, platform::float16>::value) {                        \
      return "e";                                                           \
    } else if (std::is_same<T, platform::bfloat16>::value) {                \
      /* NumPy character code of uint16 due to no support for bfloat16 */   \
      return "H";                                                           \
    } else if (std::is_same<T, platform::complex<float>>::value) {          \
      return "F";                                                           \
    } else if (std::is_same<T, platform::complex<double>>::value) {         \
      return "D";                                                           \
    } else {                                                                \
      constexpr auto kIsValidDType = ValidDTypeToPyArrayChecker<T>::kValue; \
      PADDLE_ENFORCE_EQ(                                                    \
          kIsValidDType,                                                    \
          true,                                                             \
          platform::errors::Unimplemented(                                  \
              "This type [%s] of tensor cannot be exposed to Python",       \
              typeid(T).name()));                                           \
      return py::format_descriptor<T>::format();                            \
    }                                                                       \
  }

  _ForEachDataType_(TENSOR_DTYPE_TO_PY_DTYPE);
#undef TENSOR_DTYPE_TO_PY_DTYPE
  PADDLE_THROW(platform::errors::Unimplemented(
      "Unsupported tensor data type: %s", framework::DataTypeToString(type)));
}
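
// Example (illustrative): the returned format characters follow
// https://docs.python.org/3/library/struct.html#format-characters, e.g.
//   TensorDTypeToPyDTypeStr(framework::proto::VarType::FP16) == "e"
//   TensorDTypeToPyDTypeStr(framework::proto::VarType::BF16) == "H"  // uint16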

}  // namespace details

template <typename T>
T TensorGetElement(const framework::Tensor &self, size_t offset) {
  PADDLE_ENFORCE_LT(offset,
                    self.numel(),
                    platform::errors::InvalidArgument(
                        "The offset exceeds the size of tensor."));

  T b = static_cast<T>(0);
  if (platform::is_cpu_place(self.place())) {
    b = self.data<T>()[offset];
  } else if (platform::is_xpu_place(self.place())) {
#ifdef PADDLE_WITH_XPU
    const T *a = self.data<T>();
    auto p = self.place();
    paddle::memory::Copy(platform::CPUPlace(), &b, p, a + offset, sizeof(T));
#endif
  } else if (platform::is_gpu_place(self.place())) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    const T *a = self.data<T>();
    auto p = self.place();
    paddle::memory::Copy(
        platform::CPUPlace(), &b, p, a + offset, sizeof(T), nullptr);
#endif
  } else if (platform::is_mlu_place(self.place())) {
#ifdef PADDLE_WITH_MLU
    const T *a = self.data<T>();
    auto p = self.place();
    paddle::memory::Copy(
        platform::CPUPlace(), &b, p, a + offset, sizeof(T), nullptr);
#endif
  } else if (platform::is_npu_place(self.place())) {
#if defined(PADDLE_WITH_ASCEND_CL)
    const T *a = self.data<T>();
    auto p = self.place();
    paddle::memory::Copy(
        platform::CPUPlace(), &b, p, a + offset, sizeof(T), nullptr);
#endif
  } else if (platform::is_custom_place(self.place())) {
#if defined(PADDLE_WITH_CUSTOM_DEVICE)
    const T *a = self.data<T>();
    auto p = self.place();
    paddle::memory::Copy(
        platform::CPUPlace(), &b, p, a + offset, sizeof(T), nullptr);
#endif
  }
  VLOG(10) << "TensorGetElement, place: " << self.place()
           << ", offset: " << offset << ", element: " << b;
  return b;
}
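
// Example (illustrative): reading one element from a CPU tensor; device
// tensors go through the paddle::memory::Copy paths above instead.
//   framework::Tensor t;
//   t.Resize(phi::make_ddim({4}));
//   t.mutable_data<float>(platform::CPUPlace())[2] = 3.5f;
//   float v = TensorGetElement<float>(t, 2);  // v == 3.5f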

template <typename T>
316
void TensorSetElement(framework::Tensor *self, size_t offset, T elem) {
317 318
  PADDLE_ENFORCE_LT(offset,
                    self->numel(),
319 320
                    platform::errors::InvalidArgument(
                        "The offset exceeds the size of tensor."));
321 322
  VLOG(10) << "TensorSetElement, place: " << self->place()
           << ", offset: " << offset << ", element: " << elem;
Q
qingqing01 已提交
323
  if (platform::is_cpu_place(self->place())) {
Y
Yu Yang 已提交
324
    self->mutable_data<T>(self->place())[offset] = elem;
325 326
  } else if (platform::is_xpu_place(self->place())) {
#ifdef PADDLE_WITH_XPU
327
    auto p = self->place();
328 329 330 331
    T *a = self->mutable_data<T>(p);
    paddle::memory::Copy(p, a + offset, platform::CPUPlace(), &elem, sizeof(T));
#endif
  } else if (platform::is_gpu_place(self->place())) {
332
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
333
    auto p = self->place();
Q
qingqing01 已提交
334
    T *a = self->mutable_data<T>(p);
335 336
    paddle::memory::Copy(
        p, a + offset, platform::CPUPlace(), &elem, sizeof(T), nullptr);
337 338 339
#endif
  } else if (platform::is_mlu_place(self->place())) {
#ifdef PADDLE_WITH_MLU
340
    auto p = self->place();
341
    T *a = self->mutable_data<T>(p);
342 343
    paddle::memory::Copy(
        p, a + offset, platform::CPUPlace(), &elem, sizeof(T), nullptr);
344 345 346
#endif
  } else if (platform::is_npu_place(self->place())) {
#if defined(PADDLE_WITH_ASCEND_CL)
347
    auto p = self->place();
348
    T *a = self->mutable_data<T>(p);
349 350
    paddle::memory::Copy(
        p, a + offset, platform::CPUPlace(), &elem, sizeof(T), nullptr);
351 352 353 354 355
#endif
  } else if (platform::is_custom_place(self->place())) {
#if defined(PADDLE_WITH_CUSTOM_DEVICE)
    auto p = self->place();
    T *a = self->mutable_data<T>(p);
356 357
    paddle::memory::Copy(
        p, a + offset, platform::CPUPlace(), &elem, sizeof(T), nullptr);
Q
qingqing01 已提交
358
#endif
359
  }
360 361
}
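
// Example (illustrative): the write-side counterpart of TensorGetElement.
//   TensorSetElement<float>(&t, /*offset=*/2, 7.0f);
//   // TensorGetElement<float>(t, 2) now returns 7.0f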

template <typename T, typename P>
void SetTensorFromPyArrayT(
    framework::Tensor *self,
    const py::array_t<T, py::array::c_style | py::array::forcecast> &array,
    const P &place,
    bool zero_copy) {
  std::vector<int64_t> dims;
  dims.reserve(array.ndim());
  for (decltype(array.ndim()) i = 0; i < array.ndim(); ++i) {
    dims.push_back(static_cast<int64_t>(array.shape()[i]));
  }
  self->Resize(phi::make_ddim(dims));

  if (paddle::platform::is_cpu_place(place)) {
    if (zero_copy) {
      auto holder = std::make_shared<details::NumpyAllocation<T>>(array);
      auto type = framework::ToDataType(std::type_index(typeid(T)));
      self->ResetHolderWithType(holder, framework::TransToPhiDataType(type));
    } else {
      auto dst = self->mutable_data<T>(place);
      std::memcpy(dst, array.data(), array.nbytes());
    }
  } else if (paddle::platform::is_xpu_place(place)) {
#ifdef PADDLE_WITH_XPU
    // NOTE(wangxi): When copying data to the accelerator card,
    // we need set_device(dev_id) first.
    platform::Place tmp_place = place;
    platform::XPUDeviceGuard guard(tmp_place.device);
    auto dst = self->mutable_data<T>(place);
    memory::Copy(tmp_place,
                 static_cast<void *>(dst),
                 platform::CPUPlace(),
                 static_cast<const void *>(array.data()),
                 array.nbytes());
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Cannot use XPUPlace in CPU/GPU version, "
        "Please recompile or reinstall Paddle with XPU support."));
#endif
  } else if (paddle::platform::is_ipu_place(place)) {
#ifdef PADDLE_WITH_IPU
    if (zero_copy) {
      auto holder = std::make_shared<details::NumpyAllocation<T>>(array);
      auto type = framework::ToDataType(std::type_index(typeid(T)));
      self->ResetHolderWithType(holder, framework::TransToPhiDataType(type));
    } else {
      // IPU does not store Tensor data, Tensor will be created on CPU
      if (!self->initialized()) {
        auto dst = self->mutable_data<T>(place);
        std::memcpy(dst, array.data(), array.nbytes());
      } else {
        auto dst = self->mutable_data<T>(self->place());
        std::memcpy(dst, array.data(), array.nbytes());
      }
    }
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Cannot use IPUPlace in CPU/GPU/XPU/NPU version, "
        "Please recompile or reinstall Paddle with IPU support."));
#endif
  } else if (paddle::platform::is_npu_place(place)) {
#ifdef PADDLE_WITH_ASCEND_CL
    platform::Place tmp_place = place;
    platform::NPUDeviceGuard guard(tmp_place.device);
    auto dst = self->mutable_data<T>(place);
    platform::NPUMemcpySync(
        dst, array.data(), array.nbytes(), ACL_MEMCPY_HOST_TO_DEVICE);
    platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
    auto &ctx = *pool.Get(place);
    ctx.Wait();
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Cannot use NPUPlace in CPU/GPU/XPU version. "
        "Please recompile or reinstall Paddle with NPU support."));
#endif
  } else if (paddle::platform::is_mlu_place(place)) {
#ifdef PADDLE_WITH_MLU
    platform::Place tmp_place = place;
    platform::MLUDeviceGuard guard(tmp_place.device);
    auto dst = self->mutable_data<T>(place);
    platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
    auto dev_ctx = static_cast<platform::MLUDeviceContext *>(pool.Get(place));
    paddle::platform::MLUMemcpyH2DAsync(
        dst, array.data(), array.nbytes(), dev_ctx->stream());
    dev_ctx->Wait();
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Cannot use MLUPlace in CPU/GPU version, "
        "Please recompile or reinstall Paddle with MLU support."));
#endif
  } else if (paddle::platform::is_custom_place(place)) {
#ifdef PADDLE_WITH_CUSTOM_DEVICE
    platform::Place tmp_place = place;
    phi::DeviceGuard guard(tmp_place);
    auto dst = self->mutable_data<T>(place);

    phi::DeviceManager::GetDeviceWithPlace(tmp_place)->MemoryCopyH2D(
        reinterpret_cast<void *>(dst),
        const_cast<void *>(reinterpret_cast<const void *>(array.data())),
        array.nbytes());
    platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
    auto &ctx = *pool.Get(place);
    ctx.Wait();
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Cannot use CustomDevice in CPU/GPU/XPU version. "
        "Please recompile or reinstall Paddle with CustomDevice support."));
#endif
  } else {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    if (paddle::platform::is_gpu_place(place)) {
      // NOTE(wangxi): When copying data to the accelerator card,
      // we need set_device(dev_id) first.
      platform::CUDADeviceGuard guard(place.device);
      auto dst = self->mutable_data<T>(place);
#ifdef PADDLE_WITH_HIP
      paddle::platform::GpuMemcpySync(
          dst, array.data(), array.nbytes(), hipMemcpyHostToDevice);
#else
      paddle::platform::GpuMemcpySync(
          dst, array.data(), array.nbytes(), cudaMemcpyHostToDevice);
#endif

    } else if (paddle::platform::is_cuda_pinned_place(place)) {
      auto dst = self->mutable_data<T>(place);
      std::memcpy(dst, array.data(), array.nbytes());
    } else {
      PADDLE_THROW(platform::errors::InvalidArgument(
          "Incompatible place type: Tensor.set() supports "
          "CPUPlace, CUDAPlace "
          "and CUDAPinnedPlace, but got %s!",
          place));
    }
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Cannot use CUDAPlace or CUDAPinnedPlace in CPU only version, "
        "Please recompile or reinstall Paddle with CUDA support."));
#endif
  }
}

template <typename P>
504 505 506 507
void SetTensorFromPyArray(framework::Tensor *self,
                          const py::object &obj,
                          const P &place,
                          bool zero_copy) {
508
  auto array = obj.cast<py::array>();
509
  if (py::isinstance<py::array_t<float>>(array)) {
510
    SetTensorFromPyArrayT<float, P>(self, array, place, zero_copy);
511
  } else if (py::isinstance<py::array_t<int>>(array)) {
512
    SetTensorFromPyArrayT<int, P>(self, array, place, zero_copy);
513
  } else if (py::isinstance<py::array_t<int64_t>>(array)) {
514
    SetTensorFromPyArrayT<int64_t, P>(self, array, place, zero_copy);
515
  } else if (py::isinstance<py::array_t<double>>(array)) {
516
    SetTensorFromPyArrayT<double, P>(self, array, place, zero_copy);
517
  } else if (py::isinstance<py::array_t<int8_t>>(array)) {
518
    SetTensorFromPyArrayT<int8_t, P>(self, array, place, zero_copy);
L
Leo Chen 已提交
519 520
  } else if (py::isinstance<py::array_t<int16_t>>(array)) {
    SetTensorFromPyArrayT<int16_t, P>(self, array, place, zero_copy);
521
  } else if (py::isinstance<py::array_t<uint8_t>>(array)) {
522
    SetTensorFromPyArrayT<uint8_t, P>(self, array, place, zero_copy);
523
  } else if (py::isinstance<py::array_t<paddle::platform::float16>>(array)) {
524 525
    SetTensorFromPyArrayT<paddle::platform::float16, P>(
        self, array, place, zero_copy);
526 527 528 529 530 531 532 533
  } else if (py::isinstance<py::array_t<paddle::platform::complex<float>>>(
                 array)) {
    SetTensorFromPyArrayT<paddle::platform::complex<float>, P>(
        self, array, place, zero_copy);
  } else if (py::isinstance<py::array_t<paddle::platform::complex<double>>>(
                 array)) {
    SetTensorFromPyArrayT<paddle::platform::complex<double>, P>(
        self, array, place, zero_copy);
534
  } else if (py::isinstance<py::array_t<uint16_t>>(array)) {
535 536
    // since there is still no support for bfloat16 in NumPy,
    // uint16 is used for casting bfloat16
537 538
    SetTensorFromPyArrayT<paddle::platform::bfloat16, P>(
        self, array, place, zero_copy);
539
  } else if (py::isinstance<py::array_t<bool>>(array)) {
540
    SetTensorFromPyArrayT<bool, P>(self, array, place, zero_copy);
541
  } else {
542 543
    // obj may be any type, obj.cast<py::array>() may be failed,
    // then the array.dtype will be string of unknown meaning,
544
    PADDLE_THROW(platform::errors::InvalidArgument(
545 546 547 548
        "Input object type error or incompatible array data type. "
        "tensor.set() supports array with bool, float16, float32, "
        "float64, int8, int16, int32, int64, uint8 or uint16, "
        "please check your input or input array data type."));
549 550 551
  }
}
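
// Example (illustrative sketch): copying a numpy object received from Python
// into a CPU tensor; `obj` is a hypothetical py::object holding a numpy array.
//   framework::Tensor t;
//   SetTensorFromPyArray<platform::CPUPlace>(
//       &t, obj, platform::CPUPlace(), /*zero_copy=*/false);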

template <typename P>
553 554
void SetStringTensorFromPyArray(phi::StringTensor *self,
                                const py::array &array,
J
Jack Zhou 已提交
555 556 557
                                const P &place) {
  bool is_string_pyarray =
      array.dtype().kind() == 'S' || array.dtype().kind() == 'U';
558 559
  PADDLE_ENFORCE_EQ(is_string_pyarray,
                    true,
J
Jack Zhou 已提交
560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596
                    platform::errors::InvalidArgument(
                        "Expect the dtype of numpy array is string or "
                        "unicode, but recevie dtype %s",
                        array.dtype()));
  std::vector<int64_t> dims;
  dims.reserve(array.ndim());
  dims.reserve(array.ndim());
  for (decltype(array.ndim()) i = 0; i < array.ndim(); ++i) {
    dims.push_back(static_cast<int>(array.shape()[i]));
  }
  self->Resize(phi::make_ddim(dims));
  auto itemsize = array.itemsize();
  if (paddle::platform::is_cpu_place(place)) {
    auto dst = self->mutable_data(place);
    if (array.dtype().kind() == 'S') {
      for (int i = 0; i < self->numel(); ++i) {
        dst[i] =
            pstring(reinterpret_cast<const char *>(array.data()) + itemsize * i,
                    itemsize);
      }
    } else {
      // array.dtype().kind() == 'U'
      VLOG(6) << "numpy array itemsize: " << itemsize;
      for (int i = 0; i < self->numel(); ++i) {
        // Note(zhoushunjie): The itemsize of unicode numpy array is the
        // the size of each unicode string. Each unicode string is aligned
        // to max length of the array of unicode strings, so the size of
        // each unicode string is same. The size of each unicode character is
        // 4, so the size of unicode string is 4 times of the length of
        // unicode string.
        auto unicode_len = itemsize / 4;
        auto utf8_len = phi::strings::GetUTF8StrLen(
            reinterpret_cast<const uint32_t *>(array.data()) + unicode_len * i,
            unicode_len);
        pstring pstr(utf8_len - 1, 0);
        phi::strings::GetUTF8Str(
            reinterpret_cast<const uint32_t *>(array.data()) + unicode_len * i,
597 598
            pstr.mdata(),
            unicode_len);
J
Jack Zhou 已提交
599 600 601 602 603 604 605 606 607 608
        dst[i] = pstr;
      }
    }
  } else {
    PADDLE_THROW(platform::errors::InvalidArgument(
        "StringTensor only support CPUPlace now, but receive %s",
        place.DebugString()));
  }
}
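
// Example (illustrative): a numpy unicode array such as
// np.array(["ab", "cdef"]) arrives with dtype '<U4' and itemsize 16
// (4 code points x 4 bytes each); every element is converted above into a
// UTF-8 pstring of its actual length.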

template <typename T>
610
void SetUVATensorFromPyArrayImpl(framework::LoDTensor *self_tensor,
611 612
                                 const py::array_t<T> &array,
                                 int device_id) {
S
Siming Dai 已提交
613
#if defined(PADDLE_WITH_CUDA)
614
  VLOG(4) << "Running in SetUVATensorFromPyArrayImpl.";
S
Siming Dai 已提交
615 616 617 618
  std::vector<int64_t> dims;
  dims.reserve(array.ndim());
  int64_t numel = 1;
  for (decltype(array.ndim()) i = 0; i < array.ndim(); ++i) {
619 620
    dims.emplace_back(static_cast<int64_t>(array.shape()[i]));
    numel *= static_cast<int64_t>(array.shape()[i]);
S
Siming Dai 已提交
621
  }
622
  self_tensor->Resize(phi::make_ddim(dims));
S
Siming Dai 已提交
623 624 625 626

  auto data_type = framework::ToDataType(std::type_index(typeid(T)));
  const auto &need_allocate_size = numel * framework::SizeOfType(data_type);
  T *data_ptr;
627 628
  cudaHostAlloc(reinterpret_cast<void **>(&data_ptr),
                need_allocate_size,
S
Siming Dai 已提交
629 630 631 632 633
                cudaHostAllocWriteCombined | cudaHostAllocMapped);
  std::memcpy(data_ptr, array.data(), array.nbytes());

  void *cuda_device_pointer = nullptr;
  cudaHostGetDevicePointer(reinterpret_cast<void **>(&cuda_device_pointer),
634 635
                           reinterpret_cast<void *>(data_ptr),
                           0);
S
Siming Dai 已提交
636 637
  std::shared_ptr<memory::allocation::Allocation> holder =
      std::make_shared<memory::allocation::Allocation>(
638 639
          cuda_device_pointer,
          need_allocate_size,
S
Siming Dai 已提交
640
          platform::CUDAPlace(device_id));
641
  self_tensor->ResetHolderWithType(holder,
642
                                   framework::TransToPhiDataType(data_type));
S
Siming Dai 已提交
643 644 645
#endif
}
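
// Note: the tensor above is backed by page-locked host memory mapped into
// the device address space (CUDA mapped/pinned memory), so kernels on
// CUDAPlace(device_id) can access it without an explicit H2D copy.
// Usage sketch (hypothetical `uva_tensor`):
//   py::array_t<float> arr({1024});
//   SetUVATensorFromPyArrayImpl<float>(&uva_tensor, arr, /*device_id=*/0);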

template <typename T>
void SetUVATensorFromPyArray(
    const std::shared_ptr<paddle::imperative::VarBase> &self,
    const py::array_t<T> &array,
    int device_id) {
#if defined(PADDLE_WITH_CUDA)
  VLOG(4) << "Running in SetUVATensorFromPyArray for VarBase.";
  auto *self_tensor = self->MutableVar()->GetMutable<framework::LoDTensor>();
  SetUVATensorFromPyArrayImpl<T>(self_tensor, array, device_id);
#endif
}

template <typename T>
void SetUVATensorFromPyArray(
    const std::shared_ptr<paddle::experimental::Tensor> &self,
    const py::array_t<T> &array,
    int device_id) {
#if defined(PADDLE_WITH_CUDA)
  VLOG(4) << "Running in SetUVATensorFromPyArray for Phi::Tensor.";
  phi::DenseTensorMeta meta =
      phi::DenseTensorMeta(phi::DataType::FLOAT32, phi::make_ddim({1, 1}));
  std::shared_ptr<phi::DenseTensor> tmp_t = std::make_shared<phi::DenseTensor>(
      std::make_unique<paddle::experimental::DefaultAllocator>(
          paddle::platform::CPUPlace())
          .get(),
      meta);
  self.get()->set_impl(tmp_t);
  auto *self_tensor =
      static_cast<paddle::framework::LoDTensor *>(self.get()->impl().get());

  SetUVATensorFromPyArrayImpl<T>(self_tensor, array, device_id);
#endif
}

template <typename T, size_t D>
void _sliceCompute(const framework::Tensor *in,
                   framework::Tensor *out,
                   const phi::CPUContext &ctx,
                   const std::vector<int> &axes,
                   const std::vector<int> &starts) {
  auto &eigen_place = *ctx.eigen_device();
  auto out_dims = out->dims();
  auto in_dims = in->dims();

  auto offsets = Eigen::DSizes<Eigen::DenseIndex, D>();
  auto extents = Eigen::DSizes<Eigen::DenseIndex, D>();
  for (size_t i = 0; i < D; ++i) {
    offsets[i] = 0;
    extents[i] = out_dims[i];
  }
  int start;
  for (size_t i = 0; i < axes.size(); ++i) {
    start = starts[i];
    if (start < 0) {
      start = (start + in_dims[axes[i]]);
    }
    start = std::max(start, 0);
    offsets[axes[i]] = start;
  }
  auto in_t =
      framework::EigenTensor<T, D, Eigen::RowMajor, Eigen::DenseIndex>::From(
          *in);
  auto out_t =
      framework::EigenTensor<T, D, Eigen::RowMajor, Eigen::DenseIndex>::From(
          *out);
  operators::EigenSlice<std::decay_t<decltype(eigen_place)>, T, D>::Eval(
      eigen_place, out_t, in_t, offsets, extents);
}

template <typename T>
void _concatCompute(const std::vector<paddle::framework::Tensor> &ins,
                    paddle::framework::Tensor *out,
L
Leo Chen 已提交
718
                    const phi::CPUContext &ctx,
719
                    int64_t axis) {
W
wopeizl 已提交
720 721 722
  if (axis == 0 && ins.size() < 10) {
    size_t output_offset = 0;
    for (auto &in : ins) {
723 724
      auto in_stride = phi::stride_numel(in.dims());
      auto out_stride = phi::stride_numel(out->dims());
W
wopeizl 已提交
725
      paddle::operators::StridedNumelCopyWithAxis<T>(
726 727 728 729 730 731 732
          ctx,
          axis,
          out->data<T>() + output_offset,
          out_stride,
          in.data<T>(),
          in_stride,
          in_stride[axis]);
W
wopeizl 已提交
733 734 735
      output_offset += in_stride[axis];
    }
  } else {
L
Leo Chen 已提交
736
    paddle::operators::math::ConcatFunctor<phi::CPUContext, T> concat_functor;
W
wopeizl 已提交
737 738 739 740
    concat_functor(ctx, ins, static_cast<int>(axis), out);
  }
}

inline void _getSliceinfo(const framework::Tensor &self,
                          py::object obj,
                          const int64_t dim,
                          int64_t *pstart,
                          int64_t *pstop,
                          int64_t *pstep,
                          int64_t *pslicelength) {
  auto &start = *pstart;
  auto &stop = *pstop;
  auto &step = *pstep;
  auto &slicelength = *pslicelength;
  const framework::DDim &srcDDim = self.dims();
  PADDLE_ENFORCE(
      0 <= dim && dim < srcDDim.size(),
      platform::errors::OutOfRange("The dim %d of slice is out of bounds, it "
                                   "should be in the range of [0, %d).",
                                   dim,
                                   srcDDim.size()));

  if (py::isinstance<py::slice>(obj)) {
    size_t lstart, lstop, lstep, lslicelength;
    py::slice s = static_cast<py::slice>(obj);
    if (!s.compute(srcDDim[dim], &lstart, &lstop, &lstep, &lslicelength)) {
      PADDLE_THROW(platform::errors::OutOfRange(
          "Slice on dim: %d is invalid, please check the validity of tensor "
          "dims or slice item.",
          dim));
    }
    start = static_cast<int64_t>(lstart);
    stop = static_cast<int64_t>(lstop);
    step = static_cast<int64_t>(lstep);
    slicelength = static_cast<int64_t>(lslicelength);
  } else if (py::isinstance<py::int_>(obj)) {
    start = static_cast<int64_t>(static_cast<py::int_>(obj));
    PADDLE_ENFORCE(
        std::abs(start) < srcDDim[dim],
        platform::errors::OutOfRange("The start %d of slice is out of bounds, "
                                     "it should be in the range of (%d, %d).",
                                     start,
                                     -srcDDim[dim],
                                     srcDDim[dim]));
    // A negative index counts from the end of the dimension.
    start = (start >= 0) ? start : srcDDim[dim] + start;
    stop = start + 1;
    step = 1;
    slicelength = 1;
  } else {
    PADDLE_THROW(
        platform::errors::OutOfRange("Index object error, the index object for "
                                     "slice only supports slice(::) and int."));
  }
}
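
// Example (illustrative): for a dimension of size 10, the Python slice
// 1:7:2 yields start = 1, stop = 7, step = 2, slicelength = 3, while the
// integer index -1 yields start = 9, stop = 10, step = 1, slicelength = 1.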

inline framework::Tensor *_getTensor(const framework::Tensor &self,
                                     const framework::DDim &ddim) {
  framework::Tensor *output = new framework::Tensor();
  output->Resize(ddim);
  auto place = self.place();
  if (platform::is_cpu_place(place)) {
    output->mutable_data(place, self.dtype());
  } else if (platform::is_xpu_place(place)) {
#ifdef PADDLE_WITH_XPU
    output->mutable_data(place, self.dtype());
#endif
  } else if (platform::is_mlu_place(place)) {
#ifdef PADDLE_WITH_MLU
    output->mutable_data(place, self.dtype());
#endif
  } else {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    if (platform::is_cuda_pinned_place(place)) {
      output->mutable_data(place, self.dtype());
    } else if ((platform::is_gpu_place(place))) {
      output->mutable_data(place, self.dtype());
    }
#endif
  }
  return output;
}

template <typename T>
821 822
void _sliceDapper(const framework::Tensor *in,
                  framework::Tensor *out,
L
Leo Chen 已提交
823
                  const phi::CPUContext &ctx,
824 825
                  const std::vector<int> &axes,
                  const std::vector<int> &starts,
W
wopeizl 已提交
826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855
                  int size) {
  switch (size) {
    case 1:
      _sliceCompute<T, 1>(in, out, ctx, axes, starts);
      break;
    case 2:
      _sliceCompute<T, 2>(in, out, ctx, axes, starts);
      break;
    case 3:
      _sliceCompute<T, 3>(in, out, ctx, axes, starts);
      break;
    case 4:
      _sliceCompute<T, 4>(in, out, ctx, axes, starts);
      break;
    case 5:
      _sliceCompute<T, 5>(in, out, ctx, axes, starts);
      break;
    case 6:
      _sliceCompute<T, 6>(in, out, ctx, axes, starts);
      break;
    case 7:
      _sliceCompute<T, 7>(in, out, ctx, axes, starts);
      break;
    case 8:
      _sliceCompute<T, 8>(in, out, ctx, axes, starts);
      break;
    case 9:
      _sliceCompute<T, 9>(in, out, ctx, axes, starts);
      break;
    default:
856 857
      PADDLE_THROW(platform::errors::InvalidArgument(
          "The dim size should be 1 to 9, current is %d", size));
W
wopeizl 已提交
858 859 860 861 862 863
      break;
  }
}

template <typename T>
inline framework::Tensor *_sliceWrapper(const framework::Tensor &self,
L
Leo Chen 已提交
864
                                        const phi::CPUContext &ctx,
865 866 867
                                        py::object obj,
                                        int dim,
                                        int64_t start,
W
wopeizl 已提交
868 869 870 871 872 873 874 875 876 877 878 879
                                        int64_t slicelength) {
  framework::DDim dstDDim = self.dims();
  dstDDim[dim] = static_cast<int64_t>(slicelength);
  std::vector<int> axes({dim});
  std::vector<int> starts({static_cast<int>(start)});
  framework::Tensor *output = _getTensor(self, dstDDim);
  _sliceDapper<T>(&self, output, ctx, axes, starts, dstDDim.size());
  return output;
}

template <typename T>
inline framework::Tensor *_sliceAndConcat(const framework::Tensor &self,
880 881
                                          py::object obj,
                                          int dim) {
L
Leo Chen 已提交
882
  phi::CPUContext ctx;
W
wopeizl 已提交
883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902
  int64_t start, stop, step, slicelength;
  _getSliceinfo(self, obj, dim, &start, &stop, &step, &slicelength);
  if (step == 1 || slicelength == 1) {
    return _sliceWrapper<T>(self, ctx, obj, dim, start, slicelength);
  } else {
    std::vector<framework::Tensor> ins;
    for (auto i = 0; i < slicelength; ++i, start += step) {
      ins.emplace_back(*_sliceWrapper<T>(self, ctx, obj, dim, start, 1));
    }

    // do the concat operation
    framework::DDim dstDDim = self.dims();
    dstDDim[dim] = static_cast<int64_t>(slicelength);
    framework::Tensor *output1 = _getTensor(self, dstDDim);
    _concatCompute<T>(ins, output1, ctx, dim);
    return output1;
  }
}

inline framework::Tensor *_sliceTensor(const framework::Tensor &self,
                                       py::object obj,
                                       int dim) {
  auto src_type = framework::TransToProtoVarType(self.dtype());
  switch (src_type) {
    case framework::proto::VarType::FP16:
      return _sliceAndConcat<paddle::platform::float16>(self, obj, dim);
    case framework::proto::VarType::BF16:
      return _sliceAndConcat<paddle::platform::bfloat16>(self, obj, dim);
    case framework::proto::VarType::COMPLEX64:
      return _sliceAndConcat<paddle::platform::complex<float>>(self, obj, dim);
    case framework::proto::VarType::COMPLEX128:
      return _sliceAndConcat<paddle::platform::complex<double>>(self, obj, dim);
    case framework::proto::VarType::FP32:
      return _sliceAndConcat<float>(self, obj, dim);
    case framework::proto::VarType::FP64:
      return _sliceAndConcat<double>(self, obj, dim);
    case framework::proto::VarType::INT8:
      return _sliceAndConcat<int8_t>(self, obj, dim);
    case framework::proto::VarType::INT16:
      return _sliceAndConcat<int16_t>(self, obj, dim);
    case framework::proto::VarType::INT32:
      return _sliceAndConcat<int>(self, obj, dim);
    case framework::proto::VarType::INT64:
      return _sliceAndConcat<int64_t>(self, obj, dim);
    case framework::proto::VarType::BOOL:
      return _sliceAndConcat<bool>(self, obj, dim);
    case framework::proto::VarType::UINT8:
      return _sliceAndConcat<uint8_t>(self, obj, dim);
    default:
      PADDLE_THROW(platform::errors::InvalidArgument(
          "Unsupported tensor type: %s",
          framework::DataTypeToString(src_type)));
  }
}

inline framework::Tensor *_pySliceTensor(const framework::Tensor &self,
                                         py::object obj) {
  if (py::isinstance<py::tuple>(obj)) {
    py::list l = static_cast<py::list>(obj);
    std::unique_ptr<framework::Tensor> target;
    framework::Tensor *src = const_cast<framework::Tensor *>(&self);
    for (auto i = 0; i < static_cast<int>(l.size()); ++i) {
      src = _sliceTensor(*src, l[i], i);
      if (i + 1 == static_cast<int>(l.size())) {
        return src;
      } else {
        target.reset(src);
      }
    }
    return nullptr;
  } else {
    return _sliceTensor(self, obj, 0);
  }
}

inline framework::Tensor *PySliceTensor(const framework::Tensor &self,
                                        py::object obj) {
  if (platform::is_gpu_place(self.place())) {
    std::unique_ptr<framework::Tensor> holder;
    framework::Tensor src;
    framework::TensorCopySync(self, platform::CPUPlace(), &src);
    framework::Tensor *output = _pySliceTensor(src, obj);
    holder.reset(output);
    framework::Tensor *dst = _getTensor(*output, output->dims());
    framework::TensorCopySync(*output, self.place(), dst);
    return dst;
  } else {
    return _pySliceTensor(self, obj);
  }
}
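
// Example (illustrative): slicing with a Python-style index tuple, roughly
// equivalent to t[0:2, 1] on the Python side; `t` is a hypothetical tensor
// and the caller owns the returned object.
//   std::unique_ptr<framework::Tensor> out(
//       PySliceTensor(t, py::make_tuple(py::slice(0, 2, 1), py::int_(1))));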

inline py::array TensorToPyArray(const framework::Tensor &tensor,
                                 bool need_deep_copy = false) {
  if (!tensor.IsInitialized()) {
    return py::array();
  }
  bool is_gpu_tensor = platform::is_gpu_place(tensor.place());
  bool is_xpu_tensor = platform::is_xpu_place(tensor.place());
  bool is_npu_tensor = platform::is_npu_place(tensor.place());
  bool is_mlu_tensor = platform::is_mlu_place(tensor.place());
  bool is_custom_device_tensor = platform::is_custom_place(tensor.place());
  const auto &tensor_dims = tensor.dims();
  auto tensor_dtype = framework::TransToProtoVarType(tensor.dtype());
  size_t sizeof_dtype = framework::SizeOfType(tensor_dtype);

  std::vector<size_t> py_dims(tensor_dims.size());
  std::vector<size_t> py_strides(tensor_dims.size());

  size_t numel = 1;
  for (int i = tensor_dims.size() - 1; i >= 0; --i) {
    py_dims[i] = static_cast<size_t>(tensor_dims[i]);
    py_strides[i] = sizeof_dtype * numel;
    numel *= py_dims[i];
  }

  const void *tensor_buf_ptr = tensor.data();

  std::string py_dtype_str = details::TensorDTypeToPyDTypeStr(
      framework::TransToProtoVarType(tensor.dtype()));

  if (!is_gpu_tensor && !is_xpu_tensor && !is_npu_tensor && !is_mlu_tensor &&
      !is_custom_device_tensor) {
    if (!need_deep_copy) {
      auto base = py::cast(std::move(tensor));
      return py::array(py::dtype(py_dtype_str.c_str()),
                       py_dims,
                       py_strides,
                       const_cast<void *>(tensor_buf_ptr),
                       base);
    } else {
      py::array py_arr(py::dtype(py_dtype_str.c_str()), py_dims, py_strides);
      PADDLE_ENFORCE_EQ(
          py_arr.writeable(),
          true,
          platform::errors::InvalidArgument(
              "PyArray is not writable, in which case memory leak "
              "or double free would occur"));
      PADDLE_ENFORCE_EQ(
          py_arr.owndata(),
          true,
          platform::errors::InvalidArgument(
              "PyArray does not own data, in which case memory leak "
              "or double free would occur"));
      platform::CPUPlace place;
      size_t copy_bytes = sizeof_dtype * numel;
      paddle::memory::Copy(
          place, py_arr.mutable_data(), place, tensor_buf_ptr, copy_bytes);
      return py_arr;
    }
  } else if (is_xpu_tensor) {
#ifdef PADDLE_WITH_XPU
    py::array py_arr(py::dtype(py_dtype_str.c_str()), py_dims, py_strides);
    PADDLE_ENFORCE_EQ(py_arr.writeable(),
                      true,
                      platform::errors::InvalidArgument(
                          "PyArray is not writable, in which case memory leak "
                          "or double free would occur"));
    PADDLE_ENFORCE_EQ(
        py_arr.owndata(),
        true,
        platform::errors::InvalidArgument(
            "PyArray does not own data, in which case memory leak "
            "or double free would occur"));

    size_t copy_bytes = sizeof_dtype * numel;
    auto p = tensor.place();
    paddle::memory::Copy(platform::CPUPlace(),
                         py_arr.mutable_data(),
                         p,
                         tensor_buf_ptr,
                         copy_bytes);
    return py_arr;
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Cannot use XPUPlace in CPU/GPU version, "
        "Please recompile or reinstall Paddle with XPU support."));
#endif
  } else if (is_gpu_tensor) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    py::array py_arr(py::dtype(py_dtype_str.c_str()), py_dims, py_strides);
    PADDLE_ENFORCE_EQ(py_arr.writeable(),
                      true,
                      platform::errors::InvalidArgument(
                          "PyArray is not writable, in which case memory leak "
                          "or double free would occur"));
    PADDLE_ENFORCE_EQ(
        py_arr.owndata(),
        true,
        platform::errors::InvalidArgument(
            "PyArray does not own data, in which case memory leak "
            "or double free would occur"));

    size_t copy_bytes = sizeof_dtype * numel;
    auto p = tensor.place();
    paddle::memory::Copy(platform::CPUPlace(),
                         py_arr.mutable_data(),
                         p,
                         tensor_buf_ptr,
                         copy_bytes,
                         nullptr);
    return py_arr;
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Cannot use CUDAPlace in CPU only version, "
        "Please recompile or reinstall Paddle with CUDA support."));
#endif
  } else if (is_npu_tensor) {
#ifdef PADDLE_WITH_ASCEND_CL
    py::array py_arr(py::dtype(py_dtype_str.c_str()), py_dims, py_strides);
    PADDLE_ENFORCE_EQ(py_arr.writeable(),
                      true,
                      platform::errors::InvalidArgument(
                          "PyArray is not writable, in which case memory leak "
                          "or double free would occur"));
    PADDLE_ENFORCE_EQ(
        py_arr.owndata(),
        true,
        platform::errors::InvalidArgument(
            "PyArray does not own data, in which case memory leak "
            "or double free would occur"));

    size_t copy_bytes = sizeof_dtype * numel;
    auto p = tensor.place();
    platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
    auto &ctx = *pool.Get(tensor.place());
    paddle::memory::Copy(
        platform::CPUPlace(),
        py_arr.mutable_data(),
        p,
        tensor_buf_ptr,
        copy_bytes,
        reinterpret_cast<const platform::NPUDeviceContext &>(ctx).stream());
    ctx.Wait();
    return py_arr;
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Cannot use NPUPlace in CPU/GPU/XPU version, "
        "Please recompile or reinstall Paddle with NPU support."));
#endif
  } else if (is_mlu_tensor) {
#ifdef PADDLE_WITH_MLU
    py::array py_arr(py::dtype(py_dtype_str.c_str()), py_dims, py_strides);
    PADDLE_ENFORCE_EQ(py_arr.writeable(),
                      true,
                      platform::errors::InvalidArgument(
                          "PyArray is not writable, in which case memory leak "
                          "or double free would occur"));
    PADDLE_ENFORCE_EQ(
        py_arr.owndata(),
        true,
        platform::errors::InvalidArgument(
            "PyArray does not own data, in which case memory leak "
            "or double free would occur"));

    size_t copy_bytes = sizeof_dtype * numel;
    auto p = tensor.place();
    platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
    auto &ctx = *pool.Get(tensor.place());
    paddle::memory::Copy(
        platform::CPUPlace(),
        py_arr.mutable_data(),
        p,
        tensor_buf_ptr,
        copy_bytes,
        reinterpret_cast<const platform::MLUDeviceContext &>(ctx).stream());
    ctx.Wait();
    return py_arr;
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Cannot use MLUPlace in CPU/GPU/XPU/NPU version, "
        "Please recompile or reinstall Paddle with MLU support."));
#endif
  } else if (is_custom_device_tensor) {
#ifdef PADDLE_WITH_CUSTOM_DEVICE
    py::array py_arr(py::dtype(py_dtype_str.c_str()), py_dims, py_strides);
    PADDLE_ENFORCE_EQ(py_arr.writeable(),
                      true,
                      platform::errors::InvalidArgument(
                          "PyArray is not writable, in which case memory leak "
                          "or double free would occur"));
    PADDLE_ENFORCE_EQ(
        py_arr.owndata(),
        true,
        platform::errors::InvalidArgument(
            "PyArray does not own data, in which case memory leak "
            "or double free would occur"));

    size_t copy_bytes = sizeof_dtype * numel;
    platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
    auto &ctx = *pool.Get(tensor.place());
    paddle::memory::Copy(
        platform::CPUPlace(),
        py_arr.mutable_data(),
        tensor.place(),
        tensor_buf_ptr,
        copy_bytes,
        reinterpret_cast<const platform::CustomDeviceContext &>(ctx).stream());
    ctx.Wait();
    return py_arr;
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Cannot use CustomPlace in CPU/GPU/XPU/NPU version, "
        "Please recompile or reinstall Paddle with CustomPlace "
        "support."));
#endif
  }
  PADDLE_THROW(platform::errors::Unimplemented("Place is not supported"));
  return py::array();
}
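
// Example (illustrative): exposing a CPU tensor to Python. Without
// need_deep_copy the numpy array shares the tensor's buffer and keeps it
// alive through the `base` object; with it, the data is copied.
//   py::array shared = TensorToPyArray(t);
//   py::array copied = TensorToPyArray(t, /*need_deep_copy=*/true);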

}  // namespace pybind
}  // namespace paddle