/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <Python.h>
#include <algorithm>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/operators/eigen/eigen_function.h"
#include "paddle/fluid/operators/math/concat_and_split.h"
#include "paddle/fluid/operators/strided_memcpy.h"
#include "paddle/fluid/platform/bfloat16.h"
#include "paddle/fluid/platform/device/device_wrapper.h"
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
#include "paddle/fluid/platform/cuda_device_guard.h"
#endif
#include "paddle/fluid/framework/convert_utils.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/float16.h"
#include "paddle/fluid/platform/profiler/event_tracing.h"
#include "pybind11/numpy.h"
#include "pybind11/pybind11.h"

namespace py = pybind11;

namespace pybind11 {
namespace detail {

// Note: use the same type number that numpy assigns to float16:
//   import numpy as np
//   print(np.dtype(np.float16).num)  # 23
constexpr int NPY_FLOAT16_ = 23;
constexpr int NPY_UINT16_ = 4;
constexpr int NPY_COMPLEX64 = 14;
constexpr int NPY_COMPLEX128 = 15;

// Cast a numpy array from type S to type T; this may allocate new memory.
template <class T, class S>
static py::array_t<T> CastNumpyType(py::array_t<S> array) {
  if (std::is_same<T, S>::value) {
    return array;
  }
  // py::vectorize allocates the result array with the matching shape itself,
  // so no explicit result buffer is needed here.
  return py::vectorize([](S s) { return static_cast<T>(s); })(array);
}

template <class T>
static py::array_t<T> CastNumpyArray(const py::object &array) {
  if (py::isinstance<py::array_t<float>>(array)) {
    return CastNumpyType<T>(array.cast<py::array_t<float>>());
  } else if (py::isinstance<py::array_t<double>>(array)) {
    return CastNumpyType<T>(array.cast<py::array_t<double>>());
  } else if (py::isinstance<py::array_t<int32_t>>(array)) {
    return CastNumpyType<T>(array.cast<py::array_t<int32_t>>());
  } else if (py::isinstance<py::array_t<int64_t>>(array)) {
    return CastNumpyType<T>(array.cast<py::array_t<int64_t>>());
  } else if (py::isinstance<py::array_t<bool>>(array)) {
    return CastNumpyType<T>(array.cast<py::array_t<bool>>());
  } else {
    PADDLE_THROW(paddle::platform::errors::InvalidArgument(
        "Value type error. The assigned numpy value must be an integer, "
        "float, double or bool array, "
        "but received %s.",
        Py_TYPE(array.ptr())->tp_name));
  }
  // can't reach here
  return py::array_t<T>();
}
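
// Usage sketch (illustrative; not part of the original header): normalize a
// numpy object of any supported numeric dtype to float32 before reading it:
//   py::array_t<float> f32 = CastNumpyArray<float>(obj);
//   const float *src = f32.data();  // dense buffer of casted values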

// Note: Since float16 is not a builtin type in C++, we register
// paddle::platform::float16 as numpy.float16.
// Ref: https://github.com/pybind/pybind11/issues/1776
template <>
struct npy_format_descriptor<paddle::platform::float16> {
  static py::dtype dtype() {
    handle ptr = npy_api::get().PyArray_DescrFromType_(NPY_FLOAT16_);
    return reinterpret_borrow<py::dtype>(ptr);
  }
  static std::string format() {
    // Note: "e" represents float16.
    // Details at:
    // https://docs.python.org/3/library/struct.html#format-characters.
    return "e";
  }
  static constexpr auto name = _("float16");
};

// Note: Since bfloat16 is not a builtin type in C++ and in numpy,
// we register paddle::platform::bfloat16 as numpy.uint16.
template <>
struct npy_format_descriptor<paddle::platform::bfloat16> {
  static py::dtype dtype() {
    handle ptr = npy_api::get().PyArray_DescrFromType_(NPY_UINT16_);
    return reinterpret_borrow<py::dtype>(ptr);
  }
  static std::string format() {
    // Note: "H" represents UINT16.
    // Details at:
    // https://docs.python.org/3/library/struct.html#format-characters.
    return "H";
  }
  static constexpr auto name = _("bfloat16");
};
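
// Illustrative sketch (an addition, not from the original header): with the
// two specializations above, pybind11 can allocate numpy arrays over these
// element types directly:
//   py::array_t<paddle::platform::float16> fp16_arr({2, 3});   // numpy.float16
//   py::array_t<paddle::platform::bfloat16> bf16_arr({2, 3});  // numpy.uint16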

// Note: we register paddle::platform::complex<float> as numpy.complex64 and
// paddle::platform::complex<double> as numpy.complex128.
template <>
struct npy_format_descriptor<paddle::platform::complex<float>> {
  static py::dtype dtype() {
    handle ptr = npy_api::get().PyArray_DescrFromType_(NPY_COMPLEX64);
    return reinterpret_borrow<py::dtype>(ptr);
  }

  static std::string format() {
    // Note: "F" represents complex64.
    // Details at:
    // https://stackoverflow.com/questions/13997087/what-are-the-available-datatypes-for-dtype-with-numpys-loadtxt-an-genfromtx
    // for k, v in np.sctypeDict.iteritems():
    //     print '{0:14s} : {1:40s}'.format(str(k), v)
    return "F";
  }
  static constexpr auto name = _("complex64");
};

template <>
struct npy_format_descriptor<paddle::platform::complex<double>> {
  static py::dtype dtype() {
    handle ptr = npy_api::get().PyArray_DescrFromType_(NPY_COMPLEX128);
    return reinterpret_borrow<py::dtype>(ptr);
  }

  static std::string format() {
    // Note: "D" represents complex128.
    // Details at:
    // https://stackoverflow.com/questions/13997087/what-are-the-available-datatypes-for-dtype-with-numpys-loadtxt-an-genfromtx
    // for k, v in np.sctypeDict.iteritems():
    //     print '{0:14s} : {1:40s}'.format(str(k), v)
    return "D";
  }
  static constexpr auto name = _("complex128");
};

}  // namespace detail
}  // namespace pybind11

namespace paddle {
namespace pybind {

namespace details {

template <typename T>
class PYBIND11_HIDDEN NumpyAllocation : public memory::Allocation {
 public:
  explicit NumpyAllocation(const py::array &arr)
      : Allocation(const_cast<void *>(arr.data()), sizeof(T) * (arr.size()),
                   paddle::platform::CPUPlace()),
        arr_(arr.ptr()) {
    PADDLE_ENFORCE_NOT_NULL(arr_, platform::errors::InvalidArgument(
                                      "The underlying PyObject pointer of "
                                      "numpy array cannot be nullptr"));
    PADDLE_ENFORCE_NE(
        arr_, Py_None,
        platform::errors::PreconditionNotMet(
            "The underlying PyObject pointer of numpy array cannot be None"));
    Py_INCREF(arr_);
  }
  ~NumpyAllocation() override {
    py::gil_scoped_acquire gil;
    Py_DECREF(arr_);
  }

 private:
  PyObject *arr_;
};
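
// Sketch of the zero-copy path this allocation enables (see
// SetTensorFromPyArrayT below): the tensor keeps the numpy buffer alive
// instead of copying it. phi::DataType::FLOAT32 stands in for the generic
// dtype conversion used at the real call site:
//   auto holder = std::make_shared<NumpyAllocation<float>>(array);
//   tensor->ResetHolderWithType(holder, phi::DataType::FLOAT32);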

template <typename T>
struct ValidDTypeToPyArrayChecker {
  static constexpr bool kValue = false;
};

#define DECLARE_VALID_DTYPE_TO_PY_ARRAY(type) \
  template <>                                 \
  struct ValidDTypeToPyArrayChecker<type> {   \
    static constexpr bool kValue = true;      \
  }

DECLARE_VALID_DTYPE_TO_PY_ARRAY(platform::float16);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(platform::bfloat16);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(platform::complex<float>);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(platform::complex<double>);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(float);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(double);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(bool);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(int8_t);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(int16_t);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(int);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(int64_t);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(uint8_t);

inline std::string TensorDTypeToPyDTypeStr(
    framework::proto::VarType::Type type) {
#define TENSOR_DTYPE_TO_PY_DTYPE(T, proto_type)                             \
  if (type == proto_type) {                                                 \
    if (std::is_same<T, platform::float16>::value) {                        \
      return "e";                                                           \
    } else if (std::is_same<T, platform::bfloat16>::value) {                \
      /* NumPy character code of uint16 due to no support for bfloat16 */   \
      return "H";                                                           \
    } else if (std::is_same<T, platform::complex<float>>::value) {          \
      return "F";                                                           \
    } else if (std::is_same<T, platform::complex<double>>::value) {         \
      return "D";                                                           \
    } else {                                                                \
      constexpr auto kIsValidDType = ValidDTypeToPyArrayChecker<T>::kValue; \
      PADDLE_ENFORCE_EQ(                                                    \
          kIsValidDType, true,                                              \
          platform::errors::Unimplemented(                                  \
              "This type [%s] of tensor cannot be expose to Python",        \
              typeid(T).name()));                                           \
      return py::format_descriptor<T>::format();                            \
    }                                                                       \
  }

  _ForEachDataType_(TENSOR_DTYPE_TO_PY_DTYPE);
#undef TENSOR_DTYPE_TO_PY_DTYPE
  PADDLE_THROW(platform::errors::Unimplemented(
      "Unsupported tensor data type: %s", framework::DataTypeToString(type)));
}
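
// Example (sketch): the mapping above follows the struct-module format
// characters, e.g.
//   TensorDTypeToPyDTypeStr(framework::proto::VarType::FP32)  // -> "f"
//   TensorDTypeToPyDTypeStr(framework::proto::VarType::FP16)  // -> "e"
//   TensorDTypeToPyDTypeStr(framework::proto::VarType::BF16)  // -> "H"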

}  // namespace details

template <typename T>
T TensorGetElement(const framework::Tensor &self, size_t offset) {
  PADDLE_ENFORCE_LT(offset, self.numel(),
                    platform::errors::InvalidArgument(
                        "The offset exceeds the size of tensor."));

  T b = static_cast<T>(0);
  if (platform::is_cpu_place(self.place())) {
    b = self.data<T>()[offset];
  } else if (platform::is_xpu_place(self.place())) {
#ifdef PADDLE_WITH_XPU
    const T *a = self.data<T>();
    auto p = self.place();
    paddle::memory::Copy(platform::CPUPlace(), &b, p, a + offset, sizeof(T));
#endif
  } else if (platform::is_gpu_place(self.place())) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    const T *a = self.data<T>();
    auto p = self.place();
    paddle::memory::Copy(platform::CPUPlace(), &b, p, a + offset, sizeof(T),
                         nullptr);
#endif
  } else if (platform::is_mlu_place(self.place())) {
#ifdef PADDLE_WITH_MLU
    const T *a = self.data<T>();
    auto p = self.place();
    paddle::memory::Copy(platform::CPUPlace(), &b, p, a + offset, sizeof(T),
                         nullptr);
#endif
  } else if (platform::is_npu_place(self.place())) {
#if defined(PADDLE_WITH_ASCEND_CL)
    const T *a = self.data<T>();
    auto p = self.place();
    paddle::memory::Copy(platform::CPUPlace(), &b, p, a + offset, sizeof(T),
                         nullptr);
#endif
  } else if (platform::is_custom_place(self.place())) {
#if defined(PADDLE_WITH_CUSTOM_DEVICE)
    const T *a = self.data<T>();
    auto p = self.place();
    paddle::memory::Copy(platform::CPUPlace(), &b, p, a + offset, sizeof(T),
                         nullptr);
#endif
  }
  VLOG(10) << "TensorGetElement, place: " << self.place()
           << ", offset: " << offset << ", element: " << b;
  return b;
}

template <typename T>
void TensorSetElement(framework::Tensor *self, size_t offset, T elem) {
  PADDLE_ENFORCE_LT(offset, self->numel(),
                    platform::errors::InvalidArgument(
                        "The offset exceeds the size of tensor."));
  VLOG(10) << "TensorSetElement, place: " << self->place()
           << ", offset: " << offset << ", element: " << elem;
  if (platform::is_cpu_place(self->place())) {
    self->mutable_data<T>(self->place())[offset] = elem;
  } else if (platform::is_xpu_place(self->place())) {
#ifdef PADDLE_WITH_XPU
    auto p = self->place();
    T *a = self->mutable_data<T>(p);
    paddle::memory::Copy(p, a + offset, platform::CPUPlace(), &elem, sizeof(T));
#endif
  } else if (platform::is_gpu_place(self->place())) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    auto p = self->place();
    T *a = self->mutable_data<T>(p);
    paddle::memory::Copy(p, a + offset, platform::CPUPlace(), &elem, sizeof(T),
                         nullptr);
#endif
  } else if (platform::is_mlu_place(self->place())) {
#ifdef PADDLE_WITH_MLU
    auto p = self->place();
    T *a = self->mutable_data<T>(p);
    paddle::memory::Copy(p, a + offset, platform::CPUPlace(), &elem, sizeof(T),
                         nullptr);
#endif
  } else if (platform::is_npu_place(self->place())) {
#if defined(PADDLE_WITH_ASCEND_CL)
    auto p = self->place();
    T *a = self->mutable_data<T>(p);
    paddle::memory::Copy(p, a + offset, platform::CPUPlace(), &elem, sizeof(T),
                         nullptr);
#endif
  } else if (platform::is_custom_place(self->place())) {
#if defined(PADDLE_WITH_CUSTOM_DEVICE)
    auto p = self->place();
    T *a = self->mutable_data<T>(p);
    paddle::memory::Copy(p, a + offset, platform::CPUPlace(), &elem, sizeof(T),
                         nullptr);
#endif
  }
}
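
// Usage sketch (illustrative): read-modify-write a single element through
// host memory, whichever device holds the tensor:
//   float v = TensorGetElement<float>(tensor, /*offset=*/0);
//   TensorSetElement<float>(&tensor, /*offset=*/0, v + 1.0f);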

template <typename T, typename P>
void SetTensorFromPyArrayT(
    framework::Tensor *self,
    const py::array_t<T, py::array::c_style | py::array::forcecast> &array,
    const P &place, bool zero_copy) {
  std::vector<int64_t> dims;
  dims.reserve(array.ndim());
  for (decltype(array.ndim()) i = 0; i < array.ndim(); ++i) {
    dims.push_back(static_cast<int>(array.shape()[i]));
  }
  self->Resize(phi::make_ddim(dims));

  if (paddle::platform::is_cpu_place(place)) {
    if (zero_copy) {
      auto holder = std::make_shared<details::NumpyAllocation<T>>(array);
      auto type = framework::ToDataType(std::type_index(typeid(T)));
      self->ResetHolderWithType(holder, framework::TransToPhiDataType(type));
    } else {
      auto dst = self->mutable_data<T>(place);
      std::memcpy(dst, array.data(), array.nbytes());
    }
  } else if (paddle::platform::is_xpu_place(place)) {
#ifdef PADDLE_WITH_XPU
    // NOTE(wangxi): When copying data to an accelerator card,
    // we need to call set_device(dev_id) first.
    platform::Place tmp_place = place;
    platform::XPUDeviceGuard guard(tmp_place.device);
    auto dst = self->mutable_data<T>(place);
    memory::Copy(tmp_place, static_cast<void *>(dst), platform::CPUPlace(),
                 static_cast<const void *>(array.data()), array.nbytes());
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Cannot use XPUPlace in CPU/GPU version, "
        "Please recompile or reinstall Paddle with XPU support."));
#endif
  } else if (paddle::platform::is_ipu_place(place)) {
#ifdef PADDLE_WITH_IPU
    if (zero_copy) {
      auto holder = std::make_shared<details::NumpyAllocation<T>>(array);
      auto type = framework::ToDataType(std::type_index(typeid(T)));
      self->ResetHolderWithType(holder, framework::TransToPhiDataType(type));
    } else {
      // The IPU does not store Tensor data, so the Tensor is created on CPU
      if (!self->initialized()) {
        auto dst = self->mutable_data<T>(place);
        std::memcpy(dst, array.data(), array.nbytes());
      } else {
        auto dst = self->mutable_data<T>(self->place());
        std::memcpy(dst, array.data(), array.nbytes());
      }
    }
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Cannot use IPUPlace in CPU/GPU/XPU/NPU version, "
        "Please recompile or reinstall Paddle with IPU support."));
#endif
  } else if (paddle::platform::is_npu_place(place)) {
#ifdef PADDLE_WITH_ASCEND_CL
    platform::Place tmp_place = place;
    platform::NPUDeviceGuard guard(tmp_place.device);
    auto dst = self->mutable_data<T>(place);
    platform::NPUMemcpySync(dst, array.data(), array.nbytes(),
                            ACL_MEMCPY_HOST_TO_DEVICE);
    platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
    auto &ctx = *pool.Get(place);
    ctx.Wait();
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Cannot use NPUPlace in CPU/GPU/XPU version. "
        "Please recompile or reinstall Paddle with NPU support."));
#endif
  } else if (paddle::platform::is_mlu_place(place)) {
#ifdef PADDLE_WITH_MLU
    platform::Place tmp_place = place;
    platform::MLUDeviceGuard guard(tmp_place.device);
    auto dst = self->mutable_data<T>(place);
    paddle::platform::MLUMemcpyH2DSync(dst, array.data(), array.nbytes());
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Cannot use MLUPlace in CPU/GPU version, "
        "Please recompile or reinstall Paddle with MLU support."));
#endif
  } else if (paddle::platform::is_custom_place(place)) {
#ifdef PADDLE_WITH_CUSTOM_DEVICE
    platform::Place tmp_place = place;
    phi::DeviceGuard guard(tmp_place);
    auto dst = self->mutable_data<T>(place);

    phi::DeviceManager::GetDeviceWithPlace(tmp_place)->MemoryCopyH2D(
        reinterpret_cast<void *>(dst),
        const_cast<void *>(reinterpret_cast<const void *>(array.data())),
        array.nbytes());
    platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
    auto &ctx = *pool.Get(place);
    ctx.Wait();
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Cannot use CustomDevice in CPU/GPU/XPU version. "
        "Please recompile or reinstall Paddle with CustomDevice support."));
#endif
  } else {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    if (paddle::platform::is_gpu_place(place)) {
      // NOTE(wangxi): When copying data to an accelerator card,
      // we need to call set_device(dev_id) first.
      platform::CUDADeviceGuard guard(place.device);
      auto dst = self->mutable_data<T>(place);
#ifdef PADDLE_WITH_HIP
      paddle::platform::GpuMemcpySync(dst, array.data(), array.nbytes(),
                                      hipMemcpyHostToDevice);
#else
      paddle::platform::GpuMemcpySync(dst, array.data(), array.nbytes(),
                                      cudaMemcpyHostToDevice);
#endif

    } else if (paddle::platform::is_cuda_pinned_place(place)) {
      auto dst = self->mutable_data<T>(place);
      std::memcpy(dst, array.data(), array.nbytes());
    } else {
      PADDLE_THROW(platform::errors::InvalidArgument(
          "Incompatible place type: Tensor.set() supports "
          "CPUPlace, CUDAPlace "
          "and CUDAPinnedPlace, but got %s!",
          place));
    }
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Cannot use CUDAPlace or CUDAPinnedPlace in CPU only version, "
        "Please recompile or reinstall Paddle with CUDA support."));
#endif
  }
}

template <typename P>
void SetTensorFromPyArray(framework::Tensor *self, const py::object &obj,
                          const P &place, bool zero_copy) {
  auto array = obj.cast<py::array>();
  if (py::isinstance<py::array_t<float>>(array)) {
    SetTensorFromPyArrayT<float, P>(self, array, place, zero_copy);
  } else if (py::isinstance<py::array_t<int>>(array)) {
    SetTensorFromPyArrayT<int, P>(self, array, place, zero_copy);
  } else if (py::isinstance<py::array_t<int64_t>>(array)) {
    SetTensorFromPyArrayT<int64_t, P>(self, array, place, zero_copy);
  } else if (py::isinstance<py::array_t<double>>(array)) {
    SetTensorFromPyArrayT<double, P>(self, array, place, zero_copy);
  } else if (py::isinstance<py::array_t<int8_t>>(array)) {
    SetTensorFromPyArrayT<int8_t, P>(self, array, place, zero_copy);
  } else if (py::isinstance<py::array_t<int16_t>>(array)) {
    SetTensorFromPyArrayT<int16_t, P>(self, array, place, zero_copy);
  } else if (py::isinstance<py::array_t<uint8_t>>(array)) {
    SetTensorFromPyArrayT<uint8_t, P>(self, array, place, zero_copy);
  } else if (py::isinstance<py::array_t<paddle::platform::float16>>(array)) {
    SetTensorFromPyArrayT<paddle::platform::float16, P>(self, array, place,
                                                        zero_copy);
  } else if (py::isinstance<py::array_t<paddle::platform::complex<float>>>(
                 array)) {
    SetTensorFromPyArrayT<paddle::platform::complex<float>, P>(
        self, array, place, zero_copy);
  } else if (py::isinstance<py::array_t<paddle::platform::complex<double>>>(
                 array)) {
    SetTensorFromPyArrayT<paddle::platform::complex<double>, P>(
        self, array, place, zero_copy);
  } else if (py::isinstance<py::array_t<uint16_t>>(array)) {
    // Since NumPy still has no native bfloat16 support,
    // uint16 is used to carry bfloat16 values.
    SetTensorFromPyArrayT<paddle::platform::bfloat16, P>(self, array, place,
                                                         zero_copy);
  } else if (py::isinstance<py::array_t<bool>>(array)) {
    SetTensorFromPyArrayT<bool, P>(self, array, place, zero_copy);
  } else {
    // obj may be of any type, so obj.cast<py::array>() may fail; in that
    // case array.dtype would be a string with no defined meaning.
    PADDLE_THROW(platform::errors::InvalidArgument(
        "Input object type error or incompatible array data type. "
        "tensor.set() supports arrays of bool, float16, float32, "
        "float64, int8, int16, int32, int64, uint8 or uint16, "
        "please check your input or the array's data type."));
  }
}
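
// Typical call (sketch; the Python-facing binding lives in the pybind
// sources, so this exact call site is an assumption):
//   framework::Tensor t;
//   SetTensorFromPyArray<platform::CPUPlace>(
//       &t, numpy_obj, platform::CPUPlace(), /*zero_copy=*/false);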

template <typename T>
void SetUVATensorFromPyArray(
    const std::shared_ptr<paddle::imperative::VarBase> &self,
    const py::array_t<T> &array, int device_id) {
#if defined(PADDLE_WITH_CUDA)
  auto *self_tensor = self->MutableVar()->GetMutable<framework::LoDTensor>();
  std::vector<int64_t> dims;
  dims.reserve(array.ndim());
  int64_t numel = 1;
  for (decltype(array.ndim()) i = 0; i < array.ndim(); ++i) {
    dims.emplace_back(static_cast<int>(array.shape()[i]));
    numel *= static_cast<int>(array.shape()[i]);
  }
  self_tensor->Resize(phi::make_ddim(dims));

  auto data_type = framework::ToDataType(std::type_index(typeid(T)));
  const auto &need_allocate_size = numel * framework::SizeOfType(data_type);
  T *data_ptr;
  cudaHostAlloc(reinterpret_cast<void **>(&data_ptr), need_allocate_size,
                cudaHostAllocWriteCombined | cudaHostAllocMapped);
  std::memcpy(data_ptr, array.data(), array.nbytes());

  void *cuda_device_pointer = nullptr;
  cudaHostGetDevicePointer(reinterpret_cast<void **>(&cuda_device_pointer),
                           reinterpret_cast<void *>(data_ptr), 0);
  std::shared_ptr<memory::allocation::Allocation> holder =
      std::make_shared<memory::allocation::Allocation>(
          cuda_device_pointer, need_allocate_size,
          platform::CUDAPlace(device_id));
  self_tensor->ResetHolderWithType(holder,
                                   framework::TransToPhiDataType(data_type));
#endif
}
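
// Sketch (CUDA build only): pin a numpy buffer into unified virtual
// addressing so the GPU reads host memory in place. `var` is assumed to be a
// std::shared_ptr<paddle::imperative::VarBase> available at the call site:
//   SetUVATensorFromPyArray<float>(var, np_array, /*device_id=*/0);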

template <typename T, size_t D>
void _sliceCompute(const framework::Tensor *in, framework::Tensor *out,
                   const platform::CPUDeviceContext &ctx,
                   const std::vector<int> &axes,
                   const std::vector<int> &starts) {
  auto &eigen_place = *ctx.eigen_device();
  auto out_dims = out->dims();
  auto in_dims = in->dims();

  auto offsets = Eigen::DSizes<Eigen::DenseIndex, D>();
  auto extents = Eigen::DSizes<Eigen::DenseIndex, D>();
  for (size_t i = 0; i < D; ++i) {
    offsets[i] = 0;
    extents[i] = out_dims[i];
  }
  int start;
  for (size_t i = 0; i < axes.size(); ++i) {
    start = starts[i];
    if (start < 0) {
      start = (start + in_dims[axes[i]]);
    }
    start = std::max(start, 0);
    offsets[axes[i]] = start;
  }
  auto in_t =
      framework::EigenTensor<T, D, Eigen::RowMajor, Eigen::DenseIndex>::From(
          *in);
  auto out_t =
      framework::EigenTensor<T, D, Eigen::RowMajor, Eigen::DenseIndex>::From(
          *out);
  operators::EigenSlice<std::decay_t<decltype(eigen_place)>, T, D>::Eval(
      eigen_place, out_t, in_t, offsets, extents);
}

template <typename T>
void _concatCompute(const std::vector<paddle::framework::Tensor> &ins,
                    paddle::framework::Tensor *out,
                    const platform::CPUDeviceContext &ctx, int64_t axis) {
  if (axis == 0 && ins.size() < 10) {
    size_t output_offset = 0;
    for (auto &in : ins) {
      auto in_stride = phi::stride_numel(in.dims());
      auto out_stride = phi::stride_numel(out->dims());
      paddle::operators::StridedNumelCopyWithAxis<T>(
          ctx, axis, out->data<T>() + output_offset, out_stride, in.data<T>(),
          in_stride, in_stride[axis]);
      output_offset += in_stride[axis];
    }
  } else {
    paddle::operators::math::ConcatFunctor<platform::CPUDeviceContext, T>
        concat_functor;
    concat_functor(ctx, ins, static_cast<int>(axis), out);
  }
}

inline void _getSliceinfo(const framework::Tensor &self, py::object obj,
                          const int64_t dim, int64_t *pstart, int64_t *pstop,
                          int64_t *pstep, int64_t *pslicelength) {
  auto &start = *pstart;
  auto &stop = *pstop;
  auto &step = *pstep;
  auto &slicelength = *pslicelength;
  const framework::DDim &srcDDim = self.dims();
  PADDLE_ENFORCE(
      0 <= dim && dim < srcDDim.size(),
      platform::errors::OutOfRange("The dim %d of slice is out of bounds, it "
                                   "shound be in the range of [0, %d).",
                                   dim, srcDDim.size()));

  if (py::isinstance<py::slice>(obj)) {
    size_t lstart, lstop, lstep, lslicelength;
    py::slice s = static_cast<py::slice>(obj);
    if (!s.compute(srcDDim[dim], &lstart, &lstop, &lstep, &lslicelength)) {
      PADDLE_THROW(platform::errors::OutOfRange(
          "Slice on dim: %d is error, please check the validity of tensor "
          "dims or slice item.",
          dim));
    }
    start = static_cast<int64_t>(lstart);
    stop = static_cast<int64_t>(lstop);
    step = static_cast<int64_t>(lstep);
    slicelength = static_cast<int64_t>(lslicelength);
  } else if (py::isinstance<py::int_>(obj)) {
    start = static_cast<int64_t>(static_cast<py::int_>(obj));
    PADDLE_ENFORCE(
        std::abs(start) < srcDDim[dim],
        platform::errors::OutOfRange("The start %d of slice is out of bounds, "
                                     "it shound be in the range of (%d, %d).",
                                     start, -srcDDim[dim], srcDDim[dim]));
    // a negative start index counts backwards from the end of the dim
    start = (start >= 0) ? start : srcDDim[dim] + start;
    stop = start + 1;
    step = 1;
    slicelength = 1;
  } else {
    PADDLE_THROW(
        platform::errors::OutOfRange("Index object error: the index object for "
                                     "slice only supports slice(::) and int."));
  }
}
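
// Sketch: decoding the Python expression t[1:7:2] on dim 0, assuming
// tensor.dims()[0] >= 7:
//   int64_t start, stop, step, slicelength;
//   _getSliceinfo(tensor, py::slice(1, 7, 2), /*dim=*/0, &start, &stop,
//                 &step, &slicelength);
//   // -> start == 1, stop == 7, step == 2, slicelength == 3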

inline framework::Tensor *_getTensor(const framework::Tensor &self,
                                     const framework::DDim &ddim) {
  framework::Tensor *output = new framework::Tensor();
  output->Resize(ddim);
  auto place = self.place();
  if (platform::is_cpu_place(place)) {
    output->mutable_data(place, self.dtype());
  } else if (platform::is_xpu_place(place)) {
#ifdef PADDLE_WITH_XPU
    output->mutable_data(place, self.dtype());
#endif
  } else if (platform::is_mlu_place(place)) {
#ifdef PADDLE_WITH_MLU
    output->mutable_data(place, self.dtype());
#endif
  } else {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    if (platform::is_cuda_pinned_place(place)) {
      output->mutable_data(place, self.dtype());
    } else if ((platform::is_gpu_place(place))) {
      output->mutable_data(place, self.dtype());
    }
#endif
  }
  return output;
}

template <typename T>
void _sliceDapper(const framework::Tensor *in, framework::Tensor *out,
                  const platform::CPUDeviceContext &ctx,
                  const std::vector<int> &axes, const std::vector<int> &starts,
                  int size) {
  switch (size) {
    case 1:
      _sliceCompute<T, 1>(in, out, ctx, axes, starts);
      break;
    case 2:
      _sliceCompute<T, 2>(in, out, ctx, axes, starts);
      break;
    case 3:
      _sliceCompute<T, 3>(in, out, ctx, axes, starts);
      break;
    case 4:
      _sliceCompute<T, 4>(in, out, ctx, axes, starts);
      break;
    case 5:
      _sliceCompute<T, 5>(in, out, ctx, axes, starts);
      break;
    case 6:
      _sliceCompute<T, 6>(in, out, ctx, axes, starts);
      break;
    case 7:
      _sliceCompute<T, 7>(in, out, ctx, axes, starts);
      break;
    case 8:
      _sliceCompute<T, 8>(in, out, ctx, axes, starts);
      break;
    case 9:
      _sliceCompute<T, 9>(in, out, ctx, axes, starts);
      break;
    default:
      PADDLE_THROW(platform::errors::InvalidArgument(
          "The dim size should be 1 to 9, current is %d", size));
      break;
  }
}

template <typename T>
inline framework::Tensor *_sliceWrapper(const framework::Tensor &self,
                                        const platform::CPUDeviceContext &ctx,
                                        py::object obj, int dim, int64_t start,
                                        int64_t slicelength) {
  framework::DDim dstDDim = self.dims();
  dstDDim[dim] = static_cast<int64_t>(slicelength);
  std::vector<int> axes({dim});
  std::vector<int> starts({static_cast<int>(start)});
  framework::Tensor *output = _getTensor(self, dstDDim);
  _sliceDapper<T>(&self, output, ctx, axes, starts, dstDDim.size());
  return output;
}

template <typename T>
inline framework::Tensor *_sliceAndConcat(const framework::Tensor &self,
                                          py::object obj, int dim) {
  platform::CPUDeviceContext ctx;
  int64_t start, stop, step, slicelength;
  _getSliceinfo(self, obj, dim, &start, &stop, &step, &slicelength);
  if (step == 1 || slicelength == 1) {
    return _sliceWrapper<T>(self, ctx, obj, dim, start, slicelength);
  } else {
    std::vector<framework::Tensor> ins;
    for (auto i = 0; i < slicelength; ++i, start += step) {
      ins.emplace_back(*_sliceWrapper<T>(self, ctx, obj, dim, start, 1));
    }

    // do the concat operation
    framework::DDim dstDDim = self.dims();
    dstDDim[dim] = static_cast<int64_t>(slicelength);
    framework::Tensor *output1 = _getTensor(self, dstDDim);
    _concatCompute<T>(ins, output1, ctx, dim);
    return output1;
  }
}

inline framework::Tensor *_sliceTensor(const framework::Tensor &self,
                                       py::object obj, int dim) {
  auto src_type = framework::TransToProtoVarType(self.dtype());
  switch (src_type) {
    case framework::proto::VarType::FP16:
      return _sliceAndConcat<paddle::platform::float16>(self, obj, dim);
    case framework::proto::VarType::BF16:
      return _sliceAndConcat<paddle::platform::bfloat16>(self, obj, dim);
    case framework::proto::VarType::COMPLEX64:
      return _sliceAndConcat<paddle::platform::complex<float>>(self, obj, dim);
    case framework::proto::VarType::COMPLEX128:
      return _sliceAndConcat<paddle::platform::complex<double>>(self, obj, dim);
    case framework::proto::VarType::FP32:
      return _sliceAndConcat<float>(self, obj, dim);
    case framework::proto::VarType::FP64:
      return _sliceAndConcat<double>(self, obj, dim);
    case framework::proto::VarType::INT8:
      return _sliceAndConcat<int8_t>(self, obj, dim);
    case framework::proto::VarType::INT16:
      return _sliceAndConcat<int16_t>(self, obj, dim);
    case framework::proto::VarType::INT32:
      return _sliceAndConcat<int>(self, obj, dim);
    case framework::proto::VarType::INT64:
      return _sliceAndConcat<int64_t>(self, obj, dim);
    case framework::proto::VarType::BOOL:
      return _sliceAndConcat<bool>(self, obj, dim);
    case framework::proto::VarType::UINT8:
      return _sliceAndConcat<uint8_t>(self, obj, dim);
    default:
      PADDLE_THROW(platform::errors::InvalidArgument(
          "Not support tensor type: %s",
          framework::DataTypeToString(src_type)));
  }
}

inline framework::Tensor *_pySliceTensor(const framework::Tensor &self,
                                         py::object obj) {
  if (py::isinstance<py::tuple>(obj)) {
    py::list l = static_cast<py::list>(obj);
    std::unique_ptr<framework::Tensor> target;
    framework::Tensor *src = const_cast<framework::Tensor *>(&self);
    for (auto i = 0; i < static_cast<int>(l.size()); ++i) {
      src = _sliceTensor(*src, l[i], i);
      if (i + 1 == static_cast<int>(l.size())) {
        return src;
      } else {
        target.reset(src);
      }
    }
    return nullptr;
  } else {
    return _sliceTensor(self, obj, 0);
  }
}

inline framework::Tensor *PySliceTensor(const framework::Tensor &self,
                                        py::object obj) {
  if (platform::is_gpu_place(self.place())) {
    std::unique_ptr<framework::Tensor> holder;
    framework::Tensor src;
    framework::TensorCopySync(self, platform::CPUPlace(), &src);
    framework::Tensor *output = _pySliceTensor(src, obj);
    holder.reset(output);
    framework::Tensor *dst = _getTensor(*output, output->dims());
    framework::TensorCopySync(*output, self.place(), dst);
    return dst;
  } else {
    return _pySliceTensor(self, obj);
  }
}
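
// Usage sketch: the C++ counterpart of Python-side t[0]. The returned tensor
// is heap-allocated (via _getTensor above), so the caller owns it:
//   std::unique_ptr<framework::Tensor> out(
//       PySliceTensor(tensor, py::int_(0)));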

inline py::array TensorToPyArray(const framework::Tensor &tensor,
                                 bool need_deep_copy = false) {
  if (!tensor.IsInitialized()) {
    return py::array();
  }
  bool is_gpu_tensor = platform::is_gpu_place(tensor.place());
  bool is_xpu_tensor = platform::is_xpu_place(tensor.place());
  bool is_npu_tensor = platform::is_npu_place(tensor.place());
  bool is_mlu_tensor = platform::is_mlu_place(tensor.place());
  bool is_custom_device_tensor = platform::is_custom_place(tensor.place());
  const auto &tensor_dims = tensor.dims();
  auto tensor_dtype = framework::TransToProtoVarType(tensor.dtype());
  size_t sizeof_dtype = framework::SizeOfType(tensor_dtype);

  std::vector<size_t> py_dims(tensor_dims.size());
  std::vector<size_t> py_strides(tensor_dims.size());

  size_t numel = 1;
  for (int i = tensor_dims.size() - 1; i >= 0; --i) {
    py_dims[i] = static_cast<size_t>(tensor_dims[i]);
    py_strides[i] = sizeof_dtype * numel;
    numel *= py_dims[i];
  }

  const void *tensor_buf_ptr = tensor.data();

  std::string py_dtype_str = details::TensorDTypeToPyDTypeStr(
      framework::TransToProtoVarType(tensor.dtype()));

  if (!is_gpu_tensor && !is_xpu_tensor && !is_npu_tensor && !is_mlu_tensor &&
      !is_custom_device_tensor) {
    if (!need_deep_copy) {
      auto base = py::cast(std::move(tensor));
      return py::array(py::dtype(py_dtype_str.c_str()), py_dims, py_strides,
                       const_cast<void *>(tensor_buf_ptr), base);
    } else {
      py::array py_arr(py::dtype(py_dtype_str.c_str()), py_dims, py_strides);
      PADDLE_ENFORCE_EQ(
          py_arr.writeable(), true,
          platform::errors::InvalidArgument(
              "PyArray is not writable, in which case memory leak "
              "or double free would occur"));
      PADDLE_ENFORCE_EQ(
          py_arr.owndata(), true,
          platform::errors::InvalidArgument(
              "PyArray does not own data, in which case  memory leak "
              "or double free would occur"));
      platform::CPUPlace place;
      size_t copy_bytes = sizeof_dtype * numel;
      paddle::memory::Copy(place, py_arr.mutable_data(), place, tensor_buf_ptr,
                           copy_bytes);
      return py_arr;
    }
  } else if (is_xpu_tensor) {
#ifdef PADDLE_WITH_XPU
    py::array py_arr(py::dtype(py_dtype_str.c_str()), py_dims, py_strides);
    PADDLE_ENFORCE_EQ(py_arr.writeable(), true,
                      platform::errors::InvalidArgument(
                          "PyArray is not writable, in which case memory leak "
                          "or double free would occur"));
    PADDLE_ENFORCE_EQ(
        py_arr.owndata(), true,
        platform::errors::InvalidArgument(
            "PyArray does not own data, in which case  memory leak "
            "or double free would occur"));

    size_t copy_bytes = sizeof_dtype * numel;
    auto p = tensor.place();
    paddle::memory::Copy(platform::CPUPlace(), py_arr.mutable_data(), p,
                         tensor_buf_ptr, copy_bytes);
    return py_arr;
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Cannot use XPUPlace in CPU/GPU version, "
        "Please recompile or reinstall Paddle with XPU support."));
#endif
  } else if (is_gpu_tensor) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    py::array py_arr(py::dtype(py_dtype_str.c_str()), py_dims, py_strides);
    PADDLE_ENFORCE_EQ(py_arr.writeable(), true,
                      platform::errors::InvalidArgument(
                          "PyArray is not writable, in which case memory leak "
                          "or double free would occur"));
    PADDLE_ENFORCE_EQ(
        py_arr.owndata(), true,
        platform::errors::InvalidArgument(
            "PyArray does not own data, in which case  memory leak "
            "or double free would occur"));

    size_t copy_bytes = sizeof_dtype * numel;
    auto p = tensor.place();
    paddle::memory::Copy(platform::CPUPlace(), py_arr.mutable_data(), p,
                         tensor_buf_ptr, copy_bytes, nullptr);
    return py_arr;
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Cannot use CUDAPlace in CPU only version, "
        "Please recompile or reinstall Paddle with CUDA support."));
#endif
  } else if (is_npu_tensor) {
#ifdef PADDLE_WITH_ASCEND_CL
    py::array py_arr(py::dtype(py_dtype_str.c_str()), py_dims, py_strides);
    PADDLE_ENFORCE_EQ(py_arr.writeable(), true,
                      platform::errors::InvalidArgument(
                          "PyArray is not writable, in which case memory leak "
                          "or double free would occur"));
    PADDLE_ENFORCE_EQ(
        py_arr.owndata(), true,
        platform::errors::InvalidArgument(
            "PyArray does not own data, in which case  memory leak "
            "or double free would occur"));

    size_t copy_bytes = sizeof_dtype * numel;
    auto p = tensor.place();
    platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
    auto &ctx = *pool.Get(tensor.place());
    paddle::memory::Copy(
        platform::CPUPlace(), py_arr.mutable_data(), p, tensor_buf_ptr,
        copy_bytes,
        reinterpret_cast<const platform::NPUDeviceContext &>(ctx).stream());
    ctx.Wait();
    return py_arr;
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Cannot use NPUPlace in CPU/GPU/XPU version, "
        "Please recompile or reinstall Paddle with NPU support."));
#endif
  } else if (is_mlu_tensor) {
#ifdef PADDLE_WITH_MLU
    py::array py_arr(py::dtype(py_dtype_str.c_str()), py_dims, py_strides);
    PADDLE_ENFORCE_EQ(py_arr.writeable(), true,
                      platform::errors::InvalidArgument(
                          "PyArray is not writable, in which case memory leak "
                          "or double free would occur"));
    PADDLE_ENFORCE_EQ(
        py_arr.owndata(), true,
        platform::errors::InvalidArgument(
            "PyArray does not own data, in which case  memory leak "
            "or double free would occur"));

    size_t copy_bytes = sizeof_dtype * numel;
    auto p = tensor.place();
    platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
    auto &ctx = *pool.Get(tensor.place());
    paddle::memory::Copy(
        platform::CPUPlace(), py_arr.mutable_data(), p, tensor_buf_ptr,
        copy_bytes,
        reinterpret_cast<const platform::MLUDeviceContext &>(ctx).stream());
    ctx.Wait();
    return py_arr;
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Cannot use MLUPlace in CPU/GPU/XPU/NPU version, "
        "Please recompile or reinstall Paddle with MLU support."));
#endif
  } else if (is_custom_device_tensor) {
#ifdef PADDLE_WITH_CUSTOM_DEVICE
    py::array py_arr(py::dtype(py_dtype_str.c_str()), py_dims, py_strides);
    PADDLE_ENFORCE_EQ(py_arr.writeable(), true,
                      platform::errors::InvalidArgument(
                          "PyArray is not writable, in which case memory leak "
                          "or double free would occur"));
    PADDLE_ENFORCE_EQ(
        py_arr.owndata(), true,
        platform::errors::InvalidArgument(
            "PyArray does not own data, in which case  memory leak "
            "or double free would occur"));

    size_t copy_bytes = sizeof_dtype * numel;
    platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
    auto &ctx = *pool.Get(tensor.place());
    paddle::memory::Copy(
        platform::CPUPlace(), py_arr.mutable_data(), tensor.place(),
        tensor_buf_ptr, copy_bytes,
        reinterpret_cast<const platform::CustomDeviceContext &>(ctx).stream());
    ctx.Wait();
    return py_arr;
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Cannot use CustomPlace in CPU/GPU/XPU/NPU version, "
        "Please recompile or reinstall Paddle with CustomPlace "
        "support."));
#endif
  }
  PADDLE_THROW(platform::errors::Unimplemented("Place is not supported"));
  return py::array();
}
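
// Usage sketch: materialize a (possibly device-resident) tensor as a numpy
// array; with need_deep_copy == false a CPU tensor shares its buffer instead
// of copying it:
//   py::array arr = TensorToPyArray(tensor, /*need_deep_copy=*/true);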

}  // namespace pybind
}  // namespace paddle