/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <Python.h>
// Avoid a problem with copysign defined in pyconfig.h on Windows.
#ifdef copysign
#undef copysign
#endif

#include <algorithm>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/operators/eigen/eigen_function.h"
#include "paddle/fluid/operators/math/concat_and_split.h"
#include "paddle/fluid/platform/bfloat16.h"
#include "paddle/fluid/platform/device/device_wrapper.h"
#include "paddle/phi/kernels/funcs/strided_memcpy.h"
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
#include "paddle/fluid/platform/cuda_device_guard.h"
#endif
#include "paddle/fluid/eager/api/generated/eager_generated/forwards/dygraph_functions.h"
#include "paddle/fluid/framework/convert_utils.h"
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/float16.h"
#include "paddle/fluid/platform/profiler/event_tracing.h"
#include "paddle/phi/common/pstring.h"
#include "paddle/phi/core/string_tensor.h"
#include "paddle/phi/kernels/strings/unicode.h"
#include "pybind11/numpy.h"
#include "pybind11/pybind11.h"

namespace py = pybind11;

namespace pybind11 {
namespace detail {

// Note: use the same enum numbers as numpy's dtypes.
// import numpy as np
// print(np.dtype(np.float16).num)  # 23
constexpr int NPY_FLOAT16_ = 23;
constexpr int NPY_UINT16_ = 4;
constexpr int NPY_COMPLEX64 = 14;
constexpr int NPY_COMPLEX128 = 15;

// Cast a numpy array from type S to type T; this may allocate new memory.
template <class T, class S>
static py::array_t<T> CastNumpyType(py::array_t<S> array) {
  if (std::is_same<T, S>::value) {
    return array;
  }
  // py::vectorize allocates the result array with the same shape as the
  // input and applies the element-wise cast.
  return py::vectorize([](S s) { return static_cast<T>(s); })(array);
}

template <class T>
static py::array_t<T> CastNumpyArray(const py::object &array) {
  if (py::isinstance<py::array_t<float>>(array)) {
    return CastNumpyType<T>(array.cast<py::array_t<float>>());
  } else if (py::isinstance<py::array_t<double>>(array)) {
    return CastNumpyType<T>(array.cast<py::array_t<double>>());
  } else if (py::isinstance<py::array_t<int32_t>>(array)) {
    return CastNumpyType<T>(array.cast<py::array_t<int32_t>>());
  } else if (py::isinstance<py::array_t<int64_t>>(array)) {
    return CastNumpyType<T>(array.cast<py::array_t<int64_t>>());
  } else if (py::isinstance<py::array_t<bool>>(array)) {
    return CastNumpyType<T>(array.cast<py::array_t<bool>>());
  } else {
    PADDLE_THROW(paddle::platform::errors::InvalidArgument(
        "Value type error. The assigned numpy value must be an integer, "
        "float, double or bool array, "
        "but received %s.",
        Py_TYPE(array.ptr())->tp_name));
  }
  // can't reach here
  return py::array_t<T>();
}
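
// Usage sketch (illustrative; `obj` is a hypothetical py::object coming from
// Python, e.g. an int64 numpy array):
//
//   py::array_t<float> f32 = CastNumpyArray<float>(obj);
//
// CastNumpyType returns the input array unchanged when S and T already
// match, and performs an element-wise static_cast otherwise.
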
// Note: Since float16 is not a builtin type in C++, we register
// paddle::platform::float16 as numpy.float16.
// Ref: https://github.com/pybind/pybind11/issues/1776
template <>
struct npy_format_descriptor<paddle::platform::float16> {
  static py::dtype dtype() {
    handle ptr = npy_api::get().PyArray_DescrFromType_(NPY_FLOAT16_);
    return reinterpret_borrow<py::dtype>(ptr);
  }
  static std::string format() {
    // Note: "e" represents float16.
    // Details at:
    // https://docs.python.org/3/library/struct.html#format-characters.
    return "e";
  }
  static constexpr auto name = _("float16");
};

// Note: Since bfloat16 is not a builtin type in C++ and in numpy,
// we register paddle::platform::bfloat16 as numpy.uint16.
template <>
struct npy_format_descriptor<paddle::platform::bfloat16> {
  static py::dtype dtype() {
    handle ptr = npy_api::get().PyArray_DescrFromType_(NPY_UINT16_);
    return reinterpret_borrow<py::dtype>(ptr);
  }
  static std::string format() {
    // Note: "H" represents UINT16.
    // Details at:
    // https://docs.python.org/3/library/struct.html#format-characters.
    return "H";
  }
  static constexpr auto name = _("bfloat16");
};

// we register paddle::platform::complex<float> as numpy.complex64.
template <>
struct npy_format_descriptor<paddle::platform::complex<float>> {
  static py::dtype dtype() {
    handle ptr = npy_api::get().PyArray_DescrFromType_(NPY_COMPLEX64);
    return reinterpret_borrow<py::dtype>(ptr);
  }

  static std::string format() {
    // Note: "F" represents complex64.
    // Details at:
    // https://stackoverflow.com/questions/13997087/what-are-the-available-datatypes-for-dtype-with-numpys-loadtxt-an-genfromtx
    // for k, v in np.sctypeDict.iteritems():
    //     print '{0:14s} : {1:40s}'.format(str(k), v)
    return "F";
  }
  static constexpr auto name = _("complex64");
};

template <>
struct npy_format_descriptor<paddle::platform::complex<double>> {
  static py::dtype dtype() {
    handle ptr = npy_api::get().PyArray_DescrFromType_(NPY_COMPLEX128);
    return reinterpret_borrow<py::dtype>(ptr);
  }

  static std::string format() {
    // Note: "D" represents complex128.
    // Details at:
    // https://stackoverflow.com/questions/13997087/what-are-the-available-datatypes-for-dtype-with-numpys-loadtxt-an-genfromtx
    // for k, v in np.sctypeDict.iteritems():
    //     print '{0:14s} : {1:40s}'.format(str(k), v)
    return "D";
  }
  static constexpr auto name = _("complex128");
};

}  // namespace detail
}  // namespace pybind11

namespace paddle {
namespace pybind {

namespace details {

template <typename T>
class PYBIND11_HIDDEN NumpyAllocation : public memory::Allocation {
 public:
  explicit NumpyAllocation(const py::array &arr)
      : Allocation(const_cast<void *>(arr.data()),
                   sizeof(T) * (arr.size()),
                   paddle::platform::CPUPlace()),
        arr_(arr.ptr()) {
    PADDLE_ENFORCE_NOT_NULL(
        arr_,
        platform::errors::InvalidArgument("The underlying PyObject pointer of "
                                          "numpy array cannot be nullptr"));
    PADDLE_ENFORCE_NE(
        arr_,
        Py_None,
        platform::errors::PreconditionNotMet(
            "The underlying PyObject pointer of numpy array cannot be None"));
    Py_INCREF(arr_);
  }
  ~NumpyAllocation() override {
    py::gil_scoped_acquire gil;
    Py_DECREF(arr_);
  }

 private:
  PyObject *arr_;
};

template <typename T>
struct ValidDTypeToPyArrayChecker {
  static constexpr bool kValue = false;
};

#define DECLARE_VALID_DTYPE_TO_PY_ARRAY(type) \
  template <>                                 \
  struct ValidDTypeToPyArrayChecker<type> {   \
    static constexpr bool kValue = true;      \
  }

DECLARE_VALID_DTYPE_TO_PY_ARRAY(platform::float16);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(platform::bfloat16);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(platform::complex<float>);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(platform::complex<double>);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(float);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(double);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(bool);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(int8_t);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(int16_t);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(int);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(int64_t);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(uint8_t);

inline std::string TensorDTypeToPyDTypeStr(
    framework::proto::VarType::Type type) {
#define TENSOR_DTYPE_TO_PY_DTYPE(T, proto_type)                             \
  if (type == proto_type) {                                                 \
    if (std::is_same<T, platform::float16>::value) {                        \
      return "e";                                                           \
    } else if (std::is_same<T, platform::bfloat16>::value) {                \
      /* NumPy character code of uint16 due to no support for bfloat16 */   \
      return "H";                                                           \
    } else if (std::is_same<T, platform::complex<float>>::value) {          \
      return "F";                                                           \
    } else if (std::is_same<T, platform::complex<double>>::value) {         \
      return "D";                                                           \
    } else {                                                                \
      constexpr auto kIsValidDType = ValidDTypeToPyArrayChecker<T>::kValue; \
      PADDLE_ENFORCE_EQ(                                                    \
          kIsValidDType,                                                    \
          true,                                                             \
          platform::errors::Unimplemented(                                  \
              "This type [%s] of tensor cannot be exposed to Python",       \
              typeid(T).name()));                                           \
      return py::format_descriptor<T>::format();                            \
    }                                                                       \
  }

  _ForEachDataType_(TENSOR_DTYPE_TO_PY_DTYPE);
#undef TENSOR_DTYPE_TO_PY_DTYPE
  PADDLE_THROW(platform::errors::Unimplemented(
      "Unsupported tensor data type: %s", framework::DataTypeToString(type)));
}

}  // namespace details

template <typename T>
T TensorGetElement(const phi::DenseTensor &self, size_t offset) {
  PADDLE_ENFORCE_LT(offset,
                    self.numel(),
                    platform::errors::InvalidArgument(
                        "The offset exceeds the size of tensor."));

  T b = static_cast<T>(0);
  if (platform::is_cpu_place(self.place()) ||
      platform::is_cuda_pinned_place(self.place())) {
    b = self.data<T>()[offset];
  } else if (platform::is_xpu_place(self.place())) {
#ifdef PADDLE_WITH_XPU
    const T *a = self.data<T>();
    auto p = self.place();
    paddle::memory::Copy(platform::CPUPlace(), &b, p, a + offset, sizeof(T));
#endif
  } else if (platform::is_gpu_place(self.place()) ||
             platform::is_cuda_pinned_place(self.place())) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    const T *a = self.data<T>();
    auto p = self.place();
    paddle::memory::Copy(
        platform::CPUPlace(), &b, p, a + offset, sizeof(T), nullptr);
#endif
  } else if (platform::is_custom_place(self.place())) {
#if defined(PADDLE_WITH_CUSTOM_DEVICE)
    const T *a = self.data<T>();
    auto p = self.place();
    paddle::memory::Copy(
        platform::CPUPlace(), &b, p, a + offset, sizeof(T), nullptr);
#endif
  }
  VLOG(10) << "TensorGetElement, place: " << self.place()
           << ", offset: " << offset << ", element: " << b;
  return b;
}

template <typename T>
void TensorSetElement(phi::DenseTensor *self, size_t offset, T elem) {
  PADDLE_ENFORCE_LT(offset,
                    self->numel(),
                    platform::errors::InvalidArgument(
                        "The offset exceeds the size of tensor."));
  VLOG(10) << "TensorSetElement, place: " << self->place()
           << ", offset: " << offset << ", element: " << elem;
  if (platform::is_cpu_place(self->place())) {
    self->mutable_data<T>(self->place())[offset] = elem;
  } else if (platform::is_xpu_place(self->place())) {
#ifdef PADDLE_WITH_XPU
    auto p = self->place();
    T *a = self->mutable_data<T>(p);
    paddle::memory::Copy(p, a + offset, platform::CPUPlace(), &elem, sizeof(T));
#endif
  } else if (platform::is_gpu_place(self->place()) ||
             platform::is_cuda_pinned_place(self->place())) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    auto p = self->place();
    T *a = self->mutable_data<T>(p);
    paddle::memory::Copy(
        p, a + offset, platform::CPUPlace(), &elem, sizeof(T), nullptr);
#endif
  } else if (platform::is_custom_place(self->place())) {
#if defined(PADDLE_WITH_CUSTOM_DEVICE)
    auto p = self->place();
    T *a = self->mutable_data<T>(p);
    paddle::memory::Copy(
        p, a + offset, platform::CPUPlace(), &elem, sizeof(T), nullptr);
#endif
  }
}
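
// Usage sketch (illustrative; `t` is a hypothetical CPU tensor):
//
//   phi::DenseTensor t;
//   t.Resize(phi::make_ddim({4}));
//   t.mutable_data<float>(platform::CPUPlace());
//   TensorSetElement<float>(&t, /*offset=*/2, 3.14f);
//   float v = TensorGetElement<float>(t, /*offset=*/2);  // v == 3.14f
//
// For non-CPU places both helpers round-trip a single element through
// paddle::memory::Copy, so they are intended for debugging, not bulk access.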

template <typename T, typename P>
void SetTensorFromPyArrayT(
    phi::DenseTensor *self,
    const py::array_t<T, py::array::c_style | py::array::forcecast> &array,
    const P &place,
    bool zero_copy) {
  std::vector<int64_t> dims;
  dims.reserve(array.ndim());
  for (decltype(array.ndim()) i = 0; i < array.ndim(); ++i) {
    dims.push_back(static_cast<int64_t>(array.shape()[i]));
  }
  self->Resize(phi::make_ddim(dims));

  if (paddle::platform::is_cpu_place(place)) {
    if (zero_copy) {
      auto holder = std::make_shared<details::NumpyAllocation<T>>(array);
      auto type = framework::ToDataType(std::type_index(typeid(T)));
      self->ResetHolderWithType(holder, framework::TransToPhiDataType(type));
    } else {
      auto dst = self->mutable_data<T>(place);
      std::memcpy(dst, array.data(), array.nbytes());
    }
  } else if (paddle::platform::is_xpu_place(place)) {
#ifdef PADDLE_WITH_XPU
    // NOTE(wangxi): When copying data to the accelerator card,
    // we need to call set_device(dev_id) first.
    platform::Place tmp_place = place;
    platform::XPUDeviceGuard guard(tmp_place.device);
    auto dst = self->mutable_data<T>(place);
    memory::Copy(tmp_place,
                 static_cast<void *>(dst),
                 platform::CPUPlace(),
                 static_cast<const void *>(array.data()),
                 array.nbytes());
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Cannot use XPUPlace in CPU/GPU version. "
        "Please recompile or reinstall Paddle with XPU support."));
#endif
  } else if (paddle::platform::is_ipu_place(place)) {
#ifdef PADDLE_WITH_IPU
    if (zero_copy) {
      auto holder = std::make_shared<details::NumpyAllocation<T>>(array);
      auto type = framework::ToDataType(std::type_index(typeid(T)));
      self->ResetHolderWithType(holder, framework::TransToPhiDataType(type));
    } else {
      // IPU does not store Tensor data, Tensor will be created on CPU
      if (!self->initialized()) {
        auto dst = self->mutable_data<T>(place);
        std::memcpy(dst, array.data(), array.nbytes());
      } else {
        auto dst = self->mutable_data<T>(self->place());
        std::memcpy(dst, array.data(), array.nbytes());
      }
    }
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Cannot use IPUPlace in CPU/GPU/XPU/NPU version. "
        "Please recompile or reinstall Paddle with IPU support."));
#endif
  } else if (paddle::platform::is_custom_place(place)) {
#ifdef PADDLE_WITH_CUSTOM_DEVICE
    platform::Place tmp_place = place;
    phi::DeviceGuard guard(tmp_place);
    auto dst = self->mutable_data<T>(place);

    phi::DeviceManager::GetDeviceWithPlace(tmp_place)->MemoryCopyH2D(
        reinterpret_cast<void *>(dst),
        const_cast<void *>(reinterpret_cast<const void *>(array.data())),
        array.nbytes());
    platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
    auto &ctx = *pool.Get(place);
    ctx.Wait();
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Cannot use CustomDevice in CPU/GPU/XPU version. "
        "Please recompile or reinstall Paddle with CustomDevice support."));
#endif
  } else {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    if (paddle::platform::is_gpu_place(place)) {
      // NOTE(wangxi): When copying data to the accelerator card,
      // we need to call set_device(dev_id) first.
      platform::CUDADeviceGuard guard(place.device);
      auto dst = self->mutable_data<T>(place);
#ifdef PADDLE_WITH_HIP
      paddle::platform::GpuMemcpySync(
          dst, array.data(), array.nbytes(), hipMemcpyHostToDevice);
#else
      paddle::platform::GpuMemcpySync(
          dst, array.data(), array.nbytes(), cudaMemcpyHostToDevice);
#endif

    } else if (paddle::platform::is_cuda_pinned_place(place)) {
      auto dst = self->mutable_data<T>(place);
      std::memcpy(dst, array.data(), array.nbytes());
    } else {
      PADDLE_THROW(platform::errors::InvalidArgument(
          "Incompatible place type: Tensor.set() supports "
          "CPUPlace, CUDAPlace "
          "and CUDAPinnedPlace, but got %s!",
          place));
    }
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Cannot use CUDAPlace or CUDAPinnedPlace in CPU only version. "
        "Please recompile or reinstall Paddle with CUDA support."));
#endif
  }
}

template <typename P>
void SetTensorFromPyArray(phi::DenseTensor *self,
                          const py::object &obj,
                          const P &place,
                          bool zero_copy) {
  auto array = obj.cast<py::array>();
  if (py::isinstance<py::array_t<float>>(array)) {
    SetTensorFromPyArrayT<float, P>(self, array, place, zero_copy);
  } else if (py::isinstance<py::array_t<int>>(array)) {
    SetTensorFromPyArrayT<int, P>(self, array, place, zero_copy);
  } else if (py::isinstance<py::array_t<int64_t>>(array)) {
    SetTensorFromPyArrayT<int64_t, P>(self, array, place, zero_copy);
  } else if (py::isinstance<py::array_t<double>>(array)) {
    SetTensorFromPyArrayT<double, P>(self, array, place, zero_copy);
  } else if (py::isinstance<py::array_t<int8_t>>(array)) {
    SetTensorFromPyArrayT<int8_t, P>(self, array, place, zero_copy);
  } else if (py::isinstance<py::array_t<int16_t>>(array)) {
    SetTensorFromPyArrayT<int16_t, P>(self, array, place, zero_copy);
  } else if (py::isinstance<py::array_t<uint8_t>>(array)) {
    SetTensorFromPyArrayT<uint8_t, P>(self, array, place, zero_copy);
  } else if (py::isinstance<py::array_t<paddle::platform::float16>>(array)) {
    SetTensorFromPyArrayT<paddle::platform::float16, P>(
        self, array, place, zero_copy);
  } else if (py::isinstance<py::array_t<paddle::platform::complex<float>>>(
                 array)) {
    SetTensorFromPyArrayT<paddle::platform::complex<float>, P>(
        self, array, place, zero_copy);
  } else if (py::isinstance<py::array_t<paddle::platform::complex<double>>>(
                 array)) {
    SetTensorFromPyArrayT<paddle::platform::complex<double>, P>(
        self, array, place, zero_copy);
  } else if (py::isinstance<py::array_t<uint16_t>>(array)) {
    // Since NumPy has no native bfloat16 support,
    // uint16 is used as the carrier type for bfloat16.
    SetTensorFromPyArrayT<paddle::platform::bfloat16, P>(
        self, array, place, zero_copy);
  } else if (py::isinstance<py::array_t<bool>>(array)) {
    SetTensorFromPyArrayT<bool, P>(self, array, place, zero_copy);
  } else {
    // obj may be any type; if obj.cast<py::array>() produced an unsupported
    // dtype, array.dtype will be a string of unknown meaning.
    PADDLE_THROW(platform::errors::InvalidArgument(
        "Input object type error or incompatible array data type. "
        "tensor.set() supports array with bool, float16, float32, "
        "float64, int8, int16, int32, int64, uint8 or uint16, "
        "please check your input or input array data type."));
  }
}
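
// Usage sketch (illustrative; `arr_obj` is a hypothetical py::object wrapping
// e.g. np.ones((2, 3), dtype=np.float32)):
//
//   phi::DenseTensor t;
//   SetTensorFromPyArray(&t, arr_obj, platform::CPUPlace(),
//                        /*zero_copy=*/false);
//
// With zero_copy=true on CPU, the tensor aliases the numpy buffer via
// details::NumpyAllocation instead of copying it.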

template <typename P>
void SetStringTensorFromPyArray(phi::StringTensor *self,
                                const py::array &array,
                                const P &place) {
  bool is_string_pyarray =
      array.dtype().kind() == 'S' || array.dtype().kind() == 'U';
  PADDLE_ENFORCE_EQ(is_string_pyarray,
                    true,
                    platform::errors::InvalidArgument(
                        "Expect the dtype of numpy array to be string or "
                        "unicode, but received dtype %s",
                        array.dtype()));
  std::vector<int64_t> dims;
  dims.reserve(array.ndim());
  for (decltype(array.ndim()) i = 0; i < array.ndim(); ++i) {
    dims.push_back(static_cast<int>(array.shape()[i]));
  }
  self->Resize(phi::make_ddim(dims));
  auto itemsize = array.itemsize();
  if (paddle::platform::is_cpu_place(place)) {
    auto dst = self->mutable_data(place);
    if (array.dtype().kind() == 'S') {
      for (int i = 0; i < self->numel(); ++i) {
        dst[i] =
            pstring(reinterpret_cast<const char *>(array.data()) + itemsize * i,
                    itemsize);
      }
    } else {
      // array.dtype().kind() == 'U'
      VLOG(6) << "numpy array itemsize: " << itemsize;
      for (int i = 0; i < self->numel(); ++i) {
        // Note(zhoushunjie): The itemsize of a unicode numpy array is the
        // size of each unicode string. Each unicode string is padded to the
        // max length in the array, so all strings share the same size. Each
        // unicode character occupies 4 bytes, so the byte size of a unicode
        // string is 4 times its length.
        auto unicode_len = itemsize / 4;
        auto utf8_len = phi::strings::GetUTF8StrLen(
            reinterpret_cast<const uint32_t *>(array.data()) + unicode_len * i,
            unicode_len);
        pstring pstr(utf8_len - 1, 0);
        phi::strings::GetUTF8Str(
            reinterpret_cast<const uint32_t *>(array.data()) + unicode_len * i,
            pstr.mdata(),
            unicode_len);
        dst[i] = pstr;
      }
    }
  } else {
    PADDLE_THROW(platform::errors::InvalidArgument(
        "StringTensor only supports CPUPlace now, but received %s",
        place.DebugString()));
  }
}
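
// Usage sketch (illustrative; `str_arr` is a hypothetical py::array built
// from e.g. np.array([b"ab", b"cd"], dtype="S2")):
//
//   phi::StringTensor st;
//   SetStringTensorFromPyArray(&st, str_arr, platform::CPUPlace());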

template <typename T>
void SetUVATensorFromPyArrayImpl(
    phi::DenseTensor *self_tensor,
    const py::array_t<T, py::array::c_style | py::array::forcecast> &array,
    int device_id) {
#if defined(PADDLE_WITH_CUDA)
  VLOG(4) << "Running in SetUVATensorFromPyArrayImpl.";
  std::vector<int64_t> dims;
  dims.reserve(array.ndim());
  int64_t numel = 1;
  for (decltype(array.ndim()) i = 0; i < array.ndim(); ++i) {
    dims.emplace_back(static_cast<int64_t>(array.shape()[i]));
    numel *= static_cast<int64_t>(array.shape()[i]);
  }
  self_tensor->Resize(phi::make_ddim(dims));

  auto data_type = framework::ToDataType(std::type_index(typeid(T)));
  const auto &need_allocate_size = numel * framework::SizeOfType(data_type);
  T *data_ptr;
  cudaHostAlloc(reinterpret_cast<void **>(&data_ptr),
                need_allocate_size,
                cudaHostAllocWriteCombined | cudaHostAllocMapped);
  std::memcpy(data_ptr, array.data(), array.nbytes());

  void *cuda_device_pointer = nullptr;
  cudaHostGetDevicePointer(reinterpret_cast<void **>(&cuda_device_pointer),
                           reinterpret_cast<void *>(data_ptr),
                           0);
  std::shared_ptr<memory::allocation::Allocation> holder =
      std::make_shared<memory::allocation::Allocation>(
          cuda_device_pointer,
          need_allocate_size,
          platform::CUDAPlace(device_id));
  self_tensor->ResetHolderWithType(holder,
                                   framework::TransToPhiDataType(data_type));
#endif
}

template <typename T>
void SetUVATensorFromPyArray(
    const std::shared_ptr<paddle::imperative::VarBase> &self,
    const py::array_t<T, py::array::c_style | py::array::forcecast> &array,
    int device_id) {
#if defined(PADDLE_WITH_CUDA)
  VLOG(4) << "Running in SetUVATensorFromPyArray for VarBase.";
  auto *self_tensor = self->MutableVar()->GetMutable<phi::DenseTensor>();
  SetUVATensorFromPyArrayImpl<T>(self_tensor, array, device_id);
#endif
}

template <typename T>
void SetUVATensorFromPyArray(const std::shared_ptr<paddle::Tensor> &self,
                             const py::array_t<T> &array,
                             int device_id) {
#if defined(PADDLE_WITH_CUDA)
  VLOG(4) << "Running in SetUVATensorFromPyArray for Phi::Tensor.";
  phi::DenseTensorMeta meta =
      phi::DenseTensorMeta(phi::DataType::FLOAT32, phi::make_ddim({1, 1}));
  std::shared_ptr<phi::DenseTensor> tmp_t = std::make_shared<phi::DenseTensor>(
      std::make_unique<paddle::experimental::DefaultAllocator>(
          paddle::platform::CPUPlace())
          .get(),
      meta);
  self.get()->set_impl(tmp_t);
  auto *self_tensor = static_cast<phi::DenseTensor *>(self.get()->impl().get());

  SetUVATensorFromPyArrayImpl<T>(self_tensor, array, device_id);
#endif
}
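
// Usage sketch (illustrative; CUDA-only): pinning host data so the GPU can
// address it directly through unified virtual addressing. `t` and `arr` are
// hypothetical.
//
//   auto t = std::make_shared<paddle::Tensor>();
//   py::array_t<float> arr = ...;  // host data
//   SetUVATensorFromPyArray<float>(t, arr, /*device_id=*/0);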

template <typename T, size_t D>
void _sliceCompute(const phi::DenseTensor *in,
                   phi::DenseTensor *out,
                   const phi::CPUContext &ctx,
                   const std::vector<int> &axes,
                   const std::vector<int> &starts) {
  auto &eigen_place = *ctx.eigen_device();
  auto out_dims = phi::vectorize<int>(out->dims());
  auto in_dims = in->dims();

  auto offsets = Eigen::DSizes<Eigen::DenseIndex, D>();
  auto extents = Eigen::DSizes<Eigen::DenseIndex, D>();
  for (size_t i = 0; i < D; ++i) {
    offsets[i] = 0;
    extents[i] = out_dims[i];
  }
  int start;
  for (size_t i = 0; i < axes.size(); ++i) {
    start = starts[i];
    if (start < 0) {
      start = (start + in_dims[axes[i]]);
    }
    start = std::max(start, 0);
    offsets[axes[i]] = start;
  }
  auto in_t =
      framework::EigenTensor<T, D, Eigen::RowMajor, Eigen::DenseIndex>::From(
          *in);
  auto out_t =
      framework::EigenTensor<T, D, Eigen::RowMajor, Eigen::DenseIndex>::From(
          *out);
  operators::EigenSlice<std::decay_t<decltype(eigen_place)>, T, D>::Eval(
      eigen_place, out_t, in_t, offsets, extents);
}

template <typename T>
void _concatCompute(const std::vector<phi::DenseTensor> &ins,
                    phi::DenseTensor *out,
                    const phi::CPUContext &ctx,
                    int64_t axis) {
  if (axis == 0 && ins.size() < 10) {
    size_t output_offset = 0;
    for (auto &in : ins) {
      auto in_stride = phi::stride_numel(in.dims());
      auto out_stride = phi::stride_numel(out->dims());
      phi::funcs::StridedNumelCopyWithAxis<T, phi::CPUContext>(
          ctx,
          axis,
          out->data<T>() + output_offset,
          out_stride,
          in.data<T>(),
          in_stride,
          in_stride[axis]);
      output_offset += in_stride[axis];
    }
  } else {
    paddle::operators::math::ConcatFunctor<phi::CPUContext, T> concat_functor;
    concat_functor(ctx, ins, static_cast<int>(axis), out);
  }
}

inline void _getSliceinfo(const phi::DenseTensor &self,
                          py::object obj,
                          const int64_t dim,
                          int64_t *pstart,
                          int64_t *pstop,
                          int64_t *pstep,
                          int64_t *pslicelength) {
  auto &start = *pstart;
  auto &stop = *pstop;
  auto &step = *pstep;
  auto &slicelength = *pslicelength;
  const framework::DDim &srcDDim = self.dims();
  PADDLE_ENFORCE(
      0 <= dim && dim < srcDDim.size(),
      platform::errors::OutOfRange("The dim %d of slice is out of bounds, it "
                                   "should be in the range of [0, %d).",
                                   dim,
                                   srcDDim.size()));

  if (py::isinstance<py::slice>(obj)) {
    size_t lstart, lstop, lstep, lslicelength;
    py::slice s = static_cast<py::slice>(obj);
    if (!s.compute(srcDDim[dim], &lstart, &lstop, &lstep, &lslicelength)) {
      PADDLE_THROW(platform::errors::OutOfRange(
          "Slicing on dim %d is invalid, please check the validity of tensor "
          "dims or slice item.",
          dim));
    }
    start = static_cast<int64_t>(lstart);
    stop = static_cast<int64_t>(lstop);
    step = static_cast<int64_t>(lstep);
    slicelength = static_cast<int64_t>(lslicelength);
  } else if (py::isinstance<py::int_>(obj)) {
    start = static_cast<int64_t>(static_cast<py::int_>(obj));
    PADDLE_ENFORCE(
        std::abs(start) < srcDDim[dim],
        platform::errors::OutOfRange("The start %d of slice is out of bounds, "
                                     "it should be in the range of (%d, %d).",
                                     start,
                                     -srcDDim[dim],
                                     srcDDim[dim]));
    // Normalize a negative index into [0, srcDDim[dim]).
    start = (start >= 0) ? start : srcDDim[dim] + start;
    stop = start + 1;
    step = 1;
    slicelength = 1;
  } else {
    PADDLE_THROW(
        platform::errors::OutOfRange("Index object error: the index object for "
                                     "slice only supports slice(::) and int."));
  }
}
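
// Usage sketch (illustrative): decoding a Python index for dim 0 of a
// hypothetical tensor `t` with shape [10]. For obj = py::slice(2, 8, 2)
// this yields start=2, stop=8, step=2, slicelength=3; for obj = py::int_(-1)
// it yields a normalized single-element range.
//
//   int64_t start, stop, step, slicelength;
//   _getSliceinfo(t, obj, /*dim=*/0, &start, &stop, &step, &slicelength);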

inline phi::DenseTensor *_getTensor(const phi::DenseTensor &self,
                                    const framework::DDim &ddim) {
  phi::DenseTensor *output = new phi::DenseTensor();
  output->Resize(ddim);
  auto place = self.place();
  if (platform::is_cpu_place(place)) {
    output->mutable_data(place, self.dtype());
  } else if (platform::is_xpu_place(place)) {
#ifdef PADDLE_WITH_XPU
    output->mutable_data(place, self.dtype());
#endif
  } else {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    if (platform::is_cuda_pinned_place(place)) {
      output->mutable_data(place, self.dtype());
    } else if ((platform::is_gpu_place(place))) {
      output->mutable_data(place, self.dtype());
    }
#endif
  }
  return output;
}

template <typename T>
void _sliceDapper(const phi::DenseTensor *in,
                  phi::DenseTensor *out,
                  const phi::CPUContext &ctx,
                  const std::vector<int> &axes,
                  const std::vector<int> &starts,
                  int size) {
  switch (size) {
    case 1:
      _sliceCompute<T, 1>(in, out, ctx, axes, starts);
      break;
    case 2:
      _sliceCompute<T, 2>(in, out, ctx, axes, starts);
      break;
    case 3:
      _sliceCompute<T, 3>(in, out, ctx, axes, starts);
      break;
    case 4:
      _sliceCompute<T, 4>(in, out, ctx, axes, starts);
      break;
    case 5:
      _sliceCompute<T, 5>(in, out, ctx, axes, starts);
      break;
    case 6:
      _sliceCompute<T, 6>(in, out, ctx, axes, starts);
      break;
    case 7:
      _sliceCompute<T, 7>(in, out, ctx, axes, starts);
      break;
    case 8:
      _sliceCompute<T, 8>(in, out, ctx, axes, starts);
      break;
    case 9:
      _sliceCompute<T, 9>(in, out, ctx, axes, starts);
      break;
    default:
      PADDLE_THROW(platform::errors::InvalidArgument(
          "The dim size should be 1 to 9, current is %d", size));
      break;
  }
}

template <typename T>
inline phi::DenseTensor *_sliceWrapper(const phi::DenseTensor &self,
                                       const phi::CPUContext &ctx,
                                       py::object obj,
                                       int dim,
                                       int64_t start,
                                       int64_t slicelength) {
  framework::DDim dstDDim = self.dims();
  dstDDim[dim] = static_cast<int64_t>(slicelength);
  std::vector<int> axes({dim});
  std::vector<int> starts({static_cast<int>(start)});
  phi::DenseTensor *output = _getTensor(self, dstDDim);
  _sliceDapper<T>(&self, output, ctx, axes, starts, dstDDim.size());
  return output;
}

template <typename T>
inline phi::DenseTensor *_sliceAndConcat(const phi::DenseTensor &self,
                                         py::object obj,
                                         int dim) {
  phi::CPUContext ctx;
  int64_t start, stop, step, slicelength;
  _getSliceinfo(self, obj, dim, &start, &stop, &step, &slicelength);
  if (step == 1 || slicelength == 1) {
    return _sliceWrapper<T>(self, ctx, obj, dim, start, slicelength);
  } else {
    std::vector<phi::DenseTensor> ins;
    for (auto i = 0; i < slicelength; ++i, start += step) {
      ins.emplace_back(*_sliceWrapper<T>(self, ctx, obj, dim, start, 1));
    }

    // do the concat operation
    framework::DDim dstDDim = self.dims();
    dstDDim[dim] = static_cast<int64_t>(slicelength);
    phi::DenseTensor *output1 = _getTensor(self, dstDDim);
    _concatCompute<T>(ins, output1, ctx, dim);
    return output1;
  }
}

inline phi::DenseTensor *_sliceTensor(const phi::DenseTensor &self,
                                      py::object obj,
                                      int dim) {
  auto src_type = framework::TransToProtoVarType(self.dtype());
  switch (src_type) {
    case framework::proto::VarType::FP16:
      return _sliceAndConcat<paddle::platform::float16>(self, obj, dim);
    case framework::proto::VarType::BF16:
      return _sliceAndConcat<paddle::platform::bfloat16>(self, obj, dim);
    case framework::proto::VarType::COMPLEX64:
      return _sliceAndConcat<paddle::platform::complex<float>>(self, obj, dim);
    case framework::proto::VarType::COMPLEX128:
      return _sliceAndConcat<paddle::platform::complex<double>>(self, obj, dim);
    case framework::proto::VarType::FP32:
      return _sliceAndConcat<float>(self, obj, dim);
    case framework::proto::VarType::FP64:
      return _sliceAndConcat<double>(self, obj, dim);
    case framework::proto::VarType::INT8:
      return _sliceAndConcat<int8_t>(self, obj, dim);
    case framework::proto::VarType::INT16:
      return _sliceAndConcat<int16_t>(self, obj, dim);
    case framework::proto::VarType::INT32:
      return _sliceAndConcat<int>(self, obj, dim);
    case framework::proto::VarType::INT64:
      return _sliceAndConcat<int64_t>(self, obj, dim);
    case framework::proto::VarType::BOOL:
      return _sliceAndConcat<bool>(self, obj, dim);
    case framework::proto::VarType::UINT8:
      return _sliceAndConcat<uint8_t>(self, obj, dim);
    default:
      PADDLE_THROW(platform::errors::InvalidArgument(
          "Unsupported tensor type: %s",
          framework::DataTypeToString(src_type)));
  }
}

inline phi::DenseTensor *_pySliceTensor(const phi::DenseTensor &self,
                                        py::object obj) {
  if (py::isinstance<py::tuple>(obj)) {
    py::list l = static_cast<py::list>(obj);
    std::unique_ptr<phi::DenseTensor> target;
    phi::DenseTensor *src = const_cast<phi::DenseTensor *>(&self);
    for (auto i = 0; i < static_cast<int>(l.size()); ++i) {
      src = _sliceTensor(*src, l[i], i);
      if (i + 1 == static_cast<int>(l.size())) {
        return src;
      } else {
        target.reset(src);
      }
    }
    return nullptr;
  } else {
    return _sliceTensor(self, obj, 0);
  }
}

inline phi::DenseTensor *PySliceTensor(const phi::DenseTensor &self,
                                       py::object obj) {
  if (platform::is_gpu_place(self.place())) {
    std::unique_ptr<phi::DenseTensor> holder;
    phi::DenseTensor src;
    framework::TensorCopySync(self, platform::CPUPlace(), &src);
    phi::DenseTensor *output = _pySliceTensor(src, obj);
    holder.reset(output);
    phi::DenseTensor *dst = _getTensor(*output, output->dims());
    framework::TensorCopySync(*output, self.place(), dst);
    return dst;
  } else {
    return _pySliceTensor(self, obj);
  }
}
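
// Usage sketch (illustrative; assumes an active interpreter with the GIL
// held): the C++ analogue of Python's t[1:5, 0], where `t` is a hypothetical
// phi::DenseTensor. The caller owns the returned tensor.
//
//   py::tuple idx = py::make_tuple(py::slice(1, 5, 1), py::int_(0));
//   std::unique_ptr<phi::DenseTensor> out(PySliceTensor(t, idx));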

inline py::array TensorToPyArray(const phi::DenseTensor &tensor,
                                 bool need_deep_copy = false) {
  if (!tensor.IsInitialized()) {
    return py::array();
  }
  bool is_gpu_tensor = platform::is_gpu_place(tensor.place());
  bool is_xpu_tensor = platform::is_xpu_place(tensor.place());
  bool is_npu_tensor = platform::is_npu_place(tensor.place());
  bool is_mlu_tensor = platform::is_mlu_place(tensor.place());
  bool is_custom_device_tensor = platform::is_custom_place(tensor.place());
  const auto &tensor_dims = tensor.dims();
  auto tensor_dtype = framework::TransToProtoVarType(tensor.dtype());
  size_t sizeof_dtype = framework::SizeOfType(tensor_dtype);

  std::vector<size_t> py_dims(tensor_dims.size());
  std::vector<size_t> py_strides(tensor_dims.size());

  size_t numel = 1;
  for (int i = tensor_dims.size() - 1; i >= 0; --i) {
    py_dims[i] = static_cast<size_t>(tensor_dims[i]);
    py_strides[i] = sizeof_dtype * numel;
    numel *= py_dims[i];
  }

  const void *tensor_buf_ptr = tensor.data();

  std::string py_dtype_str = details::TensorDTypeToPyDTypeStr(
      framework::TransToProtoVarType(tensor.dtype()));

  if (!is_gpu_tensor && !is_xpu_tensor && !is_npu_tensor && !is_mlu_tensor &&
      !is_custom_device_tensor) {
    if (!need_deep_copy) {
      auto base = py::cast(std::move(tensor));
      return py::array(py::dtype(py_dtype_str.c_str()),
                       py_dims,
                       py_strides,
                       const_cast<void *>(tensor_buf_ptr),
                       base);
    } else {
      py::array py_arr(py::dtype(py_dtype_str.c_str()), py_dims, py_strides);
      PADDLE_ENFORCE_EQ(
          py_arr.writeable(),
          true,
          platform::errors::InvalidArgument(
              "PyArray is not writable, in which case memory leak "
              "or double free would occur"));
      PADDLE_ENFORCE_EQ(
          py_arr.owndata(),
          true,
          platform::errors::InvalidArgument(
              "PyArray does not own data, in which case memory leak "
              "or double free would occur"));
      platform::CPUPlace place;
      size_t copy_bytes = sizeof_dtype * numel;
      paddle::memory::Copy(
          place, py_arr.mutable_data(), place, tensor_buf_ptr, copy_bytes);
      return py_arr;
    }
  } else if (is_xpu_tensor) {
#ifdef PADDLE_WITH_XPU
    py::array py_arr(py::dtype(py_dtype_str.c_str()), py_dims, py_strides);
    PADDLE_ENFORCE_EQ(py_arr.writeable(),
                      true,
                      platform::errors::InvalidArgument(
                          "PyArray is not writable, in which case memory leak "
                          "or double free would occur"));
    PADDLE_ENFORCE_EQ(
        py_arr.owndata(),
        true,
        platform::errors::InvalidArgument(
            "PyArray does not own data, in which case memory leak "
            "or double free would occur"));

    size_t copy_bytes = sizeof_dtype * numel;
    auto p = tensor.place();
    paddle::memory::Copy(platform::CPUPlace(),
                         py_arr.mutable_data(),
                         p,
                         tensor_buf_ptr,
                         copy_bytes);
    return py_arr;
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Cannot use XPUPlace in CPU/GPU version. "
        "Please recompile or reinstall Paddle with XPU support."));
#endif
  } else if (is_gpu_tensor) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    py::array py_arr(py::dtype(py_dtype_str.c_str()), py_dims, py_strides);
    PADDLE_ENFORCE_EQ(py_arr.writeable(),
                      true,
                      platform::errors::InvalidArgument(
                          "PyArray is not writable, in which case memory leak "
                          "or double free would occur"));
    PADDLE_ENFORCE_EQ(
        py_arr.owndata(),
        true,
        platform::errors::InvalidArgument(
            "PyArray does not own data, in which case memory leak "
            "or double free would occur"));

    size_t copy_bytes = sizeof_dtype * numel;
    auto p = tensor.place();
    paddle::memory::Copy(platform::CPUPlace(),
                         py_arr.mutable_data(),
                         p,
                         tensor_buf_ptr,
                         copy_bytes,
                         nullptr);
    return py_arr;
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Cannot use CUDAPlace in CPU only version. "
        "Please recompile or reinstall Paddle with CUDA support."));
#endif
  } else if (is_custom_device_tensor) {
#ifdef PADDLE_WITH_CUSTOM_DEVICE
    py::array py_arr(py::dtype(py_dtype_str.c_str()), py_dims, py_strides);
    PADDLE_ENFORCE_EQ(py_arr.writeable(),
                      true,
                      platform::errors::InvalidArgument(
                          "PyArray is not writable, in which case memory leak "
                          "or double free would occur"));
    PADDLE_ENFORCE_EQ(
        py_arr.owndata(),
        true,
        platform::errors::InvalidArgument(
            "PyArray does not own data, in which case memory leak "
            "or double free would occur"));

    // TODO(qili93): temporary for Ascend NPU performance, to be removed
    // along with the npu_identity op
    paddle::Tensor tensor_out(std::make_shared<phi::DenseTensor>());
    if (tensor.storage_properties_initialized()) {
      paddle::Tensor tensor_in(std::make_shared<phi::DenseTensor>(tensor));
      tensor_out = npu_identity_ad_func(tensor_in, -1);
      auto dense_tensor =
          std::dynamic_pointer_cast<phi::DenseTensor>(tensor_out.impl());
      tensor_buf_ptr = dense_tensor->data();
    }

    size_t copy_bytes = sizeof_dtype * numel;
    platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
    auto &ctx = *pool.Get(tensor.place());
    paddle::memory::Copy(
        platform::CPUPlace(),
        py_arr.mutable_data(),
        tensor.place(),
        tensor_buf_ptr,
        copy_bytes,
        reinterpret_cast<const platform::CustomDeviceContext &>(ctx).stream());
    ctx.Wait();
    return py_arr;
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Cannot use CustomPlace in CPU/GPU/XPU/NPU version. "
        "Please recompile or reinstall Paddle with CustomPlace "
        "support."));
#endif
  }
  PADDLE_THROW(platform::errors::Unimplemented("Place is not supported"));
  return py::array();
}
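
// Usage sketch (illustrative; assumes the GIL is held): exporting a
// hypothetical tensor `t` to numpy. With need_deep_copy=false a CPU tensor
// is aliased (the returned array keeps the tensor alive through its `base`
// object); with need_deep_copy=true, or for device tensors, the data is
// copied to host memory.
//
//   py::array np_arr = TensorToPyArray(t, /*need_deep_copy=*/true);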

}  // namespace pybind
}  // namespace paddle