/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <Python.h>
#include <algorithm>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/operators/math/concat_and_split.h"
#include "paddle/fluid/operators/strided_memcpy.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/float16.h"
#include "pybind11/numpy.h"
#include "pybind11/pybind11.h"

namespace py = pybind11;

namespace pybind11 {
namespace detail {

// Note: use the same enum number as numpy's float16:
// >>> import numpy as np
// >>> print(np.dtype(np.float16).num)  # 23
constexpr int NPY_FLOAT16_ = 23;

// Note: Since float16 is not a builtin type in C++, we register
// paddle::platform::float16 as numpy.float16.
// Ref: https://github.com/pybind/pybind11/issues/1776
template <>
struct npy_format_descriptor<paddle::platform::float16> {
  static py::dtype dtype() {
    handle ptr = npy_api::get().PyArray_DescrFromType_(NPY_FLOAT16_);
    return reinterpret_borrow<py::dtype>(ptr);
  }
  static std::string format() {
    // Note: "e" represents float16.
    // Details at:
    // https://docs.python.org/3/library/struct.html#format-characters.
    return "e";
  }
  static PYBIND11_DESCR name() { return _("float16"); }
};

}  // namespace detail
}  // namespace pybind11

namespace paddle {
namespace pybind {

namespace details {

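// Note: NumpyAllocation wraps a numpy array's buffer as a Paddle Allocation
// so a CPU tensor can share the array's memory without copying. It keeps a
// reference to the underlying PyObject and releases it under the GIL when
// the allocation is destroyed.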
template <typename T>
class PYBIND11_HIDDEN NumpyAllocation : public memory::Allocation {
 public:
  explicit NumpyAllocation(const py::array &arr)
      : Allocation(const_cast<void *>(arr.data()), sizeof(T) * (arr.size()),
                   paddle::platform::CPUPlace()),
        arr_(arr.ptr()) {
    PADDLE_ENFORCE_NOT_NULL(arr_, platform::errors::InvalidArgument(
                                      "The underlying PyObject pointer of "
                                      "the numpy array cannot be nullptr"));
    PADDLE_ENFORCE_NE(
        arr_, Py_None,
        platform::errors::PreconditionNotMet(
            "The underlying PyObject pointer of the numpy array cannot be "
            "None"));
    Py_INCREF(arr_);
  }
  ~NumpyAllocation() override {
    py::gil_scoped_acquire gil;
    Py_DECREF(arr_);
  }

 private:
  PyObject *arr_;
};

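// Note: compile-time whitelist of element types that may be exposed to
// numpy; specializations are generated by DECLARE_VALID_DTYPE_TO_PY_ARRAY.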
template <typename T>
struct ValidDTypeToPyArrayChecker {
  static constexpr bool kValue = false;
};

#define DECLARE_VALID_DTYPE_TO_PY_ARRAY(type) \
  template <>                                 \
  struct ValidDTypeToPyArrayChecker<type> {   \
    static constexpr bool kValue = true;      \
  }

DECLARE_VALID_DTYPE_TO_PY_ARRAY(platform::float16);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(float);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(double);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(bool);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(int8_t);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(int16_t);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(int);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(int64_t);
DECLARE_VALID_DTYPE_TO_PY_ARRAY(uint8_t);

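// Note: maps a Paddle proto data type to the Python buffer-protocol format
// string (e.g. "e" for float16, for which py::format_descriptor has no
// builtin entry).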
inline std::string TensorDTypeToPyDTypeStr(
    framework::proto::VarType::Type type) {
#define TENSOR_DTYPE_TO_PY_DTYPE(T, proto_type)                             \
  if (type == proto_type) {                                                 \
    if (std::is_same<T, platform::float16>::value) {                        \
      return "e";                                                           \
    } else {                                                                \
      constexpr auto kIsValidDType = ValidDTypeToPyArrayChecker<T>::kValue; \
      PADDLE_ENFORCE_EQ(                                                    \
          kIsValidDType, true,                                              \
          platform::errors::Unimplemented(                                  \
              "This type [%s] of tensor cannot be exposed to Python",       \
              typeid(T).name()));                                           \
      return py::format_descriptor<T>::format();                            \
    }                                                                       \
  }

  _ForEachDataType_(TENSOR_DTYPE_TO_PY_DTYPE);
#undef TENSOR_DTYPE_TO_PY_DTYPE
  PADDLE_THROW(platform::errors::Unimplemented(
      "Unsupported tensor data type: %s", framework::DataTypeToString(type)));
}

}  // namespace details

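// Note: reads a single element at a linear offset; for device tensors the
// value is copied back to host memory first.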
template <typename T>
T TensorGetElement(const framework::Tensor &self, size_t offset) {
  PADDLE_ENFORCE_LT(offset, self.numel(),
                    platform::errors::InvalidArgument(
                        "The offset exceeds the size of the tensor."));
  T b = static_cast<T>(0);
  if (platform::is_cpu_place(self.place())) {
    b = self.data<T>()[offset];
  } else if (platform::is_xpu_place(self.place())) {
#ifdef PADDLE_WITH_XPU
    const T *a = self.data<T>();
    auto p = BOOST_GET_CONST(platform::XPUPlace, self.place());
    paddle::memory::Copy(platform::CPUPlace(), &b, p, a + offset, sizeof(T));
#endif
  } else if (platform::is_gpu_place(self.place())) {
#ifdef PADDLE_WITH_CUDA
    const T *a = self.data<T>();
    auto p = BOOST_GET_CONST(platform::CUDAPlace, self.place());
    paddle::memory::Copy(platform::CPUPlace(), &b, p, a + offset, sizeof(T),
                         nullptr);
#endif
  }
  return b;
}

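// Note: writes a single element at a linear offset; for device tensors the
// value is staged on the host and copied to the device.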
template <typename T>
void TensorSetElement(framework::Tensor *self, size_t offset, T elem) {
  PADDLE_ENFORCE_LT(offset, self->numel(),
                    platform::errors::InvalidArgument(
                        "The offset exceeds the size of the tensor."));
  if (platform::is_cpu_place(self->place())) {
    self->mutable_data<T>(self->place())[offset] = elem;
  } else if (platform::is_xpu_place(self->place())) {
#ifdef PADDLE_WITH_XPU
    auto p = BOOST_GET_CONST(platform::XPUPlace, self->place());
    T *a = self->mutable_data<T>(p);
    paddle::memory::Copy(p, a + offset, platform::CPUPlace(), &elem, sizeof(T));
#endif
  } else if (platform::is_gpu_place(self->place())) {
#ifdef PADDLE_WITH_CUDA
    auto p = BOOST_GET_CONST(platform::CUDAPlace, self->place());
    T *a = self->mutable_data<T>(p);
    paddle::memory::Copy(p, a + offset, platform::CPUPlace(), &elem, sizeof(T),
                         nullptr);
#endif
  }
}

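// Note: resizes the tensor to the array's shape and copies the array's
// buffer to the target place; on CPU with zero_copy the buffer is shared
// via NumpyAllocation instead of copied.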
template <typename T, typename P>
void SetTensorFromPyArrayT(
    framework::Tensor *self,
    const py::array_t<T, py::array::c_style | py::array::forcecast> &array,
    const P &place, bool zero_copy) {
  std::vector<int64_t> dims;
  dims.reserve(array.ndim());
  for (decltype(array.ndim()) i = 0; i < array.ndim(); ++i) {
    dims.push_back(static_cast<int64_t>(array.shape()[i]));
  }
  self->Resize(framework::make_ddim(dims));

  if (paddle::platform::is_cpu_place(place)) {
    if (zero_copy) {
      auto holder = std::make_shared<details::NumpyAllocation<T>>(array);
      auto type = framework::ToDataType(std::type_index(typeid(T)));
      self->ResetHolderWithType(holder, type);
    } else {
      auto dst = self->mutable_data<T>(place);
      std::memcpy(dst, array.data(), array.nbytes());
    }
  } else if (paddle::platform::is_xpu_place(place)) {
#ifdef PADDLE_WITH_XPU
    auto dst = self->mutable_data<T>(place);
    xpu_memcpy(dst, array.data(), array.nbytes(),
               XPUMemcpyKind::XPU_HOST_TO_DEVICE);
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Cannot use XPUPlace in CPU/GPU version, "
        "Please recompile or reinstall Paddle with XPU support."));
#endif
  } else {
#ifdef PADDLE_WITH_CUDA
    auto dst = self->mutable_data<T>(place);
    if (paddle::platform::is_cuda_pinned_place(place)) {
      std::memcpy(dst, array.data(), array.nbytes());
    } else if (paddle::platform::is_gpu_place(place)) {
      paddle::platform::GpuMemcpySync(dst, array.data(), array.nbytes(),
                                      cudaMemcpyHostToDevice);
    } else {
      PADDLE_THROW(platform::errors::InvalidArgument(
          "Incompatible place type: Tensor.set() supports "
          "CPUPlace, CUDAPlace "
          "and CUDAPinnedPlace, but got %s!",
          place));
    }
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Cannot use CUDAPlace or CUDAPinnedPlace in CPU-only version, "
        "Please recompile or reinstall Paddle with CUDA support."));
#endif
  }
}

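// Note: runtime dispatch from the numpy array's dtype to the matching
// SetTensorFromPyArrayT instantiation.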
template <typename P>
void SetTensorFromPyArray(framework::Tensor *self, const py::object &obj,
                          const P &place, bool zero_copy) {
  auto array = obj.cast<py::array>();
  if (py::isinstance<py::array_t<float>>(array)) {
    SetTensorFromPyArrayT<float, P>(self, array, place, zero_copy);
  } else if (py::isinstance<py::array_t<int>>(array)) {
    SetTensorFromPyArrayT<int, P>(self, array, place, zero_copy);
  } else if (py::isinstance<py::array_t<int64_t>>(array)) {
    SetTensorFromPyArrayT<int64_t, P>(self, array, place, zero_copy);
  } else if (py::isinstance<py::array_t<double>>(array)) {
    SetTensorFromPyArrayT<double, P>(self, array, place, zero_copy);
  } else if (py::isinstance<py::array_t<int8_t>>(array)) {
    SetTensorFromPyArrayT<int8_t, P>(self, array, place, zero_copy);
  } else if (py::isinstance<py::array_t<int16_t>>(array)) {
    SetTensorFromPyArrayT<int16_t, P>(self, array, place, zero_copy);
  } else if (py::isinstance<py::array_t<uint8_t>>(array)) {
    SetTensorFromPyArrayT<uint8_t, P>(self, array, place, zero_copy);
  } else if (py::isinstance<py::array_t<paddle::platform::float16>>(array)) {
    SetTensorFromPyArrayT<paddle::platform::float16, P>(self, array, place,
                                                        zero_copy);
  } else if (py::isinstance<py::array_t<uint16_t>>(array)) {
    // TODO(cql): temporarily keep uint16, which was used for casting float16
    // before. It should be deprecated later.
    SetTensorFromPyArrayT<paddle::platform::float16, P>(self, array, place,
                                                        zero_copy);
  } else if (py::isinstance<py::array_t<bool>>(array)) {
    SetTensorFromPyArrayT<bool, P>(self, array, place, zero_copy);
  } else {
    // obj may be any type; obj.cast<py::array>() may fail, and then
    // array.dtype will be a string of unknown meaning.
    PADDLE_THROW(platform::errors::InvalidArgument(
        "Input object type error or incompatible array data type. "
        "tensor.set() supports arrays with bool, float16, float32, "
        "float64, int8, int16, int32, int64, uint8 or uint16 elements; "
        "please check your input or input array data type."));
  }
}

template <typename T, size_t D>
void _sliceCompute(const framework::Tensor *in, framework::Tensor *out,
                   const platform::CPUDeviceContext &ctx,
                   const std::vector<int> &axes,
                   const std::vector<int> &starts) {
  auto &eigen_place = *ctx.eigen_device();
  auto place = in->place();
  auto out_dims = out->dims();
  auto in_dims = in->dims();

  auto offsets = Eigen::array<int, D>();
  auto extents = Eigen::array<int, D>();
  for (size_t i = 0; i < D; ++i) {
    offsets[i] = 0;
    extents[i] = out_dims[i];
  }
  int start;
  for (size_t i = 0; i < axes.size(); ++i) {
    start = starts[i];
    if (start < 0) {
      start = (start + in_dims[axes[i]]);
    }
    start = std::max(start, 0);
    offsets[axes[i]] = start;
  }
  auto in_t =
      framework::EigenTensor<T, D, Eigen::RowMajor, Eigen::DenseIndex>::From(
          *in);
  auto out_t =
      framework::EigenTensor<T, D, Eigen::RowMajor, Eigen::DenseIndex>::From(
          *out);
  out_t.device(eigen_place) = in_t.slice(offsets, extents);
}

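// Note: concatenates the given tensors along `axis`; axis 0 with fewer than
// ten inputs takes the strided-copy fast path.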
template <typename T>
void _concatCompute(const std::vector<paddle::framework::Tensor> &ins,
                    paddle::framework::Tensor *out,
                    const platform::CPUDeviceContext &ctx, int64_t axis) {
  if (axis == 0 && ins.size() < 10) {
    size_t output_offset = 0;
    for (auto &in : ins) {
      auto in_stride = framework::stride_numel(in.dims());
      auto out_stride = framework::stride_numel(out->dims());
      paddle::operators::StridedNumelCopyWithAxis<T>(
          ctx, axis, out->data<T>() + output_offset, out_stride, in.data<T>(),
          in_stride, in_stride[axis]);
      output_offset += in_stride[axis];
    }
  } else {
    paddle::operators::math::ConcatFunctor<platform::CPUDeviceContext, T>
        concat_functor;
    concat_functor(ctx, ins, static_cast<int>(axis), out);
  }
}

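// Note: normalizes a Python slice or integer index on dimension `dim` into
// (start, stop, step, slicelength).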
inline void _getSliceinfo(const framework::Tensor &self, py::object obj,
                          const int64_t dim, int64_t *pstart, int64_t *pstop,
                          int64_t *pstep, int64_t *pslicelength) {
  auto &start = *pstart;
  auto &stop = *pstop;
  auto &step = *pstep;
  auto &slicelength = *pslicelength;
  const framework::DDim &srcDDim = self.dims();
  if (dim < 0 || dim >= srcDDim.size()) {
    throw py::index_error();
  }
  if (py::isinstance<py::slice>(obj)) {
    size_t lstart, lstop, lstep, lslicelength;
    py::slice s = static_cast<py::slice>(obj);
    if (!s.compute(srcDDim[dim], &lstart, &lstop, &lstep, &lslicelength)) {
      throw py::index_error();
    }
    start = static_cast<int64_t>(lstart);
    stop = static_cast<int64_t>(lstop);
    step = static_cast<int64_t>(lstep);
    slicelength = static_cast<int64_t>(lslicelength);
  } else if (py::isinstance<py::int_>(obj)) {
    start = static_cast<int64_t>(static_cast<py::int_>(obj));
    if (std::abs(start) >= srcDDim[dim]) {
      throw py::index_error();
    }
    start = (start >= 0) ? start : srcDDim[dim] + start;
    stop = start + 1;
    step = 1;
    slicelength = 1;
  } else {
    throw py::index_error();
  }
}

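// Note: allocates an uninitialized tensor with shape `ddim` on the same
// place as `self`; the caller owns the returned pointer.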
inline framework::Tensor *_getTensor(const framework::Tensor &self,
                                     const framework::DDim &ddim) {
  framework::Tensor *output = new framework::Tensor();
  output->Resize(ddim);
  auto place = self.place();
  if (platform::is_cpu_place(place)) {
    output->mutable_data(BOOST_GET_CONST(platform::CPUPlace, place),
                         self.type());
  } else if (platform::is_xpu_place(place)) {
#ifdef PADDLE_WITH_XPU
    output->mutable_data(BOOST_GET_CONST(platform::XPUPlace, place),
                         self.type());
#endif
  } else {
#ifdef PADDLE_WITH_CUDA
    if (platform::is_cuda_pinned_place(place)) {
      output->mutable_data(BOOST_GET_CONST(platform::CUDAPinnedPlace, place),
                           self.type());
    } else if ((platform::is_gpu_place(place))) {
      output->mutable_data(BOOST_GET_CONST(platform::CUDAPlace, place),
                           self.type());
    }
#endif
  }
  return output;
}

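// Note: dispatches _sliceCompute on the static rank (1 to 9) required by
// Eigen tensors.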
template <typename T>
void _sliceDapper(const framework::Tensor *in, framework::Tensor *out,
                  const platform::CPUDeviceContext &ctx,
                  const std::vector<int> &axes, const std::vector<int> &starts,
                  int size) {
  switch (size) {
    case 1:
      _sliceCompute<T, 1>(in, out, ctx, axes, starts);
      break;
    case 2:
      _sliceCompute<T, 2>(in, out, ctx, axes, starts);
      break;
    case 3:
      _sliceCompute<T, 3>(in, out, ctx, axes, starts);
      break;
    case 4:
      _sliceCompute<T, 4>(in, out, ctx, axes, starts);
      break;
    case 5:
      _sliceCompute<T, 5>(in, out, ctx, axes, starts);
      break;
    case 6:
      _sliceCompute<T, 6>(in, out, ctx, axes, starts);
      break;
    case 7:
      _sliceCompute<T, 7>(in, out, ctx, axes, starts);
      break;
    case 8:
      _sliceCompute<T, 8>(in, out, ctx, axes, starts);
      break;
    case 9:
      _sliceCompute<T, 9>(in, out, ctx, axes, starts);
      break;
    default:
      PADDLE_THROW(platform::errors::InvalidArgument(
          "The dim size should be 1 to 9, but received %d", size));
      break;
  }
}

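// Note: materializes a unit-step slice of length `slicelength` starting at
// `start` along `dim`.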
template <typename T>
inline framework::Tensor *_sliceWrapper(const framework::Tensor &self,
                                        const platform::CPUDeviceContext &ctx,
                                        py::object obj, int dim, int64_t start,
                                        int64_t slicelength) {
  framework::DDim dstDDim = self.dims();
  dstDDim[dim] = static_cast<int64_t>(slicelength);
  std::vector<int> axes({dim});
  std::vector<int> starts({static_cast<int>(start)});
  framework::Tensor *output = _getTensor(self, dstDDim);
  _sliceDapper<T>(&self, output, ctx, axes, starts, dstDDim.size());
  return output;
}

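// Note: a step-1 slice needs a single strided copy; otherwise each index is
// sliced separately and the pieces are concatenated along `dim`.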
template <typename T>
inline framework::Tensor *_sliceAndConcat(const framework::Tensor &self,
                                          py::object obj, int dim) {
  platform::CPUDeviceContext ctx;
  int64_t start, stop, step, slicelength;
  _getSliceinfo(self, obj, dim, &start, &stop, &step, &slicelength);
  if (step == 1 || slicelength == 1) {
    return _sliceWrapper<T>(self, ctx, obj, dim, start, slicelength);
  } else {
    std::vector<framework::Tensor> ins;
    for (auto i = 0; i < slicelength; ++i, start += step) {
      ins.emplace_back(*_sliceWrapper<T>(self, ctx, obj, dim, start, 1));
    }

    // do the concat operation
    framework::DDim dstDDim = self.dims();
    dstDDim[dim] = static_cast<int64_t>(slicelength);
    framework::Tensor *output1 = _getTensor(self, dstDDim);
    _concatCompute<T>(ins, output1, ctx, dim);
    return output1;
  }
}

inline framework::Tensor *_sliceTensor(const framework::Tensor &self,
                                       py::object obj, int dim) {
  auto src_type = self.type();
  switch (src_type) {
    case framework::proto::VarType::FP16:
      return _sliceAndConcat<paddle::platform::float16>(self, obj, dim);
    case framework::proto::VarType::FP32:
      return _sliceAndConcat<float>(self, obj, dim);
    case framework::proto::VarType::FP64:
      return _sliceAndConcat<double>(self, obj, dim);
    case framework::proto::VarType::INT8:
      return _sliceAndConcat<int8_t>(self, obj, dim);
    case framework::proto::VarType::INT16:
      return _sliceAndConcat<int16_t>(self, obj, dim);
    case framework::proto::VarType::INT32:
      return _sliceAndConcat<int>(self, obj, dim);
    case framework::proto::VarType::INT64:
      return _sliceAndConcat<int64_t>(self, obj, dim);
    case framework::proto::VarType::BOOL:
      return _sliceAndConcat<bool>(self, obj, dim);
    case framework::proto::VarType::UINT8:
      return _sliceAndConcat<uint8_t>(self, obj, dim);
    default:
      PADDLE_THROW(platform::errors::InvalidArgument(
          "Unsupported tensor type: %s",
          framework::DataTypeToString(src_type)));
  }
}

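// Note: applies a tuple of slice objects dimension by dimension; the
// unique_ptr keeps each intermediate tensor alive until the next step.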
inline framework::Tensor *_pySliceTensor(const framework::Tensor &self,
                                         py::object obj) {
  if (py::isinstance<py::tuple>(obj)) {
    py::list l = static_cast<py::list>(obj);
    std::unique_ptr<framework::Tensor> target;
    framework::Tensor *src = const_cast<framework::Tensor *>(&self);
    for (auto i = 0; i < static_cast<int>(l.size()); ++i) {
      src = _sliceTensor(*src, l[i], i);
      if (i + 1 == static_cast<int>(l.size())) {
        return src;
      } else {
        target.reset(src);
      }
    }
    return nullptr;
  } else {
    return _sliceTensor(self, obj, 0);
  }
}

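// Note: GPU tensors are copied to the CPU, sliced there, and the result is
// copied back to the original place.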
inline framework::Tensor *PySliceTensor(const framework::Tensor &self,
                                        py::object obj) {
  if (platform::is_gpu_place(self.place())) {
    std::unique_ptr<framework::Tensor> holder;
    framework::Tensor src;
    framework::TensorCopySync(self, platform::CPUPlace(), &src);
    framework::Tensor *output = _pySliceTensor(src, obj);
    holder.reset(output);
    framework::Tensor *dst = _getTensor(*output, output->dims());
    framework::TensorCopySync(*output, self.place(), dst);
    return dst;
  } else {
    return _pySliceTensor(self, obj);
  }
}

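// Note: exposes the tensor to Python as a numpy array. An initialized CPU
// tensor shares its buffer with the array unless need_deep_copy is set;
// device tensors are always copied back to the host.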
inline py::array TensorToPyArray(const framework::Tensor &tensor,
                                 bool need_deep_copy = false) {
  if (!tensor.IsInitialized()) {
    return py::array();
  }
  bool is_gpu_tensor = platform::is_gpu_place(tensor.place());
  bool is_xpu_tensor = platform::is_xpu_place(tensor.place());
  const auto &tensor_dims = tensor.dims();
  auto tensor_dtype = tensor.type();
  size_t sizeof_dtype = framework::SizeOfType(tensor_dtype);

  std::vector<size_t> py_dims(tensor_dims.size());
  std::vector<size_t> py_strides(tensor_dims.size());

  size_t numel = 1;
  for (int i = tensor_dims.size() - 1; i >= 0; --i) {
    py_dims[i] = (size_t)tensor_dims[i];
    py_strides[i] = sizeof_dtype * numel;
    numel *= py_dims[i];
  }

  const void *tensor_buf_ptr = tensor.data<void>();

  std::string py_dtype_str = details::TensorDTypeToPyDTypeStr(tensor.type());

  if (!is_gpu_tensor && !is_xpu_tensor) {
    if (!need_deep_copy) {
      auto base = py::cast(std::move(tensor));
      return py::array(py::dtype(py_dtype_str.c_str()), py_dims, py_strides,
                       const_cast<void *>(tensor_buf_ptr), base);
    } else {
      py::array py_arr(py::dtype(py_dtype_str.c_str()), py_dims, py_strides);
      PADDLE_ENFORCE_EQ(
          py_arr.writeable(), true,
          platform::errors::InvalidArgument(
              "PyArray is not writable, in which case memory leak "
              "or double free would occur"));
      PADDLE_ENFORCE_EQ(
          py_arr.owndata(), true,
          platform::errors::InvalidArgument(
              "PyArray does not own data, in which case memory leak "
              "or double free would occur"));
      platform::CPUPlace place;
      size_t copy_bytes = sizeof_dtype * numel;
      paddle::memory::Copy(place, py_arr.mutable_data(), place, tensor_buf_ptr,
                           copy_bytes);
      return py_arr;
    }
  } else if (is_xpu_tensor) {
#ifdef PADDLE_WITH_XPU
    py::array py_arr(py::dtype(py_dtype_str.c_str()), py_dims, py_strides);
    PADDLE_ENFORCE_EQ(py_arr.writeable(), true,
                      platform::errors::InvalidArgument(
                          "PyArray is not writable, in which case memory leak "
                          "or double free would occur"));
    PADDLE_ENFORCE_EQ(
        py_arr.owndata(), true,
        platform::errors::InvalidArgument(
            "PyArray does not own data, in which case memory leak "
            "or double free would occur"));

    size_t copy_bytes = sizeof_dtype * numel;
    auto p = BOOST_GET_CONST(platform::XPUPlace, tensor.place());
    paddle::memory::Copy(platform::CPUPlace(), py_arr.mutable_data(), p,
                         tensor_buf_ptr, copy_bytes);
    return py_arr;
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Cannot use XPUPlace in CPU/GPU version, "
        "Please recompile or reinstall Paddle with XPU support."));
#endif
  } else if (is_gpu_tensor) {
#ifdef PADDLE_WITH_CUDA
    py::array py_arr(py::dtype(py_dtype_str.c_str()), py_dims, py_strides);
    PADDLE_ENFORCE_EQ(py_arr.writeable(), true,
                      platform::errors::InvalidArgument(
                          "PyArray is not writable, in which case memory leak "
                          "or double free would occur"));
    PADDLE_ENFORCE_EQ(
        py_arr.owndata(), true,
        platform::errors::InvalidArgument(
            "PyArray does not own data, in which case memory leak "
            "or double free would occur"));

    size_t copy_bytes = sizeof_dtype * numel;
    paddle::platform::GpuMemcpySync(py_arr.mutable_data(), tensor_buf_ptr,
                                    copy_bytes, cudaMemcpyDeviceToHost);
    return py_arr;
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Cannot use CUDAPlace in CPU-only version, "
        "Please recompile or reinstall Paddle with CUDA support."));
#endif
  }
  PADDLE_THROW(platform::errors::Unimplemented("Place is not supported"));
  return py::array();
}

}  // namespace pybind
}  // namespace paddle