/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License. */

#pragma once
#include <cstring>
#include <string>
#include <tuple>
#include <typeindex>
#include <vector>

#include "paddle/framework/executor.h"
#include "paddle/framework/tensor.h"
#include "paddle/memory/memcpy.h"
#include "pybind11/numpy.h"
#include "pybind11/pybind11.h"

namespace py = pybind11;

namespace paddle {
namespace pybind {

namespace details {

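// Compile-time dispatch over the candidate element types ARGS...: the I-th
// type is tested against the tensor's runtime type, and `less` records
// whether any candidates remain beyond index I.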
template <bool less, size_t I, typename... ARGS>
struct CastToPyBufferImpl;

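// Terminal specialization: every candidate type has been tried without a
// match, so the tensor cannot be exposed as a Python buffer.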
template <size_t I, typename... ARGS>
struct CastToPyBufferImpl<false, I, ARGS...> {
  py::buffer_info operator()(framework::Tensor &tensor) {
    PADDLE_THROW("This type of tensor cannot be expose to Python");
    return py::buffer_info();
  }
};

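// Active specialization: test the I-th candidate type against the tensor's
// runtime element type.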
template <size_t I, typename... ARGS>
struct CastToPyBufferImpl<true, I, ARGS...> {
  using CUR_TYPE = typename std::tuple_element<I, std::tuple<ARGS...>>::type;
  py::buffer_info operator()(framework::Tensor &tensor) {
    if (std::type_index(typeid(CUR_TYPE)) == tensor.type()) {
      auto dim_vec = framework::vectorize(tensor.dims());
      std::vector<size_t> dims_outside;
      std::vector<size_t> strides;
      dims_outside.resize(dim_vec.size());
      strides.resize(dim_vec.size());

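      // Build row-major (C-order) byte strides, walking dimensions from the
      // innermost outward.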
      size_t prod = 1;
      for (size_t i = dim_vec.size(); i != 0; --i) {
        dims_outside[i - 1] = (size_t)dim_vec[i - 1];
        strides[i - 1] = sizeof(CUR_TYPE) * prod;
        prod *= dims_outside[i - 1];
      }
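
      // A Python buffer must point at host memory, so a GPU-resident tensor
      // is first staged into a CPU tensor.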
      framework::Tensor dst_tensor;
      if (paddle::platform::is_gpu_place(tensor.place())) {
#ifdef PADDLE_WITH_CUDA
        auto *src_ptr = static_cast<const void *>(tensor.data<CUR_TYPE>());
        auto *dst_ptr = static_cast<void *>(dst_tensor.mutable_data<CUR_TYPE>(
            tensor.dims(), platform::CPUPlace()));

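        // Borrow the CUDA device context of the tensor's place and enqueue
        // an asynchronous device-to-host copy on its stream.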
        framework::DeviceContextPool &pool =
            framework::DeviceContextPool::Get();
        auto dev_ctx = static_cast<const platform::CUDADeviceContext *>(
            pool.Borrow(tensor.place()));

        paddle::platform::GpuMemcpyAsync(
            dst_ptr, src_ptr, sizeof(CUR_TYPE) * tensor.numel(),
            cudaMemcpyDeviceToHost, dev_ctx->stream());
#else
        PADDLE_THROW("'GPUPlace' is not supported in CPU only device.");
#endif
      } else if (paddle::platform::is_cpu_place(tensor.place())) {
        dst_tensor = tensor;
      }
      return py::buffer_info(
          dst_tensor.mutable_data<CUR_TYPE>(dst_tensor.place()),
          sizeof(CUR_TYPE), py::format_descriptor<CUR_TYPE>::format(),
          (size_t)framework::arity(dst_tensor.dims()), dims_outside, strides);
    } else {
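      // The I-th type did not match; recurse on the next candidate.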
      constexpr bool less = I + 1 < std::tuple_size<std::tuple<ARGS...>>::value;
      return CastToPyBufferImpl<less, I + 1, ARGS...>()(tensor);
    }
  }
};
}  // namespace details
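
// Expose a Tensor to Python via the buffer protocol, dispatching over the
// supported element types: float, int, double, int64_t, and bool.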
inline py::buffer_info CastToPyBuffer(framework::Tensor &tensor) {
  auto buffer_info =
      details::CastToPyBufferImpl<true, 0, float, int, double, int64_t, bool>()(
          tensor);
  return buffer_info;
}
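
// A minimal usage sketch (hypothetical binding code, not part of this
// header), assuming a module `m` and a Tensor class declared with
// py::buffer_protocol():
//
//   py::class_<framework::Tensor>(m, "Tensor", py::buffer_protocol())
//       .def_buffer(
//           [](framework::Tensor &self) { return CastToPyBuffer(self); });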

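// Single-element accessors; both require the tensor to reside in CPU memory.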
template <typename T>
T TensorGetElement(framework::Tensor &self, size_t offset) {
  PADDLE_ENFORCE(platform::is_cpu_place(self.place()));
  return self.data<T>()[offset];
}

template <typename T>
void TensorSetElement(framework::Tensor &self, size_t offset, T elem) {
  PADDLE_ENFORCE(platform::is_cpu_place(self.place()));
  self.data<T>()[offset] = elem;
}

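// Copy a NumPy array (C-contiguous, force-cast to T) into a CPU tensor,
// resizing the tensor to the array's shape.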
template <typename T>
void PyCPUTensorSetFromArray(
    framework::Tensor &self,
    py::array_t<T, py::array::c_style | py::array::forcecast> array,
    paddle::platform::CPUPlace &place) {
  std::vector<int64_t> dims;
  dims.reserve(array.ndim());
  for (size_t i = 0; i < array.ndim(); ++i) {
    dims.push_back(static_cast<int64_t>(array.shape()[i]));
  }

  self.Resize(framework::make_ddim(dims));
  auto *dst = self.mutable_data<T>(place);
  std::memcpy(dst, array.data(), sizeof(T) * array.size());
}

#ifdef PADDLE_WITH_CUDA
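// Copy a NumPy array into a GPU tensor via an asynchronous host-to-device
// memcpy issued on the device context's stream.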
template <typename T>
void PyCUDATensorSetFromArray(
    framework::Tensor &self,
    py::array_t<T, py::array::c_style | py::array::forcecast> array,
    paddle::platform::GPUPlace &place) {
  std::vector<int64_t> dims;
  dims.reserve(array.ndim());
  for (size_t i = 0; i < array.ndim(); ++i) {
    dims.push_back(static_cast<int64_t>(array.shape()[i]));
  }

  self.Resize(framework::make_ddim(dims));
  auto *dst = self.mutable_data<T>(place);

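  // Borrow the CUDA device context of the destination place and enqueue the
  // copy on its stream.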
  framework::DeviceContextPool &pool = framework::DeviceContextPool::Get();
  auto dev_ctx =
      static_cast<const platform::CUDADeviceContext *>(pool.Borrow(place));
  paddle::platform::GpuMemcpyAsync(dst, array.data(), sizeof(T) * array.size(),
                                   cudaMemcpyHostToDevice, dev_ctx->stream());
}
#endif

}  // namespace pybind
}  // namespace paddle