/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include "paddle/memory/memcpy.h"
#include "paddle/platform/enforce.h"

namespace paddle {
namespace framework {

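// SizeOfTypeFunctor maps a runtime std::type_index onto sizeof(T) for a
// compile-time list of candidate types; the specializations below walk the
// list head-first and return 0 when no candidate matches.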
template <typename... T>
struct SizeOfTypeFunctor;

template <typename T>
struct SizeOfTypeFunctor<T> {
  size_t operator()(std::type_index type) const {
    if (typeid(T).hash_code() == type.hash_code()) {
      return sizeof(T);
    } else {
      return 0UL;
    }
  }
};

template <>
struct SizeOfTypeFunctor<> {
  size_t operator()(std::type_index type) const { return 0UL; }
};

template <typename HEAD, typename... TAIL>
struct SizeOfTypeFunctor<HEAD, TAIL...> {
  size_t operator()(std::type_index type) const {
    SizeOfTypeFunctor<HEAD> head;
    size_t head_size = head(type);
    if (head_size != 0) {
      return head_size;
    }
    SizeOfTypeFunctor<TAIL...> tail;
    return tail(type);
  }
};

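// Returns the byte size of a supported tensor element type, and fails
// loudly for any type outside the list below.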
static inline size_t SizeOfType(std::type_index type) {
  SizeOfTypeFunctor<int, float, double, int16_t, int64_t> functor;
  size_t size = functor(type);
  PADDLE_ENFORCE(size != 0UL, "Cannot get size of type %s", type.name());
  return size;
}

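// Checks that the underlying allocation is large enough for the current
// dims_, element type, and offset_.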
inline void Tensor::check_memory_size() const {
  PADDLE_ENFORCE_NOT_NULL(
      holder_, "Tensor holds no memory. Call Tensor::mutable_data first.");
  PADDLE_ENFORCE_GE(
      holder_->size(), numel() * SizeOfType(type()) + offset_,
      "Tensor's dims_ is out of bound. Call Tensor::mutable_data "
      "first to re-allocate memory.\n"
      "or maybe the required data-type mismatches the data already stored.");
}

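// Typed read-only accessor; the non-const overload below is identical
// except for constness. T = void bypasses the type check.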
template <typename T>
inline const T* Tensor::data() const {
  check_memory_size();
  PADDLE_ENFORCE(std::is_same<T, void>::value ||
                     holder_->type().hash_code() == typeid(T).hash_code(),
                 "Tensor holds the wrong type, it holds %s",
                 this->holder_->type().name());

  return reinterpret_cast<const T*>(
      reinterpret_cast<uintptr_t>(holder_->ptr()) + offset_);
}

template <typename T>
inline T* Tensor::data() {
  check_memory_size();
  PADDLE_ENFORCE(std::is_same<T, void>::value ||
                     holder_->type().hash_code() == typeid(T).hash_code(),
                 "Tensor holds the wrong type, it holds %s",
                 this->holder_->type().name());
  return reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(holder_->ptr()) +
                              offset_);
}

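// Convenience overload: resize to `dims`, then allocate at `place`.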
template <typename T>
inline T* Tensor::mutable_data(DDim dims, platform::Place place) {
  static_assert(std::is_pod<T>::value, "T must be POD");
  Resize(dims);
  return mutable_data<T>(place);
}

template <typename T>
inline T* Tensor::mutable_data(platform::Place place) {
  static_assert(std::is_pod<T>::value, "T must be POD");
  return reinterpret_cast<T*>(mutable_data(place, typeid(T)));
}

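// Core allocation routine. Memory is only re-allocated when there is no
// holder yet, the place changes, or the existing allocation is too small;
// otherwise the current buffer (and offset_) is reused.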
inline void* Tensor::mutable_data(platform::Place place, std::type_index type) {
  if (holder_ != nullptr) {
    holder_->set_type(type);
  }
  PADDLE_ENFORCE_GT(numel(), 0,
                    "Tensor's numel must be larger than zero to call "
                    "Tensor::mutable_data. Call Tensor::set_dim first.");
  int64_t size = numel() * SizeOfType(type);
  /* some versions of boost::variant don't have operator!= */
  if (holder_ == nullptr || !(holder_->place() == place) ||
      holder_->size() < size + offset_) {
    if (platform::is_cpu_place(place)) {
      holder_.reset(new PlaceholderImpl<platform::CPUPlace>(
          boost::get<platform::CPUPlace>(place), size, type));
    } else if (platform::is_gpu_place(place)) {
#ifndef PADDLE_WITH_CUDA
      PADDLE_THROW("'GPUPlace' is not supported in a CPU-only build.");
    }
#else
      holder_.reset(new PlaceholderImpl<platform::GPUPlace>(
          boost::get<platform::GPUPlace>(place), size, type));
    }
#endif
    offset_ = 0;
  }
  return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(holder_->ptr()) +
                                 offset_);
}

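// Type-preserving overload: reuses the element type of the existing
// allocation, so the tensor must already hold memory.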
inline void* Tensor::mutable_data(platform::Place place) {
  PADDLE_ENFORCE(this->holder_ != nullptr,
                 "Cannot invoke mutable data if current hold nothing");
  return mutable_data(place, holder_->type());
}

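// Shares the underlying allocation with `src` (shallow copy of holder_,
// dims_, and offset_); no element data is copied.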
inline Tensor& Tensor::ShareDataWith(const Tensor& src) {
  src.check_memory_size();
  *this = src;
  return *this;
}

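// Deep-copies `src` into this tensor at `dst_place`, resizing first.
// Copies that involve a GPU run asynchronously on the stream of `ctx`,
// which must be a CUDA context on the GPU side of the transfer.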
inline void Tensor::CopyFrom(const Tensor& src,
                             const platform::Place& dst_place,
                             const platform::DeviceContext& ctx) {
  src.check_memory_size();
  Resize(src.dims());

  auto src_place = src.holder_->place();
  auto src_ptr = src.data<void>();

  auto dst_ptr = mutable_data(dst_place, src.type());

  auto size = src.numel() * SizeOfType(src.type());

  if (platform::is_cpu_place(src_place) && platform::is_cpu_place(dst_place)) {
    memory::Copy(boost::get<platform::CPUPlace>(dst_place), dst_ptr,
                 boost::get<platform::CPUPlace>(src_place), src_ptr, size);
  }
#ifdef PADDLE_WITH_CUDA
  else if (platform::is_gpu_place(src_place) &&
           platform::is_cpu_place(dst_place)) {
    auto src_gpu_place = boost::get<platform::GPUPlace>(src_place);
    auto dst_cpu_place = boost::get<platform::CPUPlace>(dst_place);
    auto ctx_place = ctx.GetPlace();
    PADDLE_ENFORCE(platform::is_gpu_place(ctx_place));
    auto ctx_gpu_place = boost::get<platform::GPUPlace>(ctx_place);
    PADDLE_ENFORCE_EQ(src_gpu_place, ctx_gpu_place);
    memory::Copy(
        dst_cpu_place, dst_ptr, src_gpu_place, src_ptr, size,
        reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream());
  } else if (platform::is_cpu_place(src_place) &&
             platform::is_gpu_place(dst_place)) {
    auto src_cpu_place = boost::get<platform::CPUPlace>(src_place);
    auto dst_gpu_place = boost::get<platform::GPUPlace>(dst_place);
    auto ctx_place = ctx.GetPlace();
    PADDLE_ENFORCE(platform::is_gpu_place(ctx_place));
    auto ctx_gpu_place = boost::get<platform::GPUPlace>(ctx_place);
    PADDLE_ENFORCE_EQ(dst_gpu_place, ctx_gpu_place);
    memory::Copy(
        dst_gpu_place, dst_ptr, src_cpu_place, src_ptr, size,
        reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream());
  } else if (platform::is_gpu_place(src_place) &&
             platform::is_gpu_place(dst_place)) {
    auto src_gpu_place = boost::get<platform::GPUPlace>(src_place);
    auto dst_gpu_place = boost::get<platform::GPUPlace>(dst_place);
    auto ctx_place = ctx.GetPlace();
    PADDLE_ENFORCE(platform::is_gpu_place(ctx_place));
    auto ctx_gpu_place = boost::get<platform::GPUPlace>(ctx_place);
    PADDLE_ENFORCE_EQ(src_gpu_place, ctx_gpu_place);
    memory::Copy(
        dst_gpu_place, dst_ptr, src_gpu_place, src_ptr, size,
        reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream());
  }
#endif
}

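// Copies a host-side std::vector into this tensor at the place of `ctx`;
// the caller is expected to Resize the tensor beforehand so that numel()
// matches src.size().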
template <typename T>
inline void Tensor::CopyFromVector(const std::vector<T>& src,
                                   const platform::DeviceContext& ctx) {
  auto dst_place = ctx.GetPlace();
  auto src_ptr = static_cast<const void*>(src.data());
  platform::CPUPlace src_place;
  auto dst_ptr = static_cast<void*>(mutable_data<T>(dst_place));
  auto size = src.size() * sizeof(T);

  if (platform::is_cpu_place(dst_place)) {
    memory::Copy(boost::get<platform::CPUPlace>(dst_place), dst_ptr, src_place,
                 src_ptr, size);
  }
#ifdef PADDLE_WITH_CUDA
  else if (platform::is_gpu_place(dst_place)) {
    memory::Copy(
        boost::get<platform::GPUPlace>(dst_place), dst_ptr, src_place, src_ptr,
        size,
        reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream());
  }
#endif
}

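// Returns a view of rows [begin_idx, end_idx): it shares holder_ with this
// tensor and differs only in dims_ and offset_, e.g.
//   Tensor row = t.Slice(i, i + 1);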
inline Tensor Tensor::Slice(const int& begin_idx, const int& end_idx) const {
  check_memory_size();
  PADDLE_ENFORCE_GE(begin_idx, 0,
                    "The start row index must be greater than or equal to 0.");
  PADDLE_ENFORCE_LE(end_idx, dims_[0], "The end row index is out of bounds.");
  PADDLE_ENFORCE_LT(
      begin_idx, end_idx,
      "The start row index must be smaller than the end row index.");

  if (dims_[0] == 1) {
    return *this;
  } else {
    size_t base = numel() / dims_[0];
    Tensor dst;
    dst.holder_ = holder_;
    DDim dst_dims = dims_;
    dst_dims[0] = end_idx - begin_idx;
    dst.Resize(dst_dims);
    dst.offset_ = offset_ + begin_idx * base * SizeOfType(type());
    return dst;
  }
}

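// Only records the new shape; memory is (re)acquired lazily by the next
// mutable_data call.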
inline Tensor& Tensor::Resize(const DDim& dims) {
  dims_ = dims;
  return *this;
}

inline const DDim& Tensor::dims() const { return dims_; }

inline int64_t Tensor::numel() const { return product(dims_); }

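// Flattens `src` to a 2-D matrix that shares its memory: dimensions
// [0, num_col_dims) collapse into rows and the remaining ones into columns.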
inline Tensor ReshapeToMatrix(const Tensor& src, int num_col_dims) {
  Tensor res;
  res.ShareDataWith(src);
  res.Resize(flatten_to_2d(src.dims(), num_col_dims));
  return res;
}

}  // namespace framework
}  // namespace paddle