/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/float16.h"

namespace paddle {
namespace framework {

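// SizeOfTypeFunctor matches a runtime std::type_index against a compile-time
// list of candidate types: it returns sizeof() of the candidate whose typeid
// matches, or 0UL when none of the candidates match.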
template <typename... T>
struct SizeOfTypeFunctor;

template <typename T>
struct SizeOfTypeFunctor<T> {
  size_t operator()(std::type_index type) const {
    if (typeid(T).hash_code() == type.hash_code()) {
      return sizeof(T);
    } else {
      return 0UL;
    }
  }
};

template <>
struct SizeOfTypeFunctor<> {
  size_t operator()(std::type_index type) const { return 0UL; }
};

template <typename HEAD, typename... TAIL>
struct SizeOfTypeFunctor<HEAD, TAIL...> {
  size_t operator()(std::type_index type) const {
    SizeOfTypeFunctor<HEAD> head;
    size_t head_size = head(type);
    if (head_size != 0) {
      return head_size;
    }
    SizeOfTypeFunctor<TAIL...> tail;
    return tail(type);
  }
};

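// SizeOfType resolves the element size for the type a Tensor reports at
// runtime; an unsupported type fails the PADDLE_ENFORCE check inside.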
static inline size_t SizeOfType(std::type_index type) {
  SizeOfTypeFunctor<int, float, double, int16_t, int64_t, bool, size_t,
                    platform::float16>
      functor;
  size_t size = functor(type);
  PADDLE_ENFORCE(size != 0UL, "Cannot get size of type %s", type.name());
  return size;
}

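// check_memory_size verifies that this Tensor owns an allocation large enough
// to hold numel() elements of the stored type, starting at offset_.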
inline void Tensor::check_memory_size() const {
  PADDLE_ENFORCE_NOT_NULL(
      holder_, "Tensor holds no memory. Call Tensor::mutable_data first.");
  PADDLE_ENFORCE_LE(
      numel() * SizeOfType(type()), memory_size(),
      "Tensor's dims_ is out of bound. Call Tensor::mutable_data "
      "first to re-allocate memory.\n"
      "or maybe the required data-type mismatches the data already stored.");
}

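// memory_size is the number of bytes available to this Tensor inside the
// shared holder, i.e. the holder's size minus this Tensor's byte offset.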
inline size_t Tensor::memory_size() const {
  return holder_ == nullptr ? 0UL : holder_->size() - offset_;
}

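// data<T>() returns a typed pointer into the shared allocation after checking
// that memory has been allocated and that T is void or matches the stored
// type.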
template <typename T>
inline const T* Tensor::data() const {
  check_memory_size();
  PADDLE_ENFORCE(std::is_same<T, void>::value ||
                     holder_->type().hash_code() == typeid(T).hash_code(),
                 "Tensor holds the wrong type, it holds %s",
                 this->holder_->type().name());

  return reinterpret_cast<const T*>(
      reinterpret_cast<uintptr_t>(holder_->ptr()) + offset_);
}

inline bool Tensor::IsInitialized() const { return holder_ != nullptr; }

template <typename T>
inline T* Tensor::data() {
  check_memory_size();
  PADDLE_ENFORCE(std::is_same<T, void>::value ||
                     holder_->type().hash_code() == typeid(T).hash_code(),
                 "Tensor holds the wrong type, it holds %s",
                 this->holder_->type().name());
  return reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(holder_->ptr()) +
                              offset_);
}

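// mutable_data(dims, place) resizes the Tensor and then allocates (or reuses)
// memory of the matching size on the given place. Typical use (illustrative
// sketch only):
//   Tensor t;
//   float* buf =
//       t.mutable_data<float>(make_ddim({2, 3}), platform::CPUPlace());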
template <typename T>
inline T* Tensor::mutable_data(DDim dims, platform::Place place) {
  static_assert(std::is_pod<T>::value, "T must be POD");
  Resize(dims);
  return mutable_data<T>(place);
}

template <typename T>
inline T* Tensor::mutable_data(platform::Place place) {
  static_assert(std::is_pod<T>::value, "T must be POD");
  return reinterpret_cast<T*>(mutable_data(place, typeid(T)));
}

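// The untyped overload does the actual allocation: it reuses the existing
// holder when the place matches and the current allocation is large enough,
// and otherwise allocates a fresh Placeholder of numel() * SizeOfType(type)
// bytes and resets offset_ to 0.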
inline void* Tensor::mutable_data(platform::Place place, std::type_index type) {
  if (holder_ != nullptr) {
    holder_->set_type(type);
  }
  PADDLE_ENFORCE_GT(
      numel(), 0,
      "When calling this method, the Tensor's numel must be larger than zero. "
      "Please check Tensor::Resize has been called first.");
  int64_t size = numel() * SizeOfType(type);
  /* some versions of boost::variant don't have operator!= */
  if (holder_ == nullptr || !(holder_->place() == place) ||
      holder_->size() < size + offset_) {
    if (platform::is_cpu_place(place)) {
      holder_.reset(new PlaceholderImpl<platform::CPUPlace>(
          boost::get<platform::CPUPlace>(place), size, type));
    } else if (platform::is_gpu_place(place)) {
#ifndef PADDLE_WITH_CUDA
      PADDLE_THROW("'CUDAPlace' is not supported in a CPU-only build.");
    }
#else
      holder_.reset(new PlaceholderImpl<platform::CUDAPlace>(
          boost::get<platform::CUDAPlace>(place), size, type));
    }
#endif
    offset_ = 0;
  }
  return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(holder_->ptr()) +
                                 offset_);
}

inline void* Tensor::mutable_data(platform::Place place) {
  PADDLE_ENFORCE(this->holder_ != nullptr,
                 "Cannot invoke mutable data if current hold nothing");
  return mutable_data(place, holder_->type());
}

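// ShareDataWith turns this Tensor into an alias of src: both share the same
// holder, so no data is copied.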
inline Tensor& Tensor::ShareDataWith(const Tensor& src) {
  src.check_memory_size();
  *this = src;
  return *this;
}

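// Slice returns a Tensor viewing rows [begin_idx, end_idx) of this Tensor.
// The result shares the same holder; only dims_ and offset_ change, so no
// data is copied.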
inline Tensor Tensor::Slice(int begin_idx, int end_idx) const {
  check_memory_size();
  PADDLE_ENFORCE_GE(begin_idx, 0,
                    "The start row index must not be less than 0.");
  PADDLE_ENFORCE_LE(end_idx, dims_[0], "The end row index is out of bounds.");
  PADDLE_ENFORCE_LT(
      begin_idx, end_idx,
      "The start row index must be lesser than the end row index.");

  if (dims_[0] == 1) {
    return *this;
  } else {
    size_t base = numel() / dims_[0];
    Tensor dst;
    dst.holder_ = holder_;
    dst.set_layout(layout_);
    DDim dst_dims = dims_;
    dst_dims[0] = end_idx - begin_idx;
    dst.Resize(dst_dims);
    dst.offset_ = offset_ + begin_idx * base * SizeOfType(type());
    return dst;
  }
}

inline Tensor& Tensor::Resize(const DDim& dims) {
  dims_ = dims;
  return *this;
}

inline const DDim& Tensor::dims() const { return dims_; }

inline int64_t Tensor::numel() const { return product(dims_); }

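// ReshapeToMatrix flattens src into a 2-D Tensor that shares src's data: the
// leading num_col_dims dimensions collapse into the first matrix dimension
// and the remaining dimensions into the second.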
inline Tensor ReshapeToMatrix(const Tensor& src, int num_col_dims) {
  Tensor res;
  res.ShareDataWith(src);
  res.Resize(flatten_to_2d(src.dims(), num_col_dims));
  return res;
}

}  // namespace framework
}  // namespace paddle