/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/float16.h"

namespace paddle {
namespace framework {

// Size in bytes of one element of the given runtime type; defined in the
// corresponding .cc file.
extern size_t SizeOfType(std::type_index type);
// Verifies that this tensor owns memory and that the bytes implied by its
// shape and element type fit inside the remaining allocation. Aborts via
// PADDLE_ENFORCE on failure.
inline void Tensor::check_memory_size() const {
  // Fail fast if mutable_data was never called: nothing backs this tensor.
  PADDLE_ENFORCE_NOT_NULL(
      holder_, "Tensor holds no memory. Call Tensor::mutable_data first.");
  // dims_ * element size must not exceed holder_->size() - offset_
  // (see memory_size()). A mismatch usually means the tensor was resized
  // without re-allocating, or is being read with the wrong data type.
  PADDLE_ENFORCE_LE(
      numel() * SizeOfType(type()), memory_size(),
      "Tensor's dims_ is out of bound. Call Tensor::mutable_data "
      "first to re-allocate memory.\n"
      "or maybe the required data-type mismatches the data already stored.");
}

Y
Yu Yang 已提交
34
inline size_t Tensor::memory_size() const {
Y
Yu Yang 已提交
35
  return holder_ == nullptr ? 0UL : holder_->size() - offset_;
Y
Yu Yang 已提交
36 37
}

L
liaogang 已提交
38 39
template <typename T>
inline const T* Tensor::data() const {
40 41 42 43 44 45
  check_memory_size();
  PADDLE_ENFORCE(std::is_same<T, void>::value ||
                     holder_->type().hash_code() == typeid(T).hash_code(),
                 "Tensor holds the wrong type, it holds %s",
                 this->holder_->type().name());

L
liaogang 已提交
46 47 48 49
  return reinterpret_cast<const T*>(
      reinterpret_cast<uintptr_t>(holder_->ptr()) + offset_);
}

50 51
inline bool Tensor::IsInitialized() const { return holder_ != nullptr; }

L
liaogang 已提交
52 53
template <typename T>
inline T* Tensor::data() {
54 55 56 57 58
  check_memory_size();
  PADDLE_ENFORCE(std::is_same<T, void>::value ||
                     holder_->type().hash_code() == typeid(T).hash_code(),
                 "Tensor holds the wrong type, it holds %s",
                 this->holder_->type().name());
L
liaogang 已提交
59 60 61 62 63
  return reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(holder_->ptr()) +
                              offset_);
}

// Convenience overload: resize to `dims`, then allocate (if needed) and
// return a typed pointer to the buffer on `place`.
template <typename T>
inline T* Tensor::mutable_data(DDim dims, platform::Place place) {
  static_assert(std::is_pod<T>::value, "T must be POD");
  Resize(dims);
  return mutable_data<T>(place);
}

template <typename T>
C
chengduoZH 已提交
71
inline T* Tensor::mutable_data(platform::Place place) {
72
  static_assert(std::is_pod<T>::value, "T must be POD");
C
chengduoZH 已提交
73
  return reinterpret_cast<T*>(mutable_data(place, typeid(T)));
74 75
}

C
chengduoZH 已提交
76
inline void* Tensor::mutable_data(platform::Place place, std::type_index type) {
77 78 79
  if (holder_ != nullptr) {
    holder_->set_type(type);
  }
Q
Qiao Longfei 已提交
80 81 82 83
  PADDLE_ENFORCE_GE(numel(), 0,
                    "When calling this method, the Tensor's numel must be "
                    "equal or larger than zero. "
                    "Please check Tensor::Resize has been called first.");
84
  int64_t size = numel() * SizeOfType(type);
L
liaogang 已提交
85 86 87 88
  /* some versions of boost::variant don't have operator!= */
  if (holder_ == nullptr || !(holder_->place() == place) ||
      holder_->size() < size + offset_) {
    if (platform::is_cpu_place(place)) {
89
      holder_.reset(new PlaceholderImpl<platform::CPUPlace>(
C
chengduoZH 已提交
90
          boost::get<platform::CPUPlace>(place), size, type));
C
chengduoZH 已提交
91 92
    } else if (platform::is_gpu_place(place) ||
               platform::is_cuda_pinned_place(place)) {
93
#ifndef PADDLE_WITH_CUDA
C
chengduoZH 已提交
94
      PADDLE_THROW(
C
chengduoZH 已提交
95
          "CUDAPlace or CUDAPinnedPlace is not supported in CPU-only mode.");
L
liaogang 已提交
96
    }
Q
qijun 已提交
97
#else
C
chengduoZH 已提交
98 99 100 101 102 103 104
      if (platform::is_gpu_place(place)) {
        holder_.reset(new PlaceholderImpl<platform::CUDAPlace>(
            boost::get<platform::CUDAPlace>(place), size, type));
      } else if (platform::is_cuda_pinned_place(place)) {
        holder_.reset(new PlaceholderImpl<platform::CUDAPinnedPlace>(
            boost::get<platform::CUDAPinnedPlace>(place), size, type));
      }
L
liaogang 已提交
105 106 107 108
    }
#endif
    offset_ = 0;
  }
109 110 111 112
  return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(holder_->ptr()) +
                                 offset_);
}

C
chengduoZH 已提交
113
inline void* Tensor::mutable_data(platform::Place place) {
114
  PADDLE_ENFORCE(this->holder_ != nullptr,
C
chengduoZH 已提交
115
                 "Cannot invoke mutable data if current hold nothing.");
C
chengduoZH 已提交
116
  return mutable_data(place, holder_->type());
L
liaogang 已提交
117 118
}

// Makes this tensor a shallow copy of `src`: holder_, dims_, offset_ and
// layout are all shared/copied, so both tensors view the same memory.
inline Tensor& Tensor::ShareDataWith(const Tensor& src) {
  // src must actually own memory before it can be shared.
  src.check_memory_size();
  *this = src;
  return *this;
}

Y
Yu Yang 已提交
125
inline Tensor Tensor::Slice(int begin_idx, int end_idx) const {
126
  check_memory_size();
127 128 129
  PADDLE_ENFORCE_GE(begin_idx, 0,
                    "The start row index must be greater than 0.");
  PADDLE_ENFORCE_LE(end_idx, dims_[0], "The end row index is out of bound.");
C
caoying03 已提交
130 131
  PADDLE_ENFORCE_LT(
      begin_idx, end_idx,
C
caoying03 已提交
132
      "The start row index must be lesser than the end row index.");
H
hedaoyuan 已提交
133 134 135 136 137 138 139

  if (dims_[0] == 1) {
    return *this;
  } else {
    size_t base = numel() / dims_[0];
    Tensor dst;
    dst.holder_ = holder_;
D
dzhwinter 已提交
140
    dst.set_layout(layout_);
H
hedaoyuan 已提交
141 142 143
    DDim dst_dims = dims_;
    dst_dims[0] = end_idx - begin_idx;
    dst.Resize(dst_dims);
144
    dst.offset_ = offset_ + begin_idx * base * SizeOfType(type());
H
hedaoyuan 已提交
145 146
    return dst;
  }
L
liaogang 已提交
147 148
}

// Sets the tensor's shape. Does NOT allocate or shrink memory; call
// mutable_data afterwards to make the allocation match the new shape.
inline Tensor& Tensor::Resize(const DDim& dims) {
  dims_ = dims;
  return *this;
}

// Accessor for the tensor's shape descriptor.
inline const DDim& Tensor::dims() const { return dims_; }

// Total element count: the product of all entries in dims_.
inline int64_t Tensor::numel() const { return product(dims_); }

F
fengjiayi 已提交
158
inline Tensor ReshapeToMatrix(const Tensor& src, int num_col_dims) {
F
WIP  
fengjiayi 已提交
159
  Tensor res;
160
  res.ShareDataWith(src);
F
fengjiayi 已提交
161
  res.Resize(flatten_to_2d(src.dims(), num_col_dims));
F
WIP  
fengjiayi 已提交
162 163 164
  return res;
}

}  // namespace framework
}  // namespace paddle