/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <cstdint>
#include <cstring>
#include <fstream>
#include <memory>
#include <string>
#include <type_traits>
#include <typeindex>
#include <vector>

#include "common/enforce.h"
#include "framework/data_layout.h"
#include "framework/tensor_base.h"
#include "memory/t_malloc.h"

namespace paddle_mobile {
namespace framework {

class LoDTensor;

class Tensor : public TensorBase {
 public:
  Tensor() {}
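
  // Usage sketch for the vector constructor below (illustrative values;
  // assumes the framework::make_ddim helper): the flat vector's length must
  // equal framework::product(ddim).
  //   std::vector<float> v = {1, 2, 3, 4, 5, 6};
  //   Tensor t(v, make_ddim({2, 3}));  // 2 x 3 tensor initialized from v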
  template <typename T>
  Tensor(std::vector<T> input, DDim ddim) {
    PADDLE_MOBILE_ENFORCE(
        input.size() == framework::product(ddim),
        "input vector'length should be equal to tensor's length");

    auto input_ptr = mutable_data<T>(ddim);
    for (int i = 0; i < input.size(); ++i) {
      input_ptr[i] = input[i];
    }
  }

  Tensor(const Tensor &inTensor) {
    this->dims_ = inTensor.dims_;
    this->holder_ = inTensor.holder_;
    this->offset_ = inTensor.offset_;
  }

  /*! Resize the dimensions of the memory block. */
  inline Tensor &Resize(const DDim &dims) {
    dims_ = dims;
    return *this;
  }

  /*! Make the two tensors share the same underlying memory block. */
  inline Tensor &ShareDataWith(const Tensor &src) {
    src.check_memory_size();
    if (holder_.get() != src.holder_.get()) {
      *this = src;
    }
    return *this;
  }

  inline void *mutable_data(std::type_index type) {
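    // Reuses the existing holder when it is large enough to hold
    // numel() * SizeOfType(type) bytes past offset_; otherwise a fresh block
    // is allocated and offset_ is reset to 0 (old contents are not copied).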
    if (holder_ != nullptr) {
      holder_->set_type(type);
    }
    PADDLE_MOBILE_ENFORCE(numel() >= 0, "the Tensor's numel must be >= 0.")
    int64_t size = numel() * SizeOfType(type);
    if (holder_ == nullptr || holder_->size() < size + offset_) {
      holder_.reset(new PlaceholderImpl(size, type));
      offset_ = 0;
    }
    return reinterpret_cast<void *>(
        reinterpret_cast<uintptr_t>(holder_->ptr()) + offset_);
  }

  /**
   * @brief   Return a pointer to mutable memory block.
   * @note    Memory is allocated if it does not exist yet.
   */
  template <typename T>
  inline T *mutable_data() {
    static_assert(std::is_pod<T>::value, "T must be POD");
    return reinterpret_cast<T *>(mutable_data(typeid(T)));
  }

  /**
   * @brief     Return a pointer to mutable memory block.
   *
   * @param[in] dims    The dimensions of the memory block.
   *
   * @note      Memory is allocated if it does not exist yet.
   */
  template <typename T>
  inline T *mutable_data(DDim dims) {
    static_assert(std::is_pod<T>::value, "T must be POD");
    Resize(dims);
    return mutable_data<T>();
  }

  /**
   * @brief  Return a sub-tensor of the given tensor.
   *
   * @param[in] begin_idx   The index of the start row (inclusive) to slice.
   *                        The index number begins from 0.
   * @param[in] end_idx     The index of the end row (exclusive) to slice.
   *                        The index number begins from 0.
   */
  inline Tensor Slice(int begin_idx, int end_idx) const {
    check_memory_size();
    PADDLE_MOBILE_ENFORCE(begin_idx >= 0,
                          "The start row index must not be negative.")
    PADDLE_MOBILE_ENFORCE(end_idx <= dims_[0],
                          "The end row index is out of bound.")
    PADDLE_MOBILE_ENFORCE(
        begin_idx < end_idx,
        "The start row index must be less than the end row index.")
    if (dims_[0] == 1) {
      return *this;
    } else {
      size_t base = numel() / dims_[0];
      Tensor dst;
      dst.holder_ = holder_;
      DDim dst_dims = dims_;
      dst_dims[0] = end_idx - begin_idx;
      dst.Resize(dst_dims);
      dst.offset_ = offset_ + begin_idx * base * SizeOfType(type());
      return dst;
    }
  }
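
  // Slice usage sketch (illustrative values; assumes framework::make_ddim):
  // the sub-tensor shares the original memory block and only adjusts dims_
  // and offset_, so no data is copied.
  //   Tensor batch;
  //   batch.mutable_data<float>(make_ddim({4, 3}));
  //   Tensor rows = batch.Slice(1, 3);  // dims {2, 3}, same storage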

  /*! Return a pointer to mutable memory block. */
  template <typename T>
  inline T *data() {
    check_memory_size();
    PADDLE_MOBILE_ENFORCE(
        (std::is_same<T, void>::value ||
         holder_->type().hash_code() == typeid(T).hash_code()),
        "Tensor holds the wrong type, it holds %s, requested %s",
        this->holder_->type().name(), typeid(T).name());

    return reinterpret_cast<T *>(reinterpret_cast<uintptr_t>(holder_->ptr()) +
                                 offset_);
  }

  /*! Return a pointer to constant memory block. */
  template <typename T>
  inline const T *data() const {
    check_memory_size();
    PADDLE_MOBILE_ENFORCE(
        (std::is_same<T, void>::value ||
         holder_->type().hash_code() == typeid(T).hash_code()),
        "Tensor holds the wrong type, it holds %s, requested %s",
        this->holder_->type().name(), typeid(T).name());

    return reinterpret_cast<const T *>(
        reinterpret_cast<uintptr_t>(holder_->ptr()) + offset_);
  }

 private:
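  // PlaceholderImpl owns the raw byte buffer: memory comes from
  // memory::Alloc and is released through memory::PODDeleter once the last
  // Tensor referencing the holder goes away; ShareDataWith, the copy
  // constructor, and Slice all hand out views onto the same holder.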
  struct PlaceholderImpl : public Placeholder {
    PlaceholderImpl(size_t size, std::type_index type)
        : ptr_(static_cast<uint8_t *>(memory::Alloc(size)),
               memory::PODDeleter<uint8_t>()),
          size_(size),
          type_(type) {
      PADDLE_MOBILE_ENFORCE(ptr_ != nullptr,
                            "Insufficient memory to allocation");
    }

    virtual size_t size() const { return size_; }

    virtual void *ptr() const { return static_cast<void *>(ptr_.get()); }

    virtual std::type_index type() const { return type_; }

    virtual void set_type(std::type_index type) { type_ = type; }

    std::unique_ptr<uint8_t, memory::PODDeleter<uint8_t>> ptr_;

    /*! the size of memory block. */
    size_t size_;

    /* the current type of memory */
    std::type_index type_;
  };

#ifdef PADDLE_MOBILE_FPGA
 public:  // NOLINT
  inline void reset_data_ptr(void *p) {
    ((PlaceholderImpl *)(holder_.get()))->ptr_.reset((uint8_t *)p);  // NOLINT
  }

  inline void *init(std::type_index type) {
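    // FPGA-only variant of mutable_data(type): it sizes the buffer for
    // exactly one element (1 * SizeOfType(type)) regardless of dims_,
    // presumably so reset_data_ptr can later attach an externally
    // allocated buffer.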
    if (holder_ != nullptr) {
      holder_->set_type(type);
    }
    PADDLE_MOBILE_ENFORCE(numel() >= 0, "the Tensor's numel must be >= 0.")
    int64_t size = 1 * SizeOfType(type);
    if (holder_ == nullptr || holder_->size() < size + offset_) {
      holder_.reset(new PlaceholderImpl(size, type));
      offset_ = 0;
    }
    return reinterpret_cast<void *>(
        reinterpret_cast<uintptr_t>(holder_->ptr()) + offset_);
  }

  float scale[2];  // scale[0]= MAX/127.0, scale[1]= 127.0/MAX
#endif
};

#ifdef PADDLE_MOBILE_DEBUG
inline Print &operator<<(Print &printer, const Tensor &tensor) {
  printer << " dims: " << tensor.dims() << "\n";
  int stride = tensor.numel() / 20;
  stride = stride > 0 ? stride : 1;
#ifndef PADDLE_MOBILE_FPGA
  for (int i = 0; i < tensor.numel(); i += stride) {
    if (tensor.type() == typeid(float)) {
      printer << tensor.data<float>()[i] << " ";
    } else if (tensor.type() == typeid(int32_t)) {
      printer << tensor.data<int32_t>()[i] << " ";
    } else if (tensor.type() == typeid(int64_t)) {
      printer << tensor.data<int64_t>()[i] << " ";
    } else if (tensor.type() == typeid(int8_t)) {
      printer << static_cast<int>(tensor.data<int8_t>()[i]) << " ";
    }
  }
#endif
  return printer;
}

#endif

inline Tensor ReshapeToMatrix(const Tensor &src, int num_col_dims) {
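  // Produces a 2-D view sharing src's data. Assuming the usual flatten_to_2d
  // semantics, dimensions before num_col_dims fold into the row count and the
  // rest into the column count, e.g. dims {2, 3, 4, 5} with num_col_dims = 2
  // become {6, 20}.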
  Tensor res;
  res.ShareDataWith(src);
  res.Resize(flatten_to_2d(src.dims(), num_col_dims));
  return res;
}

}  // namespace framework
}  // namespace paddle_mobile