/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <cstdint>
#include <cstring>
#include <fstream>
#include <memory>
#include <string>
#include <type_traits>
#include <typeindex>
#include <vector>

#include "common/enforce.h"
#include "framework/data_layout.h"
#include "framework/tensor_base.h"
#include "memory/t_malloc.h"

namespace paddle_mobile {
namespace framework {

class LoDTensor;

class Tensor : public TensorBase {
 public:
  Tensor() {}
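
  // Build a tensor of shape `ddim` and copy the elements of `input` into
  // freshly allocated memory; input.size() must equal product(ddim).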
  template <typename T>
  Tensor(std::vector<T> input, DDim ddim) {
    PADDLE_MOBILE_ENFORCE(
        input.size() == framework::product(ddim),
        "input vector'length should be equal to tensor's length");

    auto input_ptr = mutable_data<T>(ddim);
    for (size_t i = 0; i < input.size(); ++i) {
      input_ptr[i] = input[i];
    }
  }

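  // Copy constructor: shares the memory holder, dims and offset with
  // inTensor (no element data is copied).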
  Tensor(const Tensor &inTensor) {
    this->dims_ = inTensor.dims_;
    this->holder_ = inTensor.holder_;
    this->offset_ = inTensor.offset_;
  }

#ifdef PADDLE_MOBILE_DEBUG
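  // Dump every element, followed by the shape, to a text file (debug builds).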
  template <typename T>
  inline void dump(std::string filename) const {
    const T *dataptr = data<T>();
    std::ofstream out(filename.c_str());
    for (int i = 0; i < numel(); ++i) {
      out << dataptr[i] << " ";
    }
    out << "形状:";
    for (int j = 0; j < dims_.size(); ++j) {
      out << dims_[j] << " ";
    }
    out.close();
  }
#endif

  /*! Resize the dimensions of the memory block. */
  inline Tensor &Resize(const DDim &dims) {
    dims_ = dims;
    return *this;
  }

  /*! The internal of two tensors share the same memory block. */
  inline Tensor &ShareDataWith(const Tensor &src) {
    src.check_memory_size();
    if (holder_.get() != src.holder_.get()) {
      *this = src;
    }
    return *this;
  }

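  // Return a pointer to a buffer that can hold numel() elements of `type`,
  // reallocating the holder if the current one is missing or too small.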
  inline void *mutable_data(std::type_index type) {
    if (holder_ != nullptr) {
      holder_->set_type(type);
    }
    PADDLE_MOBILE_ENFORCE(numel() >= 0, "the Tensor's numel must be >= 0.")
    int64_t size = numel() * SizeOfType(type);
    if (holder_ == nullptr || holder_->size() < size + offset_) {
      holder_.reset(new PlaceholderImpl(size, type));
      offset_ = 0;
    }
    return reinterpret_cast<void *>(
        reinterpret_cast<uintptr_t>(holder_->ptr()) + offset_);
  }

  /**
   * @brief   Return a pointer to mutable memory block.
   * @note    If not exist, then allocation.
   */
  template <typename T>
  inline T *mutable_data() {
    static_assert(std::is_pod<T>::value, "T must be POD");
    return reinterpret_cast<T *>(mutable_data(typeid(T)));
  }

  /**
   * @brief     Return a pointer to mutable memory block.
   *
   * @param[in] dims    The dimensions of the memory block.
   * @param[in] place   The place of the memory block.
   *
   * @note      If not exist, then allocation.
   */
  template <typename T>
  inline T *mutable_data(DDim dims) {
    static_assert(std::is_pod<T>::value, "T must be POD");
    Resize(dims);
    return mutable_data<T>();
  }

  /**
   * @brief  Return a sub-tensor of the given tensor.
   *
   * @param[in] begin_idx   The index of the start row(inclusive) to
   * slice.
   *                        The index number begins from 0.
   * @param[in] end_idx     The index of the end row(exclusive) to
   * slice.
   *                        The index number begins from 0.
   */
  inline Tensor Slice(int begin_idx, int end_idx) const {
    check_memory_size();
    PADDLE_MOBILE_ENFORCE(begin_idx >= 0,
                          "The start row index must not be less than 0.")
    PADDLE_MOBILE_ENFORCE(end_idx <= dims_[0],
                          "The end row index is out of bound.")
    PADDLE_MOBILE_ENFORCE(
        begin_idx < end_idx,
        "The start row index must be less than the end row index.")
    if (dims_[0] == 1) {
      return *this;
    } else {
      size_t base = numel() / dims_[0];
      Tensor dst;
      dst.holder_ = holder_;
      DDim dst_dims = dims_;
      dst_dims[0] = end_idx - begin_idx;
      dst.Resize(dst_dims);
      dst.offset_ = offset_ + begin_idx * base * SizeOfType(type());
      return dst;
    }
  }

 private:
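  // Concrete Placeholder: owns the raw allocation together with its size and
  // element type.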
  struct PlaceholderImpl : public Placeholder {
    PlaceholderImpl(size_t size, std::type_index type)
        : ptr_(static_cast<uint8_t *>(memory::Alloc(size)),
               memory::PODDeleter<uint8_t>()),
          size_(size),
          type_(type) {
      PADDLE_MOBILE_ENFORCE(ptr_ != nullptr,
                            "Insufficient memory to allocation");
    }

    virtual size_t size() const { return size_; }

    virtual void *ptr() const { return static_cast<void *>(ptr_.get()); }

    virtual std::type_index type() const { return type_; }

    virtual void set_type(std::type_index type) { type_ = type; }

    std::unique_ptr<uint8_t, memory::PODDeleter<uint8_t>> ptr_;

    /*! the size of memory block. */
    size_t size_;

    /* the current type of memory */
    std::type_index type_;
  };

#ifdef PADDLE_MOBILE_FPGA

 public:
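  // Swap in an externally prepared buffer for the FPGA backend; the previous
  // buffer is released by the unique_ptr's deleter.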
  inline void reset_data_ptr(void *p) {
    (reinterpret_cast<PlaceholderImpl *>(holder_.get()))
        ->ptr_.reset(reinterpret_cast<uint8_t *>(p));
  }
  float scale[2];  // scale[0]= MAX/127.0, scale[1]= 127.0/MAX
#endif
};

#ifdef PADDLE_MOBILE_DEBUG
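// Print the dims and roughly 20 evenly spaced elements of a tensor.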
inline Print &operator<<(Print &printer, const Tensor &tensor) {
  printer << " dims: " << tensor.dims() << "\n";
  int stride = tensor.numel() / 20;
  stride = stride > 0 ? stride : 1;
#ifndef PADDLE_MOBILE_FPGA
  for (int i = 0; i < tensor.numel(); i += stride) {
    // The element type is not necessarily float.
    if (tensor.type() == typeid(float)) {
      printer << tensor.data<float>()[i] << " ";
    } else if (tensor.type() == typeid(int64_t)) {
      printer << tensor.data<int64_t>()[i] << " ";
    }
  }
#endif

  return printer;
}

#endif

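// View `src` as a 2-D matrix that shares its memory: the leading num_col_dims
// dimensions are flattened into rows and the remaining ones into columns.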
inline Tensor ReshapeToMatrix(const Tensor &src, int num_col_dims) {
  Tensor res;
  res.ShareDataWith(src);
  res.Resize(flatten_to_2d(src.dims(), num_col_dims));
  return res;
}
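
// A minimal usage sketch (assuming framework::make_ddim is available for
// building a DDim, as elsewhere in paddle_mobile):
//
//   Tensor t(std::vector<float>(6, 1.f), make_ddim({2, 3}));
//   Tensor row = t.Slice(0, 1);          // first row, shares t's memory
//   Tensor mat = ReshapeToMatrix(t, 1);  // 2 x 3 matrix view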

}  // namespace framework
}  // namespace paddle_mobile