/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <cstdint>
#include <cstring>
#include <memory>
#include <type_traits>
#include "paddle/framework/ddim.h"
#include "paddle/framework/enforce.h"
#include "paddle/memory/memory.h"
#include "paddle/platform/place.h"

namespace paddle {
namespace framework {

class Tensor {
 public:
F
fengjiayi 已提交
31
  Tensor() : offset_(0) { numel_ = product(dims_); }
32

F
fengjiayi 已提交
33
  Tensor& operator=(const Tensor& src) = delete;
34

Y
Yi Wang 已提交
35 36
  template <typename T>
  const T* data() const {
F
fengjiayi 已提交
37
    CheckDimsValidity();
38 39
    return reinterpret_cast<const T*>(
        reinterpret_cast<uintptr_t>(holder_->Ptr()) + offset_);
Y
Yi Wang 已提交
40 41
  }

F
fengjiayi 已提交
42
  template <typename T>
F
fengjiayi 已提交
43
  T* mutable_data(DDim dims, paddle::platform::Place place) {
F
fengjiayi 已提交
44
    set_dims(dims);
45 46 47
    return mutable_data<T>(place);
  }

F
fengjiayi 已提交
48
  template <typename T>
49
  T* mutable_data(paddle::platform::Place place) {
F
fengjiayi 已提交
50 51 52
    PADDLE_ENFORCE(numel_ > 0,
                   "Tensor::numel_ must be larger than zero to call "
                   "Tensor::mutable_data.");
F
fengjiayi 已提交
53 54 55
    if (holder_ == nullptr ||
        !(holder_->Place() ==
          place) /* some versions of boost::variant don't have operator!= */
F
fengjiayi 已提交
56 57
        || holder_->Size() < numel_ * sizeof(T) + offset_) {
      holder_.reset(new PlaceholderImpl<T>(place, numel_ * sizeof(T)));
58
      offset_ = 0;
Y
Yi Wang 已提交
59
    }
60 61
    return reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(holder_->Ptr()) +
                                offset_);
Y
Yi Wang 已提交
62 63
  }

64
  void ShareDataFrom(const Tensor& src) {
F
fengjiayi 已提交
65
    src.CheckDimsValidity();
66
    holder_ = src.holder_;
F
fengjiayi 已提交
67 68
    dims_ = src.dims();
    numel_ = src.numel_;
69
    offset_ = src.offset_;
Y
Yi Wang 已提交
70 71
  }

72
  void CopyFrom(const Tensor& src, paddle::platform::Place dst_place) {
F
fengjiayi 已提交
73 74
    src.CheckDimsValidity();
    size_t size = src.numel_ * src.holder_->TypeSize();
75 76
    holder_.reset(src.holder_->Clone(src.offset_, size, dst_place));
    dims_ = src.dims();
F
fengjiayi 已提交
77
    numel_ = src.numel_;
78 79 80
    offset_ = 0;
  }

F
fengjiayi 已提交
81
  Tensor Slice(const int& begin_idx, const int& end_idx) const {
F
fengjiayi 已提交
82
    CheckDimsValidity();
83 84 85 86 87 88 89 90 91 92 93 94
    PADDLE_ENFORCE(begin_idx >= 0 && end_idx <= dims_[0],
                   "Slice index is less than zero or out of bound.");
    PADDLE_ENFORCE(begin_idx < end_idx,
                   "Begin index must be less than end index.");
    PADDLE_ENFORCE(dims_[0] != 1, "Can not slice a tensor with dims_[0] = 1.");
    std::vector<int> d = vectorize(dims_);
    int base = 1;
    for (size_t i = 1; i < d.size(); ++i) {
      base *= d[i];
    }
    Tensor dst;
    dst.holder_ = holder_;
F
fengjiayi 已提交
95 96 97
    DDim dst_dims = dims_;
    dst_dims[0] = end_idx - begin_idx;
    dst.set_dims(dst_dims);
98 99 100 101
    dst.offset_ = offset_ + begin_idx * base * holder_->TypeSize();
    return dst;
  }

F
fengjiayi 已提交
102 103 104 105 106 107 108 109 110
  void set_dims(const DDim& dims) {
    if (dims == dims_) {
      return;
    }
    dims_ = dims;
    numel_ = product(dims_);
    return;
  }

111 112
  DDim dims() const { return dims_; }

Y
Yi Wang 已提交
113 114 115 116 117 118
 private:
  // Placeholder hides type T, so it doesn't appear as a template
  // parameter of Variable.
  struct Placeholder {
    virtual ~Placeholder() {}
    virtual void* Ptr() const = 0;
F
fengjiayi 已提交
119
    virtual paddle::platform::Place Place() const = 0;
Y
Yi Wang 已提交
120
    virtual size_t Size() const = 0;
121
    virtual size_t TypeSize() const = 0;
122 123
    virtual Placeholder* Clone(size_t begin, size_t size,
                               paddle::platform::Place place) const = 0;
Y
Yi Wang 已提交
124 125 126 127
  };

  template <typename T>
  struct PlaceholderImpl : public Placeholder {
128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144
   private:
    class Deleter {
     public:
      Deleter(platform::Place place) : place_(place) {}
      void operator()(T* ptr) {
        paddle::memory::Free(place_, static_cast<void*>(ptr));
      }

     private:
      paddle::platform::Place place_;
    };

   public:
    PlaceholderImpl(paddle::platform::Place place, size_t size)
        : ptr_(static_cast<T*>(paddle::memory::Alloc(place, size)),
               Deleter(place)),
          place_(place),
Y
Yi Wang 已提交
145 146 147 148
          size_(size) {}

    virtual void* Ptr() const { return static_cast<void*>(ptr_.get()); }
    virtual size_t Size() const { return size_; }
F
fengjiayi 已提交
149
    virtual paddle::platform::Place Place() const { return place_; }
150
    virtual size_t TypeSize() const { return sizeof(T); }
151 152 153 154 155 156 157 158 159 160 161 162
    // TODO: Clone only support CPU now. GPU support is needed.
    virtual Placeholder* Clone(size_t begin, size_t size,
                               paddle::platform::Place place) const {
      PADDLE_ENFORCE(paddle::platform::is_cpu_place(place_) &&
                         paddle::platform::is_cpu_place(place),
                     "PlaceholderImpl::Clone only support CPU now.");
      PlaceholderImpl<T>* dst = new PlaceholderImpl<T>(place, size);
      void* begin_ptr =
          reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(Ptr()) + begin);
      memcpy(dst->Ptr(), begin_ptr, size);
      return dst;
    }
Y
Yi Wang 已提交
163

164
    std::unique_ptr<T, Deleter> ptr_;
F
fengjiayi 已提交
165 166
    paddle::platform::Place place_;  // record the place of ptr_.
    size_t size_;                    // size of the memory block.
Y
Yi Wang 已提交
167 168
  };

F
fengjiayi 已提交
169 170 171 172 173 174 175 176
  inline void CheckDimsValidity() {
    PADDLE_ENFORCE(holder_ != nullptr,
                   "Tenosr holds no memory. Call Tensor::mutable_data first.");
    PADDLE_ENFORCE(holder_->Size() > numel_ * sizeof(T) + offset_,
                   "Tensor's dims_ is out of bound. Call Tensor::mutable_data "
                   "first to re-allocate memory.");
  }

177
  std::shared_ptr<Placeholder> holder_;  // holds the memory block if allocated.
178
  DDim dims_;
F
fengjiayi 已提交
179
  int numel_;      // cache of `product(dims_)`
180
  size_t offset_;  // marks the begin of tensor data area.
Y
Yi Wang 已提交
181 182 183 184
};

}  // namespace framework
}  // namespace paddle