/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <cstdint>
#include <cstring>
#include <memory>
#include <typeindex>
#include "paddle/framework/ddim.h"
#include "paddle/framework/enforce.h"
#include "paddle/memory/memory.h"
#include "paddle/platform/place.h"

namespace paddle {
namespace framework {

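// Tensor is a typed, n-dimensional view over a reference-counted memory
// block (Placeholder). A minimal usage sketch follows; it is only an
// illustration and assumes the make_ddim helper from ddim.h and a CPU-only
// build:
//
//   framework::Tensor t;
//   float* buf = t.mutable_data<float>(make_ddim({3, 4}),
//                                      platform::CPUPlace());  // allocates
//   buf[0] = 1.0f;                      // write through the mutable pointer
//   const float* ro = t.data<float>();  // typed read-only view, same block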
class Tensor {
 public:
  Tensor() : numel_(0), offset_(0) {}

  template <typename T>
  const T* data() const {
    CheckDims<T>();
    return reinterpret_cast<const T*>(
        reinterpret_cast<uintptr_t>(holder_->ptr()) + offset_);
  }

  template <typename T>
  T* mutable_data(DDim dims, platform::Place place) {
    set_dims(dims);
    return mutable_data<T>(place);
  }

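  // mutable_data(place) allocates lazily: a new block is acquired only when
  // there is no holder_ yet, the requested place differs, or the current
  // block is smaller than numel_ * sizeof(T) + offset_; otherwise the
  // existing block is reused and offset_ is left untouched.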
  template <typename T>
  T* mutable_data(platform::Place place) {
    PADDLE_ENFORCE(numel_ > 0,
                   "Tensor::numel_ must be larger than zero to call "
                   "Tensor::mutable_data. Call Tensor::set_dim first.");
    if (holder_ == nullptr ||
        !(holder_->place() ==
          place) /* some versions of boost::variant don't have operator!= */
        || holder_->size() < numel_ * sizeof(T) + offset_) {
#ifdef __CUDACC__
      switch (place.which()) {
        case 0:
          holder_.reset(new PlaceholderImpl<T, platform::GPUPlace>(
              boost::get<platform::GPUPlace>(place), numel_ * sizeof(T)));
          break;

        case 1:
          holder_.reset(new PlaceholderImpl<T, platform::CPUPlace>(
              boost::get<platform::CPUPlace>(place), numel_ * sizeof(T)));
          break;
      }
#else
      holder_.reset(new PlaceholderImpl<T, platform::CPUPlace>(
          boost::get<platform::CPUPlace>(place), numel_ * sizeof(T)));
#endif

      offset_ = 0;
    }
    return reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(holder_->ptr()) +
                                offset_);
  }

  template <typename T>
  void ShareDataFrom(const Tensor& src) {
    src.CheckDims<T>();
    holder_ = src.holder_;
    set_dims(src.dims());
    offset_ = src.offset_;
  }

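  // CopyFrom performs a deep copy (CPU-only for now), in contrast to
  // ShareDataFrom above, which only aliases the source's holder_. A sketch
  // of the difference, assuming two tensors on platform::CPUPlace:
  //
  //   dst.ShareDataFrom<float>(src);                    // dst aliases src
  //   dst.CopyFrom<float>(src, platform::CPUPlace());   // dst owns a copy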
  template <typename T>
  void CopyFrom(const Tensor& src, platform::Place dst_place) {
    PADDLE_ENFORCE(platform::is_cpu_place(src.holder_->place()) &&
                       platform::is_cpu_place(dst_place),
                   "Tensor::CopyFrom only support CPU now.");
    src.CheckDims<T>();
    size_t size = src.numel_ * sizeof(T);
    set_dims(src.dims());
    const void* src_ptr = static_cast<const void*>(src.data<T>());
    void* dst_ptr = static_cast<void*>(mutable_data<T>(dst_place));
    memcpy(dst_ptr, src_ptr, size);
  }

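  // Slice returns a view on rows [begin_idx, end_idx) of the outermost
  // dimension; no data is copied, only offset_ is advanced. For example
  // (a sketch, assuming a {4, 3} float tensor), Slice<float>(1, 3) yields a
  // {2, 3} tensor whose offset_ is 1 * 3 * sizeof(float) bytes past the
  // original.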
  template <typename T>
  Tensor Slice(const int& begin_idx, const int& end_idx) const {
    CheckDims<T>();
    PADDLE_ENFORCE(begin_idx >= 0 && end_idx <= dims_[0],
                   "Slice index is less than zero or out of bound.");
    PADDLE_ENFORCE(begin_idx < end_idx,
                   "Begin index must be less than end index.");
    PADDLE_ENFORCE(dims_[0] != 1, "Can not slice a tensor with dims_[0] = 1.");
    std::vector<int> d = vectorize(dims_);
    int base = 1;
    for (size_t i = 1; i < d.size(); ++i) {
      base *= d[i];
    }
    Tensor dst;
    dst.holder_ = holder_;
    DDim dst_dims = dims_;
    dst_dims[0] = end_idx - begin_idx;
    dst.set_dims(dst_dims);
    dst.offset_ = offset_ + begin_idx * base * sizeof(T);
    return dst;
  }

  void set_dims(const DDim& dims) {
    if (dims == dims_) {
      return;
    }
    dims_ = dims;
    numel_ = product(dims_);
  }

  DDim dims() const { return dims_; }

  platform::Place place() const { return holder_->place(); }

  std::type_index type() const { return holder_->type(); }

 private:
  // Placeholder hides type T, so it doesn't appear as a template
  // parameter of Tensor.
  struct Placeholder {
    virtual ~Placeholder() {}
    virtual void* ptr() const = 0;
    virtual platform::Place place() const = 0;
    virtual size_t size() const = 0;
    virtual std::type_index type() const = 0;
  };

  template <typename T, typename PlaceType>
  struct PlaceholderImpl : public Placeholder {
   private:
    template <typename PType>
    class Deleter {
     public:
      Deleter(PType place) : place_(place) {}
      void operator()(T* ptr) { memory::Free(place_, static_cast<void*>(ptr)); }

     private:
      PType place_;
    };

   public:
    PlaceholderImpl(PlaceType place, size_t size)
        : ptr_(static_cast<T*>(memory::Alloc(place, size)),
               Deleter<PlaceType>(place)),
          place_(place),
          size_(size) {}

    virtual void* ptr() const { return static_cast<void*>(ptr_.get()); }
    virtual size_t size() const { return size_; }
    virtual paddle::platform::Place place() const { return place_; }
    virtual std::type_index type() const { return std::type_index(typeid(T)); }

    std::unique_ptr<T, Deleter<PlaceType>> ptr_;
    platform::Place place_;  // record the place of ptr_.
    size_t size_;            // size of the memory block.
  };

  template <typename T>
  inline void CheckDims() const {
    PADDLE_ENFORCE(holder_ != nullptr,
                   "Tenosr holds no memory. Call Tensor::mutable_data first.");
    PADDLE_ENFORCE(holder_->size() >= numel_ * sizeof(T) + offset_,
                   "Tensor's dims_ is out of bound. Call Tensor::mutable_data "
                   "first to re-allocate memory.");
  }

  std::shared_ptr<Placeholder> holder_;  // holds the memory block if allocated.
  DDim dims_;
  size_t numel_;   // cache of `product(dims_)`
  size_t offset_;  // marks the begin of tensor data area.
};  // class Tensor

}  // namespace framework
}  // namespace paddle