/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <cstdint>
#include <cstring>
#include <memory>
#include <typeindex>
#include <vector>

#include "paddle/fluid/framework/data_layout.h"
#include "paddle/fluid/framework/ddim.h"
#include "paddle/fluid/memory/memory.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/place.h"

namespace paddle {

namespace framework {

class LoDTensor;

class Tensor {
#ifdef PADDLE_WITH_MKLDNN

 public:
  inline mkldnn::memory::format format() const { return format_; }

  inline void set_format(const mkldnn::memory::format format) {
    format_ = format;
  }

 protected:
  /**
   * @brief the detailed format of a memory block whose layout is kMKLDNN
   *
   * @note The MKLDNN library supports various memory formats such as nchw,
   *       nhwc, nChw8c, nChw16c, etc. For an MKLDNN memory block, the layout
   *       is set to DataLayout::kMKLDNN while the detailed memory format is
   *       kept in this field.
   */

  mkldnn::memory::format format_ = mkldnn::memory::format::format_undef;
#endif
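
  // A minimal usage sketch (illustrative, not part of this header; assumes an
  // MKLDNN build and an already-allocated Tensor `t`): a kMKLDNN block is
  // tagged with its concrete MKLDNN format via set_format().
  //
  //   t.set_layout(DataLayout::kMKLDNN);
  //   t.set_format(mkldnn::memory::format::nChw8c);
  //   if (t.format() == mkldnn::memory::format::format_undef) {
  //     /* no concrete MKLDNN format has been recorded for this block */
  //   }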

 public:
  template <typename T, size_t D, int MajorType, typename IndexType>
  friend struct EigenTensor;

  template <typename T, int MajorType, typename IndexType>
  friend struct EigenMatrix;

  template <typename T, int MajorType, typename IndexType>
  friend struct EigenVector;

 public:
  Tensor() : offset_(0) {}

  /*! Constructor with place should only be used in pybind. */
  explicit Tensor(const platform::Place& place) : offset_(0) {
    holder_->set_place(place);
  }

  /*! Return a pointer to mutable memory block. */
  template <typename T>
  T* data();

  /*! Return a pointer to constant memory block. */
  template <typename T>
  const T* data() const;

  inline bool IsInitialized() const;
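
  // A minimal sketch (illustrative; `t` is a hypothetical Tensor): data<T>()
  // enforces that memory has already been allocated, so guard raw access with
  // IsInitialized().
  //
  //   if (t.IsInitialized()) {
  //     const float* p = t.data<float>();  // read-only view of the block
  //   }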

  /**
   * @brief   Return a pointer to mutable memory block.
   * @note    If the memory block does not exist, it is allocated first.
   */
  template <typename T>
  T* mutable_data(platform::Place place, size_t requested_size = 0);

  void* mutable_data(platform::Place place, std::type_index type,
                     size_t requested_size = 0);

  void* mutable_data(platform::Place place, size_t requested_size = 0);

  /**
   * @brief     Return a pointer to mutable memory block.
   *
   * @param[in] dims           The dimensions of the memory block.
   * @param[in] place          The place of the memory block.
   * @param[in] requested_size The size of the block in bytes.
   *
   * @note      If the memory block does not exist, it is allocated first.
   */
  template <typename T>
  T* mutable_data(DDim dims, platform::Place place, size_t requested_size = 0);
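
  // A minimal allocation sketch (illustrative; assumes CPUPlace and float
  // data): Resize() only records the shape, while mutable_data<T>() performs
  // the actual allocation for that shape on the given place.
  //
  //   Tensor t;
  //   t.Resize(framework::make_ddim({2, 3}));
  //   float* p = t.mutable_data<float>(platform::CPUPlace());
  //   for (int64_t i = 0; i < t.numel(); ++i) p[i] = 0.f;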

  /*! Return the dimensions of the memory block. */
  const DDim& dims() const;

  /*! Return the number of elements in the tensor. */
  int64_t numel() const;

  /*! Resize the dimensions of the memory block. */
  Tensor& Resize(const DDim& dims);

  /*! Make this tensor share the memory block held by another tensor. */
  Tensor& ShareDataWith(const Tensor& src);
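
  // A minimal sketch (illustrative): after ShareDataWith(), both tensors
  // refer to the same underlying memory block, so no data is copied and
  // writes through one tensor are visible through the other.
  //
  //   Tensor a, b;
  //   a.Resize(framework::make_ddim({4}));
  //   a.mutable_data<float>(platform::CPUPlace());
  //   b.ShareDataWith(a);  // b now aliases a's memory block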

  /**
   * @brief  Return a sub-tensor of the given tensor.
   *
   * @param[in] begin_idx   The index of the start row (inclusive) to slice.
   *                        The index number begins from 0.
   * @param[in] end_idx     The index of the end row (exclusive) to slice.
   *                        The index number begins from 0.
   */
  Tensor Slice(int begin_idx, int end_idx) const;
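
  // A minimal sketch (illustrative): Slice() returns a view over rows
  // [begin_idx, end_idx) of the first dimension; the view shares the original
  // memory block and only records a different shape and byte offset.
  //
  //   Tensor t;
  //   t.Resize(framework::make_ddim({6, 8}));
  //   t.mutable_data<float>(platform::CPUPlace());
  //   Tensor rows = t.Slice(2, 4);  // a 2 x 8 view, no copy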

  platform::Place place() const {
    PADDLE_ENFORCE_NOT_NULL(
        holder_, "Tensor not initialized yet when Tensor::place() is called.");
    return holder_->place();
  }

  std::type_index type() const {
    PADDLE_ENFORCE_NOT_NULL(
        holder_, "Tensor not initialized yet when Tensor::type() is called.");
    return holder_->type();
  }

  // memory_size returns the size of the held memory block in bytes.
  size_t memory_size() const;

  void check_memory_size() const;

  DataLayout layout() const { return layout_; }

  void set_layout(const DataLayout layout) { layout_ = layout; }

  void clear() { holder_ = nullptr; }
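
  // A minimal inspection sketch (illustrative; assumes `t` was allocated as
  // above): layout()/set_layout() only tag how the dimensions are to be
  // interpreted, memory_size() reports the bytes held, and clear() drops the
  // reference to the underlying block.
  //
  //   t.set_layout(DataLayout::kNHWC);
  //   size_t bytes = t.memory_size();  // size of the held block in bytes
  //   t.clear();                       // t no longer holds any memory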

 private:
  /**
   * @note    Placeholder hides type T, so it doesn't appear as a template
   *          parameter of Variable.
   */
  struct Placeholder {
    virtual ~Placeholder() = default;
    virtual void* ptr() const = 0;
    virtual size_t size() const = 0;
    virtual std::type_index type() const = 0;
    virtual platform::Place place() const = 0;
    virtual void set_type(std::type_index type) = 0;
    virtual void set_place(platform::Place place) = 0;
  };

  template <typename Place>
  struct PlaceholderImpl : public Placeholder {
    PlaceholderImpl(Place place, size_t size, std::type_index type)
        : ptr_(static_cast<uint8_t*>(memory::Alloc(place, size)),
               memory::PODDeleter<uint8_t, Place>(place)),
          place_(place),
          size_(size),
          type_(type) {
      PADDLE_ENFORCE_NOT_NULL(ptr_, "Insufficient %s memory for allocation.",
                              (is_cpu_place(place_) ? "CPU" : "GPU"));
    }

    virtual size_t size() const { return size_; }
    virtual platform::Place place() const { return place_; }
    virtual void* ptr() const { return static_cast<void*>(ptr_.get()); }
    virtual std::type_index type() const { return type_; }
    virtual void set_type(std::type_index type) { type_ = type; }
    virtual void set_place(platform::Place place) { place_ = place; }

    /*! the pointer to the memory block. */
    std::unique_ptr<uint8_t, memory::PODDeleter<uint8_t, Place>> ptr_;

    /*! the place of the memory block. */
    platform::Place place_;

    /*! the size of the memory block. */
    size_t size_;

    /*! the current data type of the memory block. */
    std::type_index type_;
  };

  /*! holds the memory block if allocated. */
  std::shared_ptr<Placeholder> holder_;

  /**
   * @brief the dimensions of the tensor.
   *
   * @note dims_ does not indicate the size of the memory block.
   */

  DDim dims_;

  /**
   * @brief the layout of the memory block; the default is NCHW.
   *
   * @note the memory allocation order, i.e. how weights/data are stored.
   *       For example, a 4-D Tensor (rank = 4) commonly uses one of three
   *       layouts: NCHW, NHWC, or CHWN, where N, C, H and W denote the
   *       batch size, the number of feature maps, the height, and the
   *       width, respectively.
   */
  // FIXME: changing the default layout to kNCHW here does not fix the real
  // issue; the feeder should set up the tensor layout according to the
  // actual input data.
  DataLayout layout_ = DataLayout::kNCHW;

  /**
   * @brief   A Placeholder may be shared by more than one tensor.
   *
   * @note    Some of them may be slices of the others. So the offset_
   *          is introduced here to indicate the byte offset between
   *          Placeholder::ptr_ and where the tensor data really begins.
   */
  size_t offset_;
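
  // Illustrative sketch of how the two fields above combine: the start of
  // this tensor's data is the shared block's base pointer advanced by
  // offset_ bytes, e.g.
  //
  //   void* start = reinterpret_cast<void*>(
  //       reinterpret_cast<uintptr_t>(holder_->ptr()) + offset_);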
};

}  // namespace framework
}  // namespace paddle

#include "paddle/fluid/framework/tensor_impl.h"