/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <cstdint>
#include <cstring>
#include <memory>
#include <typeindex>
#include <utility>
#include <vector>

#include "paddle/fluid/framework/data_layout.h"
#include "paddle/fluid/framework/ddim.h"
#include "paddle/fluid/framework/framework.pb.h"
#include "paddle/fluid/memory/memory.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/place.h"

namespace paddle {
namespace memory {
namespace allocation {
class Allocation;
}  // namespace allocation
}  // namespace memory
}  // namespace paddle

namespace paddle {

namespace framework {

class LoDTensor;

/*
 NOTE(liym27): [ What is TensorInplaceVersion used for? ]

 TensorInplaceVersion is a version counter, and every Tensor has one. It is
 used to check whether an inplace operation will result in an incorrect
 gradient calculation. The version is incremented whenever the data of the
 Variable is modified in place.

 - Question: In what scenarios will version counters be shared?
 - Answer: When two Variables/VarBases share the same C++ Tensor (whose
 Allocation may change), they share the same version counter. For example:
  1. `z = paddle.assign(input=x, output=y)`: `z` shares the same version
    counter as `y` because z and y are the same VarBase;
  2. `y = x.detach()`: `y` shares the same version counter as `x`.

 - Question: In what scenarios will version counters NOT be shared?
 - Answer: When a `Variable`'s data is replaced by calling
 `Tensor::ShareDataWith(...)` or `Tensor::ShareBufferWith(...)`, because the
 two tensors then share the same Allocation but not the same
 framework::Tensor.

 - Question: Why put the inplace_version_counter_ in framework::Tensor instead
 of Allocation or Variable?
 - Answer:
  1. A Tensor can call ResetHolder() to replace its Allocation. If the
  counter lived in the Allocation, it would change along with the holder,
  which would give confusing information about the inplace version.
  2. If inplace_version_counter_ lived in Variable, different
  VariableWrappers would have to share the same Variable to share the
  counter. However, a VariableWrapper holds a Variable object, not a
  pointer, so this is impossible.
*/

class TensorInplaceVersion {
 public:
  explicit TensorInplaceVersion(uint32_t inplace_version = 0)
      : inplace_version_(inplace_version) {}
  bool IsUnique() const { return inplace_version_ == 0; }
  void Bump() { ++inplace_version_; }
  uint32_t CurrentVersion() const { return inplace_version_; }

 private:
  uint32_t inplace_version_;
};

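/*
 Illustrative sketch (assumed usage, not part of this header): how the
 counter detects an inplace write between forward and backward.

   TensorInplaceVersion counter;                  // starts at version 0
   uint32_t snapshot = counter.CurrentVersion();  // recorded at forward time
   counter.Bump();                                // an inplace op modified the data
   bool safe = (snapshot == counter.CurrentVersion());  // false: grad may be wrong
*/
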
class Tensor {
#ifdef PADDLE_WITH_MKLDNN

 public:
  inline mkldnn::memory::format_tag format() const { return format_; }

  inline void set_format(const mkldnn::memory::format_tag format) {
    format_ = format;
  }

 protected:
  /**
   * @brief the detailed format of the memory block which has layout
   *        DataLayout::kMKLDNN
   *
   * @note The MKLDNN library supports various memory formats such as nchw,
   *       nhwc, nChw8c, nChw16c, etc. For an MKLDNN memory block, the layout
   *       will be set to DataLayout::kMKLDNN while the detailed memory
   *       format is kept in this field.
   */

  mkldnn::memory::format_tag format_ = mkldnn::memory::format_tag::undef;
#endif

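  /*
   Illustrative sketch (assumed usage): pairing the coarse layout flag with
   the detailed MKLDNN format when built with PADDLE_WITH_MKLDNN.

     tensor.set_layout(DataLayout::kMKLDNN);
     tensor.set_format(mkldnn::memory::format_tag::nChw16c);
  */
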
 public:
  template <typename T, size_t D, int MajorType, typename IndexType>
  friend struct EigenTensor;

  template <typename T, int MajorType, typename IndexType>
  friend struct EigenMatrix;

  template <typename T, int MajorType, typename IndexType>
  friend struct EigenVector;

 public:
  Tensor()
      : type_(proto::VarType::FP32),
        offset_(0),
        inplace_version_counter_(std::make_shared<TensorInplaceVersion>(0)) {}

  explicit Tensor(const proto::VarType::Type&);

  /*! Return a pointer to mutable memory block. */
  template <typename T>
  T* data();

  /*! Return a pointer to constant memory block. */
  template <typename T>
  const T* data() const;

  inline bool IsInitialized() const;

  /**
   * @brief   Return a pointer to mutable memory block.
   * @note    If the memory block does not exist, it will be allocated first.
   */
  template <typename T>
  T* mutable_data(const platform::Place& place, size_t requested_size = 0);

  void* mutable_data(const platform::Place& place, proto::VarType::Type type,
                     size_t requested_size = 0);

  void* mutable_data(const platform::Place& place, size_t requested_size = 0);

  /**
   * @brief     Return a pointer to mutable memory block.
   *
   * @param[in] dims           The dimensions of the memory block.
   * @param[in] place          The place of the memory block.
   * @param[in] requested_size The size of the block in bytes.
   *
   * @note      If the memory block does not exist, it will be allocated first.
   */
  template <typename T>
  T* mutable_data(const DDim& dims, const platform::Place& place,
                  size_t requested_size = 0);

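  /*
   Illustrative sketch (assumed usage): allocating and writing through
   mutable_data on the CPU; make_ddim and CPUPlace are the usual helpers.

     Tensor t;
     float* p = t.mutable_data<float>(make_ddim({2, 3}), platform::CPUPlace());
     for (int64_t i = 0; i < t.numel(); ++i) p[i] = 0.f;
  */
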
  /*! Return the dimensions of the memory block. */
  const DDim& dims() const;

  /*! Return the number of elements (numel) of the tensor. */
  int64_t numel() const;

  /*! Resize the dimensions of the memory block. */
  Tensor& Resize(const DDim& dims);

  /*! The internals of the two tensors share the same memory block. */
  Tensor& ShareDataWith(const Tensor& src);

  /*! The internals of the two tensors share the same inplace version counter. */
  Tensor& ShareInplaceVersionCounterWith(const Tensor& src);

  /**
   * @brief  Return a sub-tensor of the given tensor.
   *
   * @param[in] begin_idx   The index of the start row (inclusive) to slice.
   *                        Indexing begins at 0.
   * @param[in] end_idx     The index of the end row (exclusive) to slice.
   *                        Indexing begins at 0.
   */
  Tensor Slice(int64_t begin_idx, int64_t end_idx) const;

  /**
   * @brief  Return a list of sub-tensors split from the given tensor.
   *
   * @param[in] split_size  The size of each sub-tensor along the given axis.
   * @param[in] axis        The axis along which to split.
   */
  std::vector<Tensor> Split(int64_t split_size, int64_t axis) const;

  /**
   * @brief  Return a list of sub-tensors split from the given tensor.
   *
   * @param[in] chunks   The number of sub-tensors to split into along axis.
   * @param[in] axis     The axis along which to split.
   */
  std::vector<Tensor> Chunk(int64_t chunks, int64_t axis) const;

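  /*
   Illustrative sketch (assumed shapes): Slice, Split, and Chunk on a 4 x 8
   tensor. A slice shares the holder of its source and records where it
   starts as a byte offset.

     Tensor t;
     t.mutable_data<float>(make_ddim({4, 8}), platform::CPUPlace());
     Tensor rows = t.Slice(1, 3);                 // rows [1, 3): shape {2, 8}
     // rows.offset() == 1 * 8 * sizeof(float)    // byte offset of row 1
     std::vector<Tensor> halves = t.Split(2, 0);  // two tensors of shape {2, 8}
     std::vector<Tensor> parts = t.Chunk(4, 1);   // four tensors of shape {4, 2}
  */
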
  const platform::Place& place() const {
    PADDLE_ENFORCE_NOT_NULL(
        holder_,
        platform::errors::PreconditionNotMet(
            "Tensor not initialized yet when Tensor::place() is called."));
    return holder_->place();
  }

  proto::VarType::Type type() const {
    PADDLE_ENFORCE_NOT_NULL(
        holder_,
        platform::errors::PreconditionNotMet(
            "Tensor not initialized yet when Tensor::type() is called."));
    return type_;
  }

  /**
   * [Method to get the saved type of a tensor]
   *
   * Since the introduction of complex number calculations, Ops that support
   * them generally also support type promotion. For example,
   * x(float32) + y(complex64) = out(complex64), so the types of the grad
   * tensors should be dout(complex64), dx(float32), and dy(complex64). For
   * the grad Op to recognize the type of dx as float32, it relies on the
   * type of the forward tensor x. However, many of our ops register an
   * InplaceInferer, which overwrites the tensor memory of x with out to
   * save storage.
   *
   * In this case, the dim and type information recorded by x still exist,
   * but because x has become an uninitialized tensor, its recorded type
   * cannot be obtained with x.type(). The type is still valid here, so we
   * add saved_type(). This method SHOULD NOT be called in general scenarios.
   */
  proto::VarType::Type saved_type() const { return type_; }

  // memory_size() returns the size of the held memory, in bytes.
  size_t memory_size() const;

  void check_memory_size() const;

  DataLayout layout() const { return layout_; }

  void set_layout(const DataLayout layout) { layout_ = layout; }

  void clear() {
    holder_ = nullptr;
    offset_ = 0;
  }

  void ShareBufferWith(const Tensor& tensor) {
    holder_ = tensor.holder_;
    offset_ = tensor.offset_;
    // NOTE(chenfeiyu): when sharing a buffer, by definition only the holder
    // of the memory allocation and the offset should be shared. Shape,
    // data type, layout, and other metadata associated with a Tensor
    // should not be copied.
  }

  bool IsSharedBufferWith(const Tensor& src) const {
    return holder_ && holder_ == src.Holder();
  }

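  /*
   Illustrative sketch (assumed usage): ShareBufferWith shares only the
   allocation and offset, so the destination keeps its own shape and type.

     Tensor a, b;
     a.mutable_data<float>(make_ddim({4}), platform::CPUPlace());
     b.Resize(make_ddim({2, 2}));
     b.ShareBufferWith(a);                    // same storage; b's metadata kept
     bool shared = b.IsSharedBufferWith(a);   // true
  */
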
  const std::shared_ptr<memory::Allocation>& Holder() const { return holder_; }
  size_t offset() const { return offset_; }

  std::shared_ptr<memory::Allocation> MoveMemoryHolder() {
    return std::move(holder_);
  }

  void ResetHolder(std::shared_ptr<memory::Allocation> holder);

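  /*
   Illustrative sketch (assumed usage): given two tensors src and dst,
   moving the allocation out of one and installing it in the other.

     std::shared_ptr<memory::Allocation> buf = src.MoveMemoryHolder();
     dst.ResetHolder(std::move(buf));  // dst now holds the allocation;
                                       // src is left without one
  */
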
  void ResetHolderWithType(std::shared_ptr<memory::Allocation> holder,
                           const proto::VarType::Type& type);

  void set_type(const proto::VarType::Type& type);

  TensorInplaceVersion& InplaceVersionCounter() {
    return *inplace_version_counter_;
  }

 private:
  /*! holds the memory block if allocated. */
  std::shared_ptr<memory::Allocation> holder_;
  proto::VarType::Type type_;
  /**
   * @brief the dimensions of the tensor's elements.
   *
   * @note dims_ does not indicate the size of the memory block.
   */

  DDim dims_;

  /**
   * @brief the layout of the memory block; the default is NCHW.
   *
   * @note the memory allocation order describes how weights/data are stored.
   *       For example, in a 4-D Tensor (rank=4), there are three commonly
   *       used layouts:
   *            NCHW, NHWC, CHWN.
   *       N, C, H, and W stand respectively for the batch size, the number
   *       of feature maps, the height, and the width.
   */
  // FIXME: here we just change the default layout to kNCHW. This does not
  // fix the real issue, i.e. the feeder should set up the tensor layout
  // according to the actual input data.
  DataLayout layout_ = DataLayout::kNCHW;

  /**
   * @brief   A PlaceHolder may be shared by more than one tensor.
   *
   * @note    Some of them may be slices of the others. So the offset_
   *          is introduced here to indicate the byte offset between
   *          PlaceHolder::ptr_ and where the tensor data really begins.
   */
  size_t offset_;
  std::shared_ptr<TensorInplaceVersion> inplace_version_counter_;
};

}  // namespace framework
}  // namespace paddle

#include "paddle/fluid/framework/tensor_impl.h"