/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <cstdint>
#include <cstring>
#include <memory>
#include <typeindex>
#include <utility>
#include <vector>

#include "paddle/fluid/framework/data_layout.h"
#include "paddle/fluid/framework/ddim.h"
#include "paddle/fluid/framework/framework.pb.h"
#include "paddle/fluid/memory/memory.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/place.h"

namespace paddle {
namespace memory {
namespace allocation {
class Allocation;
}  // namespace allocation
}  // namespace memory
}  // namespace paddle

namespace paddle {

namespace framework {

class LoDTensor;

/*
 NOTE(liym27): [ What is TensorInplaceVersion used for? ]

 Every Tensor has a TensorInplaceVersion, a version counter used to check
 whether an inplace operation will result in an incorrect gradient
 calculation. The version is incremented whenever the data of the Variable is
 modified in place.

 - Question: In what scenarios will version counters be shared?
 - Answer: When two Variables/VarBases share the same C++ Tensor (whose
 Allocation may change), both of them share the same version counter. For
 example:
  1. `z = paddle.assign(input=x, output=y)`: `z` shares the same version
    counter as `y` because z and y are the same VarBase;
  2. `y = x.detach()`: `y` shares the same version counter as `x`.

 - Question: In what scenarios will version counters NOT be shared?
 - Answer: When a `Variable`'s data is replaced by calling
 `Tensor::ShareDataWith(...)` or `Tensor::ShareBufferWith(...)`, because the
 two tensors then share the same Allocation but not the same framework::Tensor.

 - Question: Why put the inplace_version_counter_ in framework::Tensor instead
 of Allocation or Variable?
 - Answer:
  1. Tensor can call ResetHolder() to replace its Allocation; if
  inplace_version_counter_ lived in Allocation, it would change on every
  reset, which would give confusing information about the inplace version.
  2. If inplace_version_counter_ lived in Variable, different VariableWrappers
  would have to share the same Variable. However, a VariableWrapper holds a
  Variable object, not a pointer to one.
*/

class TensorInplaceVersion {
 public:
  explicit TensorInplaceVersion(uint32_t inplace_version = 0)
      : inplace_version_(inplace_version) {}
  bool IsUnique() const { return inplace_version_ == 0; }
  void Bump() { ++inplace_version_; }
  uint32_t CurrentVersion() const { return inplace_version_; }
  void SetInplaceVersionToZero() { inplace_version_ = 0; }

 private:
  uint32_t inplace_version_;
};
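
// A minimal usage sketch of the version counter (illustrative only; the
// tensor `t` and the in-place modification step are assumptions, not part of
// this header):
//
//   framework::Tensor t;
//   uint32_t v0 = t.InplaceVersionCounter().CurrentVersion();  // 0
//   // ... modify t's data in place ...
//   t.InplaceVersionCounter().Bump();
//   uint32_t v1 = t.InplaceVersionCounter().CurrentVersion();  // 1
//   bool unique = t.InplaceVersionCounter().IsUnique();        // now false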

class Tensor {
#ifdef PADDLE_WITH_MKLDNN

 public:
  inline dnnl::memory::format_tag format() const { return format_; }

  inline void set_format(const dnnl::memory::format_tag format) {
    format_ = format;
  }

 protected:
  /**
   * @brief the detailed format of the memory block whose layout is kMKLDNN
   *
   * @note The MKLDNN library supports various memory formats such as nchw,
   *       nhwc, nChw8C, nChw16c, etc. For an MKLDNN memory block, the layout
   *       will be set to DataLayout::kMKLDNN while the detailed memory format
   *       is kept in this field.
   */

  dnnl::memory::format_tag format_ = dnnl::memory::format_tag::undef;
#endif

 public:
  template <typename T, size_t D, int MajorType, typename IndexType>
  friend struct EigenTensor;

  template <typename T, int MajorType, typename IndexType>
  friend struct EigenMatrix;

  template <typename T, int MajorType, typename IndexType>
  friend struct EigenVector;

 public:
  Tensor()
      : type_(proto::VarType::FP32),
        offset_(0),
        inplace_version_counter_(std::make_shared<TensorInplaceVersion>(0)) {}

  explicit Tensor(const proto::VarType::Type&);

  /*! Return a pointer to mutable memory block. */
  template <typename T>
  T* data();

  /*! Return a pointer to constant memory block. */
  template <typename T>
  const T* data() const;

  inline bool IsInitialized() const;

  /**
   * @brief   Return a pointer to mutable memory block.
   * @note    If the memory block does not exist, it will be allocated.
   */
  template <typename T>
  T* mutable_data(const platform::Place& place, size_t requested_size = 0);

  void* mutable_data(const platform::Place& place, proto::VarType::Type type,
                     size_t requested_size = 0);

  void* mutable_data(const platform::Place& place, size_t requested_size = 0);

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  void* mutable_data(const platform::CUDAPlace& place,
                     proto::VarType::Type type, const gpuStream_t& stream);
#endif

  /**
   * @brief     Return a pointer to mutable memory block.
   *
   * @param[in] dims           The dimensions of the memory block.
   * @param[in] place          The place of the memory block.
   * @param[in] requested_size The size of the block in bytes.
   *
   * @note      If the memory block does not exist, it will be allocated.
   */
  template <typename T>
  T* mutable_data(const DDim& dims, const platform::Place& place,
                  size_t requested_size = 0);
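
  // A minimal allocation sketch (illustrative only; a CPU place and
  // make_ddim from ddim.h, included above, are assumed):
  //
  //   Tensor t;
  //   float* p = t.mutable_data<float>(make_ddim({2, 3}),
  //                                    platform::CPUPlace());
  //   // p now points to storage for 2 * 3 floats owned by t's holder.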

  /*! Return the dimensions of the memory block. */
  const DDim& dims() const;

  /*! Return the number of elements (numel) of the tensor. */
  int64_t numel() const;

  /*! Resize the dimensions of the memory block. */
  Tensor& Resize(const DDim& dims);

  /*! Make this tensor and `src` share the same memory block. */
  Tensor& ShareDataWith(const Tensor& src);
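
  // Sharing sketch (illustrative; `a` and `b` are hypothetical tensors):
  //
  //   Tensor a, b;
  //   a.mutable_data<float>(make_ddim({4}), platform::CPUPlace());
  //   b.ShareDataWith(a);                      // b reuses a's memory block
  //   bool shared = b.IsSharedBufferWith(a);   // expected to be true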

  /*! Make this tensor and `src` share the same inplace version counter. */
  Tensor& ShareInplaceVersionCounterWith(const Tensor& src);

  /**
   * @brief  Return a sub-tensor of the given tensor.
   *
   * @param[in] begin_idx   The index of the start row (inclusive) to slice.
   *                        Indexing begins at 0.
   * @param[in] end_idx     The index of the end row (exclusive) to slice.
   *                        Indexing begins at 0.
   */
  Tensor Slice(int64_t begin_idx, int64_t end_idx) const;
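
  // Slice usage sketch (illustrative; `t` is a hypothetical tensor with
  // 4 rows along dimension 0):
  //
  //   Tensor t;
  //   t.mutable_data<float>(make_ddim({4, 3}), platform::CPUPlace());
  //   Tensor rows = t.Slice(1, 3);  // rows 1 and 2, shape [2, 3]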

  /**
   * @brief  Return a list of tensors split from the given tensor.
   *
   * @param[in] split_size  The size of each split along the axis.
   * @param[in] axis        The axis along which to split.
   */
  std::vector<Tensor> Split(int64_t split_size, int64_t axis) const;

  /**
   * @brief  Return a list of tensors split from the given tensor.
   *
   * @param[in] chunks   The number of tensors to split into along the axis.
   * @param[in] axis     The axis along which to split.
   */
  std::vector<Tensor> Chunk(int64_t chunks, int64_t axis) const;
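
  // Split/Chunk usage sketch (illustrative; `t` is a hypothetical tensor of
  // shape [4, 6]):
  //
  //   std::vector<Tensor> halves = t.Split(/*split_size=*/2, /*axis=*/0);
  //   // halves.size() == 2, each of shape [2, 6]
  //   std::vector<Tensor> thirds = t.Chunk(/*chunks=*/3, /*axis=*/1);
  //   // thirds.size() == 3, each of shape [4, 2]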

  const platform::Place& place() const {
    PADDLE_ENFORCE_NOT_NULL(
        holder_,
        platform::errors::PreconditionNotMet(
            "Tensor not initialized yet when Tensor::place() is called."));
    return holder_->place();
  }

  proto::VarType::Type type() const {
    PADDLE_ENFORCE_NOT_NULL(
        holder_,
        platform::errors::PreconditionNotMet(
            "Tensor not initialized yet when Tensor::type() is called."));
    return type_;
  }

  /**
   * [Method to get the saved type of the tensor]
   *
   * Since the introduction of complex number calculations, Ops that support
   * complex numbers generally support type promotion, e.g.
   * x(float32) + y(complex64) = out(complex64). The grad tensors should then
   * be dout(complex64), dx(float32) and dy(complex64), but the grad Op relies
   * on the type of the forward tensor x to recognize that dx must be float32.
   * However, many of our ops register an InplaceInferer that reuses the
   * memory of x for out in order to save storage.
   *
   * In that case, the dim and type information recorded by x still exist, but
   * because x has become an uninitialized tensor, its recorded type can no
   * longer be obtained with x.type(). The type is still valid here, so we add
   * saved_type(). This method SHOULD NOT be called in general scenarios.
   */
  proto::VarType::Type saved_type() const { return type_; }

  // memory_size returns the size of the held memory in bytes.
  size_t memory_size() const;

  void check_memory_size() const;

  DataLayout layout() const { return layout_; }

  void set_layout(const DataLayout layout) { layout_ = layout; }

  void clear() {
    holder_ = nullptr;
    offset_ = 0;
  }

  void ShareBufferWith(const Tensor& tensor) {
    holder_ = tensor.holder_;
    offset_ = tensor.offset_;
    // NOTE(chenfeiyu): when sharing a buffer, by definition only the holder
    // of the memory allocation and the offset should be shared. Shape,
    // data type, layout, and other metadata associated with a Tensor
    // should not be copied.
  }
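
  // ShareBufferWith sketch (illustrative; `a` and `b` are hypothetical
  // tensors): only the holder and offset are taken from `a`; b's own
  // dims/type/layout remain untouched.
  //
  //   Tensor a, b;
  //   a.mutable_data<float>(make_ddim({8}), platform::CPUPlace());
  //   b.Resize(make_ddim({2, 4}));
  //   b.ShareBufferWith(a);   // b views a's bytes with its own metadata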

  void ShareDataTypeWith(const Tensor& tensor) { type_ = tensor.type_; }

  bool IsSharedBufferWith(const Tensor& src) const {
    return holder_ && holder_ == src.Holder();
  }

  const std::shared_ptr<memory::Allocation>& Holder() const { return holder_; }
  size_t offset() const { return offset_; }

  std::shared_ptr<memory::Allocation> MoveMemoryHolder() {
    return std::move(holder_);
  }

  void ResetHolder(std::shared_ptr<memory::Allocation> holder);

  void ResetHolderWithType(std::shared_ptr<memory::Allocation> holder,
                           const proto::VarType::Type& type);

  void set_type(const proto::VarType::Type& type);

  TensorInplaceVersion& InplaceVersionCounter() {
    return *inplace_version_counter_;
  }

 private:
  /*! holds the memory block if allocated. */
  std::shared_ptr<memory::Allocation> holder_;
  proto::VarType::Type type_;
  /**
   * @brief holds the dimensions of the tensor's elements.
   *
   * @note dims_ does not indicate the memory block size.
   */

  DDim dims_;

  /**
   * @brief the layout of the memory block; the default is kNCHW.
   *
   * @note the memory allocation order, describing how weight/data is stored.
   *       For example, for a 4-D Tensor (rank = 4), there are three commonly
   *       used layouts:
   *            NCHW, NHWC, CHWN.
   *       N, C, H, W stand respectively for the batch size, the number of
   *       feature maps (channels), the height, and the width.
   */
  // Fix me: here just change the default layout to kNCHW
  // it doesn't fix the real issue, i.e. feeder should set up tensor layout
  // according to actual input data
  DataLayout layout_ = DataLayout::kNCHW;

  /**
   * @brief   A memory Allocation (holder_) may be shared by more than one
   *          tensor.
   *
   * @note    Some tensors may be slices of others, so offset_ is introduced
   *          here to indicate the byte offset between the start of the shared
   *          allocation and where this tensor's data really begins.
   */
  size_t offset_;
  std::shared_ptr<TensorInplaceVersion> inplace_version_counter_;
};

}  // namespace framework
}  // namespace paddle

#include "paddle/fluid/framework/tensor_impl.h"