/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include <vector>

#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/framework/dlpack_tensor.h"
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/platform/device_context.h"

namespace paddle {
namespace framework {

class PrintOptions {
 public:
  static PrintOptions& Instance() {
    static PrintOptions instance;
    return instance;
  }
  ~PrintOptions() {}
  PrintOptions(const PrintOptions& o) = delete;
  const PrintOptions& operator=(const PrintOptions& o) = delete;

  int precision = 8;
  int threshold = 1000;
  int edgeitems = 3;
  int linewidth = 75;
  bool sci_mode = false;

 private:
  PrintOptions() {}
};
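
// Example: a minimal sketch of adjusting the global print settings that
// operator<< (declared below) is expected to honor.
//
//   PrintOptions::Instance().precision = 4;
//   PrintOptions::Instance().sci_mode = true;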

// NOTE(zcd): Because TensorCopy is an async operation, when the src_place
// and dst_place are two different GPUs, TensorCopy performs a wait on
// src_ctx to ensure that the operation is carried out correctly.
// If ctx_place is the same as src_place, src_ctx.Wait() is called after
// memory::Copy; if ctx_place is the same as dst_place, src_ctx.Wait() is
// called before memory::Copy.
class Tensor;

void TensorCopy(const Tensor& src, const platform::Place& dst_place,
                const platform::DeviceContext& ctx, Tensor* dst);

// NOTE(zcd): If src.place() and dst_place are two different GPUs, the copy
// is carried out on dst_place's stream. This is very important, because
// TensorCopy is an async operator, and in most cases dst will be used on
// dst_place's stream as soon as this call returns. If the copy were carried
// out on src_place's stream instead, it might not yet be complete when dst
// is used on dst_place's stream.
void TensorCopy(const Tensor& src, const platform::Place& dst_place,
                Tensor* dst);

void TensorCopySync(const Tensor& src, const platform::Place& dst_place,
                    Tensor* dst);
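
// Example: a minimal sketch of the async vs. sync variants; assumes CUDA
// device 0 is available and contexts come from the DeviceContextPool.
//
//   platform::CUDAPlace gpu_place(0);
//   auto& gpu_ctx = *platform::DeviceContextPool::Instance().Get(gpu_place);
//   framework::Tensor cpu_t, gpu_t;
//   cpu_t.Resize({2, 3});
//   cpu_t.mutable_data<float>(platform::CPUPlace());
//   // Async: enqueued on gpu_ctx's stream; keep cpu_t alive until done.
//   TensorCopy(cpu_t, gpu_place, gpu_ctx, &gpu_t);
//   // Sync: blocks until the copy has finished.
//   TensorCopySync(gpu_t, platform::CPUPlace(), &cpu_t);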

template <typename T>
void TensorFromVector(const std::vector<T>& src,
                      const platform::DeviceContext& ctx, Tensor* dst);
template <typename T>
void TensorFromVector(const std::vector<T>& src, Tensor* dst);

template <typename T>
void TensorToVector(const Tensor& src, const platform::DeviceContext& ctx,
                    std::vector<T>* dst);
template <typename T>
void TensorToVector(const Tensor& src, std::vector<T>* dst);
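
// Example: a minimal CPU round trip through the vector helpers.
//
//   std::vector<float> src = {1.f, 2.f, 3.f};
//   framework::Tensor t;
//   TensorFromVector(src, &t);  // t becomes a 1-D tensor of 3 elements
//   std::vector<float> dst;
//   TensorToVector(t, &dst);    // dst == src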

// Copy the bool result to CPU; these checks are synchronous.
bool TensorContainsNAN(const framework::Tensor& tensor);
bool TensorContainsInf(const framework::Tensor& tensor);
bool TensorIsfinite(const framework::Tensor& tensor);

// Store the bool result in a GPU tensor. These are async operations and
// faster than the synchronous ones above.
void TensorContainsNAN(const framework::Tensor& tensor, framework::Tensor* out);
void TensorContainsInf(const framework::Tensor& tensor, framework::Tensor* out);
void TensorIsfinite(const framework::Tensor& tensor, framework::Tensor* out);
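
// Example: a minimal sketch contrasting the two forms; `loss` is a
// hypothetical tensor.
//
//   if (TensorContainsNAN(loss)) { /* bool already copied to CPU */ }
//   framework::Tensor flag;  // async form: result stays on the device
//   TensorIsfinite(loss, &flag);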

void TensorToStream(std::ostream& os, const Tensor& tensor,
                    const platform::DeviceContext& dev_ctx);
void TensorFromStream(std::istream& is, Tensor* tensor,
                      const platform::DeviceContext& dev_ctx);
void TensorFromStream(std::istream& is, Tensor* tensor,
                      const platform::DeviceContext& dev_ctx,
                      const size_t& seek, const std::vector<int64_t>& shape);
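
// Example: a minimal serialize/deserialize round trip; assumes a CPU
// device context obtained from the global DeviceContextPool.
//
//   auto& ctx = *platform::DeviceContextPool::Instance().Get(
//       platform::CPUPlace());
//   std::stringstream ss;
//   TensorToStream(ss, src_tensor, ctx);
//   framework::Tensor loaded;
//   TensorFromStream(ss, &loaded, ctx);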

// Store the bool result in the out tensor.
void TensorContainsNANV2(const framework::Tensor& tensor,
                         framework::Tensor* out);
void TensorContainsInfV2(const framework::Tensor& tensor,
                         framework::Tensor* out);
void TensorIsfiniteV2(const framework::Tensor& tensor, framework::Tensor* out);

// Convert a DLPack DLTensor to a framework::Tensor.
void TensorFromDLPack(const ::DLTensor& dl_tensor, framework::Tensor* dst);
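
// Example: a minimal sketch; `dl_tensor` is a hypothetical DLTensor
// exported by another DLPack-aware framework.
//
//   framework::Tensor dst;
//   TensorFromDLPack(dl_tensor, &dst);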

//
// The implementation of template functions.
//

template <typename T>
void TensorFromArray(const T* src, const size_t& array_size,
                     const platform::DeviceContext& ctx, Tensor* dst) {
  auto dst_place = ctx.GetPlace();
  auto src_ptr = static_cast<const void*>(src);
  platform::CPUPlace src_place;
  dst->Resize({static_cast<int64_t>(array_size)});
  auto dst_ptr = static_cast<void*>(dst->mutable_data<T>(dst_place));
  auto size = array_size * sizeof(T);

  if (platform::is_cpu_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::CPUPlace, dst_place), dst_ptr,
                 src_place, src_ptr, size);
  }
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  else if (platform::is_gpu_place(dst_place)) {  // NOLINT
    memory::Copy(
        BOOST_GET_CONST(platform::CUDAPlace, dst_place), dst_ptr, src_place,
        src_ptr, size,
        reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream());
  }
#endif
}
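
// Example: a minimal use of TensorFromArray with a raw CPU buffer.
//
//   float buf[4] = {0.f, 1.f, 2.f, 3.f};
//   framework::Tensor t;
//   auto& ctx = *platform::DeviceContextPool::Instance().Get(
//       platform::CPUPlace());
//   TensorFromArray(buf, 4, ctx, &t);  // t is a 1-D tensor of 4 floats
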
template <typename T>
void TensorFromVector(const std::vector<T>& src,
                      const platform::DeviceContext& ctx, Tensor* dst) {
  auto dst_place = ctx.GetPlace();
  auto src_ptr = static_cast<const void*>(src.data());
  platform::CPUPlace src_place;
  dst->Resize({static_cast<int64_t>(src.size())});
  auto dst_ptr = static_cast<void*>(dst->mutable_data<T>(dst_place));
  auto size = src.size() * sizeof(T);

  if (platform::is_cpu_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::CPUPlace, dst_place), dst_ptr,
                 src_place, src_ptr, size);
  }
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  else if (platform::is_gpu_place(dst_place)) {  // NOLINT
    memory::Copy(
        BOOST_GET_CONST(platform::CUDAPlace, dst_place), dst_ptr, src_place,
        src_ptr, size,
        reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream());
  }
#endif
}

template <typename T>
void TensorFromVector(const std::vector<T>& src, Tensor* dst) {
  platform::CPUPlace dst_place = platform::CPUPlace();
  auto src_ptr = static_cast<const void*>(src.data());
  platform::CPUPlace src_place;
  dst->Resize({static_cast<int64_t>(src.size())});
  auto dst_ptr = static_cast<void*>(dst->mutable_data<T>(dst_place));
  auto size = src.size() * sizeof(T);

  memory::Copy(dst_place, dst_ptr, src_place, src_ptr, size);
}

template <typename T>
void TensorToVector(const Tensor& src, const platform::DeviceContext& ctx,
                    std::vector<T>* dst) {
  auto src_ptr = static_cast<const void*>(src.data<T>());
  auto size = src.numel() * sizeof(T);

  platform::CPUPlace dst_place;
  dst->resize(src.numel());
  auto dst_ptr = static_cast<void*>(dst->data());

  if (platform::is_cpu_place(src.place())) {
    memory::Copy(dst_place, dst_ptr,
                 BOOST_GET_CONST(platform::CPUPlace, src.place()), src_ptr,
                 size);
  }
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  else if (platform::is_gpu_place(src.place())) {  // NOLINT
    memory::Copy(
        dst_place, dst_ptr, BOOST_GET_CONST(platform::CUDAPlace, src.place()),
        src_ptr, size,
        reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream());
  }
#endif
}

template <typename T>
void TensorToVector(const Tensor& src, std::vector<T>* dst) {
  auto src_ptr = static_cast<const void*>(src.data<T>());
  auto size = src.numel() * sizeof(T);

  platform::CPUPlace dst_place;
  dst->resize(src.numel());
  auto dst_ptr = static_cast<void*>(dst->data());

  PADDLE_ENFORCE_EQ(
      platform::is_cpu_place(src.place()), true,
      platform::errors::InvalidArgument(
          "The input tensor should be CPU device, but actually it is in %s.",
          src.place()));

  memory::Copy(dst_place, dst_ptr,
               BOOST_GET_CONST(platform::CPUPlace, src.place()), src_ptr, size);
}

std::ostream& operator<<(std::ostream& os, const Tensor& t);
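
// Example: streams a textual dump of the tensor, honoring the
// PrintOptions settings above.
//
//   std::cout << tensor;
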
}  // namespace framework
}  // namespace paddle