/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include <algorithm>
#include <codecvt>
#include <locale>
#include <string>
#include <unordered_map>
#include <vector>

#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/framework/dlpack_tensor.h"
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/string_array.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/memory/allocation/allocator_facade.h"
#ifdef PADDLE_WITH_ASCEND_CL
#include "paddle/fluid/memory/allocation/npu_pinned_allocator.h"
#endif
#include "paddle/fluid/platform/device_context.h"
#ifdef PADDLE_WITH_MLU
#include "paddle/fluid/platform/device/mlu/device_context.h"
#endif

#include "paddle/phi/core/dense_tensor.h"

namespace paddle {
namespace framework {

// Options controlling how tensors are formatted when printed,
// similar in spirit to numpy.set_printoptions.
class PrintOptions {
 public:
  static PrintOptions& Instance() {
    static PrintOptions instance;
    return instance;
  }
  ~PrintOptions() {}
  PrintOptions(const PrintOptions& o) = delete;
  const PrintOptions& operator=(const PrintOptions& o) = delete;

  int precision = 8;
  int threshold = 1000;
  int edgeitems = 3;
  int linewidth = 75;
  bool sci_mode = false;

 private:
  PrintOptions() {}
};
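
// Usage sketch (illustrative only; the values below are hypothetical):
//
//   auto& opts = PrintOptions::Instance();
//   opts.precision = 4;    // digits shown per element
//   opts.sci_mode = true;  // use scientific notation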

void TensorToStream(std::ostream& os,
                    const Tensor& tensor,
                    const platform::DeviceContext& dev_ctx);
void TensorFromStream(std::istream& is,
                      Tensor* tensor,
                      const platform::DeviceContext& dev_ctx);
void TensorFromStream(std::istream& is,
                      Tensor* tensor,
                      const platform::DeviceContext& dev_ctx,
                      const size_t& seek,
                      const std::vector<int64_t>& shape);
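
// Serialization sketch (illustrative; assumes `fout`/`fin` are open
// std::fstream objects and `dev_ctx` matches the tensor's place):
//
//   TensorToStream(fout, tensor, dev_ctx);    // write tensor to a stream
//   TensorFromStream(fin, &tensor, dev_ctx);  // read it back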

// NOTE(zcd): Because TensorCopy is an async operation, when the src_place
// and dst_place are two different GPUs, a wait on src_ctx is required in
// TensorCopy to ensure the copy finishes correctly. If ctx_place and
// src_place are the same, src_ctx.Wait() is added after memory::Copy;
// if ctx_place and dst_place are the same, src_ctx.Wait() is added
// before memory::Copy.
void TensorCopy(const Tensor& src,
                const platform::Place& dst_place,
                const platform::DeviceContext& ctx,
                Tensor* dst);

// NOTE(zcd): If src.place() and dst_place are two different GPUs, the copy
// is carried out on the dst_place's stream. This is very important, because
// TensorCopy is an async operation, and in most cases, once it returns, dst
// is used on dst_place's stream; if the copy were carried out on the
// src_place's stream, the copy might not be completed when dst is used on
// dst_place's stream.
void TensorCopy(const Tensor& src,
                const platform::Place& dst_place,
                Tensor* dst);

void TensorCopySync(const Tensor& src,
                    const platform::Place& dst_place,
                    Tensor* dst);
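
// Usage sketch (illustrative; `gpu_place` stands for any destination place
// and `ctx` for a DeviceContext bound to it):
//
//   TensorCopy(cpu_tensor, gpu_place, ctx, &gpu_tensor);  // async
//   ctx.Wait();  // synchronize before using the result elsewhere
//   TensorCopySync(gpu_tensor, platform::CPUPlace(), &cpu_tensor);  // blocking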

template <typename T>
void TensorFromVector(const std::vector<T>& src,
                      const platform::DeviceContext& ctx,
                      Tensor* dst);
template <typename T>
void TensorFromVector(const std::vector<T>& src, Tensor* dst);

template <typename T>
void TensorToVector(const Tensor& src,
                    const platform::DeviceContext& ctx,
                    std::vector<T>* dst);
template <typename T>
void TensorToVector(const Tensor& src, std::vector<T>* dst);
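
// Round-trip sketch (illustrative; `ctx` must be a DeviceContext for the
// place the tensor should live on):
//
//   std::vector<float> src = {1.f, 2.f, 3.f};
//   framework::Tensor t;
//   TensorFromVector(src, ctx, &t);  // host vector -> tensor
//   std::vector<float> out;
//   TensorToVector(t, ctx, &out);    // tensor -> host vector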

// copy the result bool to cpu
bool TensorContainsNAN(const framework::Tensor& tensor);
bool TensorContainsInf(const framework::Tensor& tensor);
bool TensorIsfinite(const framework::Tensor& tensor);

// store the result bool in a gpu tensor; async operation, faster than the ones
// above.
void TensorContainsNAN(const framework::Tensor& tensor, framework::Tensor* out);
void TensorContainsInf(const framework::Tensor& tensor, framework::Tensor* out);
void TensorIsfinite(const framework::Tensor& tensor, framework::Tensor* out);
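
// Example (sketch; `grad` is a hypothetical input tensor):
//
//   if (TensorContainsNAN(grad) || TensorContainsInf(grad)) {
//     // handle the bad values, e.g. skip this update
//   }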

// store the bool result in the out tensor
void TensorContainsNANV2(const framework::Tensor& tensor,
                         framework::Tensor* out);
void TensorContainsInfV2(const framework::Tensor& tensor,
                         framework::Tensor* out);
void TensorIsfiniteV2(const framework::Tensor& tensor, framework::Tensor* out);
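
// Sketch (illustrative): the V2 variants write the boolean result into `out`.
//
//   framework::Tensor flag;
//   TensorIsfiniteV2(t, &flag);  // `t` is a hypothetical input tensor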

// convert dlpack's DLTensor to tensor
void TensorFromDLPack(const ::DLTensor& dl_tensor, framework::Tensor* dst);
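
// Sketch (illustrative): `dl_tensor` would typically come from another
// framework via the DLPack protocol.
//
//   framework::Tensor dst;
//   TensorFromDLPack(dl_tensor, &dst);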

//
// The implementation of template functions.
//

template <typename T>
void TensorFromArray(const T* src,
                     const size_t& array_size,
                     const platform::DeviceContext& ctx,
                     Tensor* dst) {
  auto dst_place = ctx.GetPlace();
  auto src_ptr = static_cast<const void*>(src);
  platform::CPUPlace src_place;
  dst->Resize({static_cast<int64_t>(array_size)});
  auto dst_ptr = static_cast<void*>(dst->mutable_data<T>(dst_place));
  auto size = array_size * sizeof(T);

  if (platform::is_cpu_place(dst_place)) {
    memory::Copy(dst_place, dst_ptr, src_place, src_ptr, size);
  }
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  else if (platform::is_gpu_place(dst_place)) {  // NOLINT
    memory::Copy(dst_place,
                 dst_ptr,
                 src_place,
                 src_ptr,
                 size,
                 reinterpret_cast<const phi::GPUContext&>(ctx).stream());
  }
#endif
#ifdef PADDLE_WITH_ASCEND_CL
  else if (platform::is_npu_place(dst_place)) {  // NOLINT
    //  1. vector -> npu pinned tensor
    platform::NPUPinnedPlace npu_pinned_place;
    Tensor npu_pinned_tensor;
    npu_pinned_tensor.Resize(dst->dims());
    auto npu_pinned_ptr =
        npu_pinned_tensor.mutable_data(npu_pinned_place, dst->dtype());
    memory::Copy(npu_pinned_place, npu_pinned_ptr, src_place, src_ptr, size);

    //  2. async copy npu pinned tensor -> npu tensor
    memory::Copy(
        dst_place,
        dst_ptr,
        npu_pinned_place,
        npu_pinned_ptr,
        size,
        reinterpret_cast<const platform::NPUDeviceContext&>(ctx).stream());

    //  3. record event
    auto npu_pinned_allocator =
        static_cast<paddle::memory::allocation::NPUPinnedAllocator*>(
            paddle::memory::allocation::AllocatorFacade::Instance()
                .GetAllocator(npu_pinned_place)
                .get());
    phi::Allocation* allocation = npu_pinned_tensor.Holder().get();
    npu_pinned_allocator->RecordEvent(
        allocation,
        reinterpret_cast<const platform::NPUDeviceContext&>(ctx).stream());
  }
#endif
#ifdef PADDLE_WITH_MLU
  else if (platform::is_mlu_place(dst_place)) {  // NOLINT
    memory::Copy(dst_place, dst_ptr, src_place, src_ptr, size, nullptr);
  }
#endif
#ifdef PADDLE_WITH_CUSTOM_DEVICE
  else if (platform::is_custom_place(dst_place)) {  // NOLINT
    memory::Copy(
        dst_place,
        dst_ptr,
        src_place,
        src_ptr,
        size,
        reinterpret_cast<const platform::CustomDeviceContext&>(ctx).stream());
  }
#endif
  else {  // NOLINT
    PADDLE_THROW(platform::errors::Unimplemented(
        "TensorFromArray on %s is not supported.", dst_place));
  }
}

template <typename T>
void TensorFromVector(const std::vector<T>& src,
                      const platform::DeviceContext& ctx,
                      Tensor* dst) {
  auto dst_place = ctx.GetPlace();
  auto src_ptr = static_cast<const void*>(src.data());
  platform::CPUPlace src_place;
  dst->Resize({static_cast<int64_t>(src.size())});
  auto dst_ptr = static_cast<void*>(dst->mutable_data<T>(dst_place));
  auto size = src.size() * sizeof(T);

  if (platform::is_cpu_place(dst_place)) {
    memory::Copy(dst_place, dst_ptr, src_place, src_ptr, size);
  }
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  else if (platform::is_gpu_place(dst_place)) {  // NOLINT
    memory::Copy(dst_place,
                 dst_ptr,
                 src_place,
                 src_ptr,
                 size,
                 reinterpret_cast<const phi::GPUContext&>(ctx).stream());
  }
#endif
#ifdef PADDLE_WITH_ASCEND_CL
  // NOTE(zhiqiu): Be careful that aclrtMemcpyAsync is different from
  // cudaMemcpyAsync.
  // cudaMemcpyAsync is actually "sync" between cpu <-> gpu.
  // aclrtMemcpyAsync is really "async" between cpu <-> npu.
  // Since the vector lives on the cpu, this function should be a "sync"
  // operation, so pass nullptr as the stream to memory::Copy().
  else if (platform::is_npu_place(dst_place)) {  // NOLINT
    //  1. vector -> npu pinned tensor
    Tensor npu_pinned_tensor(dst->dtype());
    platform::NPUPinnedPlace npu_pinned_place;
    auto npu_pinned_ptr =
        npu_pinned_tensor.mutable_data<T>(dst->dims(), npu_pinned_place);
    memory::Copy(npu_pinned_place, npu_pinned_ptr, src_place, src_ptr, size);

    //  2. async copy npu pinned tensor -> npu tensor
    memory::Copy(
        dst_place,
        dst_ptr,
        npu_pinned_place,
        npu_pinned_ptr,
        size,
        reinterpret_cast<const platform::NPUDeviceContext&>(ctx).stream());

    //  3. record event
    auto npu_pinned_allocator =
        static_cast<paddle::memory::allocation::NPUPinnedAllocator*>(
            paddle::memory::allocation::AllocatorFacade::Instance()
                .GetAllocator(npu_pinned_place)
                .get());
    phi::Allocation* allocation = npu_pinned_tensor.Holder().get();
    npu_pinned_allocator->RecordEvent(
        allocation,
        reinterpret_cast<const platform::NPUDeviceContext&>(ctx).stream());
  }
#endif
#ifdef PADDLE_WITH_MLU
  else if (platform::is_mlu_place(dst_place)) {  // NOLINT
    memory::Copy(dst_place, dst_ptr, src_place, src_ptr, size, nullptr);
  }
#endif
#ifdef PADDLE_WITH_CUSTOM_DEVICE
  else if (platform::is_custom_place(dst_place)) {  // NOLINT
    memory::Copy(
        dst_place,
        dst_ptr,
        src_place,
        src_ptr,
        size,
        reinterpret_cast<const platform::CustomDeviceContext&>(ctx).stream());
  }
#endif
#ifdef PADDLE_WITH_XPU
  else if (platform::is_xpu_place(dst_place)) {  // NOLINT
    memory::Copy(dst_place, dst_ptr, src_place, src_ptr, size);
  }
#endif
  else {  // NOLINT
    PADDLE_THROW(platform::errors::Unimplemented(
        "TensorFromVector on %s is not supported.", dst_place));
  }
}

// The fully specialized function should be inline to avoid
// multi-definition.
template <>
inline void TensorFromVector(const std::vector<bool>& src,
                             const platform::DeviceContext& ctx,
                             Tensor* dst) {
  // vector<bool> has no data() member, use array instead.
  // See details:
  // https://stackoverflow.com/questions/46115669/why-does-stdvectorbool-have-no-data/46115714
  bool* array = new bool[src.size()];
  for (unsigned int i = 0; i < src.size(); i++) {
    array[i] = static_cast<bool>(src[i]);
  }

  auto dst_place = ctx.GetPlace();
  auto src_ptr = static_cast<const void*>(array);
  platform::CPUPlace src_place;
  dst->Resize({static_cast<int64_t>(src.size())});
  auto dst_ptr = static_cast<void*>(dst->mutable_data<bool>(dst_place));
  auto size = src.size() * sizeof(bool);

  if (platform::is_cpu_place(dst_place)) {
    memory::Copy(dst_place, dst_ptr, src_place, src_ptr, size);
  }
#ifdef PADDLE_WITH_CUDA
  else if (platform::is_gpu_place(dst_place)) {  // NOLINT
    memory::Copy(dst_place,
                 dst_ptr,
                 src_place,
                 src_ptr,
                 size,
                 reinterpret_cast<const phi::GPUContext&>(ctx).stream());
  }
#endif
#ifdef PADDLE_WITH_ASCEND_CL
  else if (platform::is_npu_place(dst_place)) {  // NOLINT
    //  1. vector -> npu pinned tensor
    platform::NPUPinnedPlace npu_pinned_place;
    Tensor npu_pinned_tensor;
    npu_pinned_tensor.Resize(dst->dims());
    auto npu_pinned_ptr =
        npu_pinned_tensor.mutable_data(npu_pinned_place, dst->dtype());
    memory::Copy(npu_pinned_place, npu_pinned_ptr, src_place, src_ptr, size);

    //  2. async copy npu pinned tensor -> npu tensor
    memory::Copy(
        dst_place,
        dst_ptr,
        npu_pinned_place,
        npu_pinned_ptr,
        size,
        reinterpret_cast<const platform::NPUDeviceContext&>(ctx).stream());

    //  3. record event
    auto npu_pinned_allocator =
        static_cast<paddle::memory::allocation::NPUPinnedAllocator*>(
            paddle::memory::allocation::AllocatorFacade::Instance()
                .GetAllocator(npu_pinned_place)
                .get());
    phi::Allocation* allocation = npu_pinned_tensor.Holder().get();
    npu_pinned_allocator->RecordEvent(
        allocation,
        reinterpret_cast<const platform::NPUDeviceContext&>(ctx).stream());
  }
#endif
#ifdef PADDLE_WITH_CUSTOM_DEVICE
  else if (platform::is_custom_place(dst_place)) {  // NOLINT
    auto stream =
        reinterpret_cast<const platform::CustomDeviceContext&>(ctx).stream();
    memory::Copy(dst_place, dst_ptr, src_place, src_ptr, size, stream);
  }
#endif
#ifdef PADDLE_WITH_XPU
  else if (platform::is_xpu_place(dst_place)) {  // NOLINT
    memory::Copy(dst_place, dst_ptr, src_place, src_ptr, size);
  }
#endif
  else {  // NOLINT
    PADDLE_THROW(platform::errors::Unimplemented(
        "TensorFromVector on %s is not supported.", dst_place));
  }
  delete[] array;
}

template <typename T>
void TensorFromVector(const std::vector<T>& src, Tensor* dst) {
  platform::CPUPlace dst_place = platform::CPUPlace();
  auto src_ptr = static_cast<const void*>(src.data());
  platform::CPUPlace src_place;
  dst->Resize({static_cast<int64_t>(src.size())});
  auto dst_ptr = static_cast<void*>(dst->mutable_data<T>(dst_place));
  auto size = src.size() * sizeof(T);

  memory::Copy(dst_place, dst_ptr, src_place, src_ptr, size);
}

template <>
inline void TensorFromVector(const std::vector<bool>& src, Tensor* dst) {
  bool* array = new bool[src.size()];
  for (unsigned int i = 0; i < src.size(); i++) {
    array[i] = static_cast<bool>(src[i]);
  }
  platform::CPUPlace dst_place = platform::CPUPlace();
  auto src_ptr = static_cast<const void*>(array);
  platform::CPUPlace src_place;
  dst->Resize({static_cast<int64_t>(src.size())});
  auto dst_ptr = static_cast<void*>(dst->mutable_data<bool>(dst_place));
  auto size = src.size() * sizeof(bool);

  memory::Copy(dst_place, dst_ptr, src_place, src_ptr, size);
  delete[] array;
}

template <typename T>
void TensorToVector(const Tensor& src,
                    const platform::DeviceContext& ctx,
                    std::vector<T>* dst) {
  auto src_ptr = static_cast<const void*>(src.data<T>());
  auto size = src.numel() * sizeof(T);

  platform::CPUPlace dst_place;
  dst->resize(src.numel());
  auto dst_ptr = static_cast<void*>(dst->data());

  if (platform::is_cpu_place(src.place())) {
    memory::Copy(dst_place, dst_ptr, src.place(), src_ptr, size);
  }
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  else if (platform::is_gpu_place(src.place())) {  // NOLINT
    memory::Copy(dst_place,
                 dst_ptr,
                 src.place(),
                 src_ptr,
                 size,
                 reinterpret_cast<const phi::GPUContext&>(ctx).stream());
  }
#endif
#if defined(PADDLE_WITH_XPU)
  else if (platform::is_xpu_place(src.place())) {  // NOLINT
    memory::Copy(dst_place, dst_ptr, src.place(), src_ptr, size);
  }
#endif
#ifdef PADDLE_WITH_ASCEND_CL
  else if (platform::is_npu_place(src.place())) {  // NOLINT
    memory::Copy(dst_place, dst_ptr, src.place(), src_ptr, size, nullptr);
  }
#endif
#ifdef PADDLE_WITH_MLU
  else if (platform::is_mlu_place(src.place())) {  // NOLINT
    memory::Copy(
        dst_place,
        dst_ptr,
        src.place(),
        src_ptr,
        size,
        reinterpret_cast<const platform::MLUDeviceContext&>(ctx).stream());
  }
#endif
#ifdef PADDLE_WITH_CUSTOM_DEVICE
  else if (platform::is_custom_place(src.place())) {  // NOLINT
    memory::Copy(dst_place, dst_ptr, src.place(), src_ptr, size, nullptr);
  }
#endif
  else {  // NOLINT
    PADDLE_THROW(platform::errors::Unimplemented(
        "TensorToVector on %s is not supported.", src.place()));
  }
}

template <>
inline void TensorToVector(const Tensor& src,
                           const platform::DeviceContext& ctx,
                           std::vector<bool>* dst) {
  auto src_ptr = static_cast<const void*>(src.data<bool>());
  auto size = src.numel() * sizeof(bool);

  bool* array = new bool[src.numel()];

  platform::CPUPlace dst_place;
  dst->resize(src.numel());
  auto dst_ptr = static_cast<void*>(array);

  if (platform::is_cpu_place(src.place())) {
    memory::Copy(dst_place, dst_ptr, src.place(), src_ptr, size);
  }
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  else if (platform::is_gpu_place(src.place())) {  // NOLINT
    memory::Copy(dst_place,
                 dst_ptr,
                 src.place(),
                 src_ptr,
                 size,
                 reinterpret_cast<const phi::GPUContext&>(ctx).stream());
  }
#endif
#if defined(PADDLE_WITH_XPU)
  else if (platform::is_xpu_place(src.place())) {  // NOLINT
    memory::Copy(dst_place, dst_ptr, src.place(), src_ptr, size);
  }
#endif
#ifdef PADDLE_WITH_ASCEND_CL
  else if (platform::is_npu_place(src.place())) {  // NOLINT
    memory::Copy(dst_place, dst_ptr, src.place(), src_ptr, size, nullptr);
  }
#endif
#ifdef PADDLE_WITH_MLU
  else if (platform::is_mlu_place(src.place())) {  // NOLINT
    memory::Copy(dst_place, dst_ptr, src.place(), src_ptr, size, nullptr);
  }
#endif
#ifdef PADDLE_WITH_CUSTOM_DEVICE
  else if (platform::is_custom_place(src.place())) {  // NOLINT
    memory::Copy(dst_place, dst_ptr, src.place(), src_ptr, size, nullptr);
  }
#endif
  for (unsigned int i = 0; i < src.numel(); i++) {
    (*dst)[i] = static_cast<bool>(array[i]);
  }
  delete[] array;
}

template <typename T>
void TensorToVector(const Tensor& src, std::vector<T>* dst) {
  auto src_ptr = static_cast<const void*>(src.data<T>());
  auto size = src.numel() * sizeof(T);

  platform::CPUPlace dst_place;
  dst->resize(src.numel());
  auto dst_ptr = static_cast<void*>(dst->data());

  PADDLE_ENFORCE_EQ(
      platform::is_cpu_place(src.place()),
      true,
      platform::errors::InvalidArgument(
          "The input tensor should be on CPU, but it is actually on %s.",
          src.place()));

  memory::Copy(dst_place, dst_ptr, src.place(), src_ptr, size);
}

template <>
inline void TensorToVector(const Tensor& src, std::vector<bool>* dst) {
  auto src_ptr = static_cast<const void*>(src.data<bool>());
  auto size = src.numel() * sizeof(bool);

  bool* array = new bool[src.numel()];

  platform::CPUPlace dst_place;
  dst->resize(src.numel());
  auto dst_ptr = static_cast<void*>(array);

  PADDLE_ENFORCE_EQ(
      platform::is_cpu_place(src.place()),
      true,
      platform::errors::InvalidArgument(
          "The input tensor should be on CPU, but it is actually on %s.",
          src.place()));

  memory::Copy(dst_place, dst_ptr, src.place(), src_ptr, size);

  for (unsigned int i = 0; i < src.numel(); i++) {
    (*dst)[i] = static_cast<bool>(array[i]);
  }
  delete[] array;
}

std::ostream& operator<<(std::ostream& os, const LoD& lod);

}  // namespace framework
}  // namespace paddle

namespace phi {
std::ostream& operator<<(std::ostream& os, const DenseTensor& t);
}