// paddle/fluid/framework/tensor_util.cc
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <algorithm>
#include <limits>
#include <memory>
#include <string>
#include <utility>
#include <vector>

#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/platform/complex.h"
#include "paddle/fluid/platform/profiler.h"
#ifdef PADDLE_WITH_MKLDNN
#include "dnnl_debug.h"  // NOLINT
#endif

namespace paddle {
namespace framework {

void TensorCopy(const Tensor& src, const platform::Place& dst_place,
                const platform::DeviceContext& ctx, Tensor* dst) {
  if (&src == dst) {
    auto src_copy = src;
    TensorCopy(src_copy, dst_place, ctx, dst);
    return;
  }

  VLOG(3) << "TensorCopy " << src.dims() << " from " << src.place() << " to "
          << dst_place;
  src.check_memory_size();

  dst->Resize(src.dims());
  dst->set_layout(src.layout());
  auto src_place = src.place();
  auto src_ptr = src.data<void>();
#ifdef PADDLE_WITH_MKLDNN
  dst->set_format(src.format());
  // Due to padding, oneDNN tensors may be bigger than
  // numel() * size(type()).
  auto dst_ptr =
      src.layout() == DataLayout::kMKLDNN
          ? dst->mutable_data(dst_place, src.type(), src.memory_size())
          : dst->mutable_data(dst_place, src.type());
#else
  auto dst_ptr = dst->mutable_data(dst_place, src.type());
#endif
  if (src_ptr == dst_ptr && src_place == dst_place) {
    VLOG(3) << "Skip copy the same data async from " << src_place << " to "
            << dst_place;
    return;
  }
  VLOG(4) << "src:" << src_ptr << ", dst:" << dst_ptr;

#ifdef PADDLE_WITH_MKLDNN
  auto size = src.layout() == DataLayout::kMKLDNN
                  ? src.memory_size()
                  : src.numel() * SizeOfType(src.type());
#else
  auto size = src.numel() * SizeOfType(src.type());
#endif

  if (platform::is_cpu_place(src_place) && platform::is_cpu_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::CPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::CPUPlace, src_place), src_ptr, size);
  }
#ifdef PADDLE_WITH_XPU
  else if (platform::is_xpu_place(src_place) &&  // NOLINT
           platform::is_cpu_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::CPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::XPUPlace, src_place), src_ptr, size);
  } else if (platform::is_cpu_place(src_place) &&
             platform::is_xpu_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::XPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::CPUPlace, src_place), src_ptr, size);
  } else if (platform::is_xpu_place(src_place) &&
             platform::is_xpu_place(dst_place)) {
    if (src_ptr == dst_ptr) {
      VLOG(3) << "Skip copy the same data async from " << src_place << " to "
              << dst_place;
      return;
    }
    memory::Copy(BOOST_GET_CONST(platform::XPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::XPUPlace, src_place), src_ptr, size);
  } else {
    PADDLE_THROW(platform::errors::Unimplemented(
        "Copy from %s to %s is not supported.", src_place, dst_place));
  }
#endif
#ifdef PADDLE_WITH_ASCEND_CL
  // TODO(zhiqiu): handle different condition like CUDA code below
  else if (platform::is_npu_place(src_place) &&  // NOLINT
           platform::is_cpu_place(dst_place)) {
    auto stream =
        reinterpret_cast<const platform::NPUDeviceContext&>(ctx).stream();
    memory::Copy(BOOST_GET_CONST(platform::CPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::NPUPlace, src_place), src_ptr, size,
                 stream);
  }
  else if (platform::is_cpu_place(src_place) &&  // NOLINT
           platform::is_npu_place(dst_place)) {
    //  1. cpu tensor -> npu pinned tensor
    platform::NPUPinnedPlace npu_pinned_place;
    Tensor npu_pinned_tensor;
    npu_pinned_tensor.Resize(src.dims());
    auto npu_pinned_ptr =
        npu_pinned_tensor.mutable_data(npu_pinned_place, src.type());
    memory::Copy(npu_pinned_place, npu_pinned_ptr,
                 BOOST_GET_CONST(platform::CPUPlace, src_place), src_ptr, size);

    //  2. async copy npu pinned tensor -> npu tensor
    memory::Copy(
        BOOST_GET_CONST(platform::NPUPlace, dst_place), dst_ptr,
        npu_pinned_place, npu_pinned_ptr, size,
        reinterpret_cast<const platform::NPUDeviceContext&>(ctx).stream());

    //  3. record event
    auto npu_pinned_allocator =
        static_cast<paddle::memory::allocation::NPUPinnedAllocator*>(
            paddle::memory::allocation::AllocatorFacade::Instance()
                .GetAllocator(npu_pinned_place)
                .get());
    paddle::memory::allocation::Allocation* allocation =
        npu_pinned_tensor.Holder().get();
    npu_pinned_allocator->RecordEvent(
        allocation,
        reinterpret_cast<const platform::NPUDeviceContext&>(ctx).stream());
  }
  else if (platform::is_npu_place(src_place) &&  // NOLINT
           platform::is_npu_place(dst_place)) {
    if (src_ptr == dst_ptr) {
      VLOG(3) << "Skip copy the same data async from " << src_place << " to "
              << dst_place;
      return;
    }
    auto stream =
        reinterpret_cast<const platform::NPUDeviceContext&>(ctx).stream();
    memory::Copy(BOOST_GET_CONST(platform::NPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::NPUPlace, src_place), src_ptr, size,
                 stream);
  }
  else if (platform::is_npu_pinned_place(src_place) &&  // NOLINT
           platform::is_npu_place(dst_place)) {         /* npu_pinned->npu */
    auto src_npu_pinned_place =
        BOOST_GET_CONST(platform::NPUPinnedPlace, src_place);
    auto dst_npu_place = BOOST_GET_CONST(platform::NPUPlace, dst_place);
    auto ctx_place = ctx.GetPlace();
    PADDLE_ENFORCE_EQ(platform::is_npu_place(ctx_place), true,
                      platform::errors::PreconditionNotMet(
                          "Device context place mismatch. When copying Tensor "
                          "data from NPU Pinned memory to NPU memory, current "
                          "device context place should be NPU."));
    auto ctx_npu_place = BOOST_GET_CONST(platform::NPUPlace, ctx_place);
    PADDLE_ENFORCE_EQ(dst_npu_place, ctx_npu_place,
                      platform::errors::PreconditionNotMet(
                          "The target NPU device and current device context do "
                          "not match. The target NPU device number is %d, but "
                          "device context NPU number is %d.",
                          dst_npu_place.device, ctx_npu_place.device));
    auto stream =
        reinterpret_cast<const platform::NPUDeviceContext&>(ctx).stream();
    memory::Copy(dst_npu_place, dst_ptr, src_npu_pinned_place, src_ptr, size,
                 stream);
  }
  else if (platform::is_npu_place(src_place) &&        // NOLINT
           platform::is_npu_pinned_place(dst_place)) { /* npu->npu_pinned */
    auto src_npu_place = BOOST_GET_CONST(platform::NPUPlace, src_place);
    auto dst_npu_pinned_place =
        BOOST_GET_CONST(platform::NPUPinnedPlace, dst_place);
    auto ctx_place = ctx.GetPlace();
    PADDLE_ENFORCE_EQ(platform::is_npu_place(ctx_place), true,
                      platform::errors::PreconditionNotMet(
                          "Device context place mismatch. When copying Tensor "
                          "data from NPU memory to NPU Pinned memory, current "
                          "device context place should be NPU."));
    auto ctx_npu_place = BOOST_GET_CONST(platform::NPUPlace, ctx_place);
    PADDLE_ENFORCE_EQ(src_place, ctx_npu_place,
                      platform::errors::PreconditionNotMet(
                          "The source NPU device and current device context do "
                          "not match. The source NPU device number is %d, but "
                          "device context NPU number is %d.",
                          src_npu_place.device, ctx_npu_place.device));
    auto stream =
        reinterpret_cast<const platform::NPUDeviceContext&>(ctx).stream();
    memory::Copy(dst_npu_pinned_place, dst_ptr, src_npu_place, src_ptr, size,
                 stream);
  }
  else {  // NOLINT
    PADDLE_THROW(platform::errors::Unimplemented(
        "Copy from %s to %s is not supported.", src_place, dst_place));
  }
#endif
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  else if (platform::is_cuda_pinned_place(src_place) &&  // NOLINT
           platform::is_cuda_pinned_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::CUDAPinnedPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::CUDAPinnedPlace, src_place), src_ptr,
                 size);
  }
  else if (platform::is_cuda_pinned_place(src_place) &&  // NOLINT
           platform::is_cpu_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::CPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::CUDAPinnedPlace, src_place), src_ptr,
                 size);
  }
  else if (platform::is_cpu_place(src_place) &&  // NOLINT
           platform::is_cuda_pinned_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::CUDAPinnedPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::CPUPlace, src_place), src_ptr, size);
  }
  else if (platform::is_gpu_place(src_place) &&  // NOLINT
           platform::is_cpu_place(dst_place)) {
    auto src_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, src_place);
    auto dst_cpu_place = BOOST_GET_CONST(platform::CPUPlace, dst_place);
    auto ctx_place = ctx.GetPlace();
    PADDLE_ENFORCE_EQ(
        platform::is_gpu_place(ctx_place), true,
        platform::errors::PreconditionNotMet(
            "Context place error, excepted GPUPlace, but actually %s.",
            ctx_place));
    auto ctx_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, ctx_place);
    PADDLE_ENFORCE_EQ(src_gpu_place, ctx_gpu_place,
                      platform::errors::Unavailable(
                          "Source place and context place do not match, source "
                          "place is %s, context place is %s.",
                          src_gpu_place, ctx_gpu_place));
    auto stream =
        reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream();
    memory::Copy(dst_cpu_place, dst_ptr, src_gpu_place, src_ptr, size, stream);
  }
  else if (platform::is_cpu_place(src_place) &&  // NOLINT
           platform::is_gpu_place(dst_place)) {
    auto src_cpu_place = BOOST_GET_CONST(platform::CPUPlace, src_place);
    auto dst_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, dst_place);
    auto ctx_place = ctx.GetPlace();
    PADDLE_ENFORCE_EQ(
        platform::is_gpu_place(ctx_place), true,
        platform::errors::PreconditionNotMet(
            "Context place error, excepted GPUPlace, but actually %s.",
            ctx_place));
    auto ctx_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, ctx_place);
    PADDLE_ENFORCE_EQ(dst_gpu_place, ctx_gpu_place,
                      platform::errors::Unavailable(
                          "Destination place and context place do not match, "
                          "destination place is %s, context place is %s.",
                          dst_gpu_place, ctx_gpu_place));
    auto stream =
        reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream();
    memory::Copy(dst_gpu_place, dst_ptr, src_cpu_place, src_ptr, size, stream);
  }
  else if (platform::is_gpu_place(src_place) &&  // NOLINT
           platform::is_cuda_pinned_place(dst_place)) {
    auto src_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, src_place);
    auto dst_cuda_pinned_place =
        BOOST_GET_CONST(platform::CUDAPinnedPlace, dst_place);
    auto ctx_place = ctx.GetPlace();
    PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx_place), true,
                      platform::errors::PreconditionNotMet(
                          "Device context place mismatch. When copying Tensor "
                          "data from GPU memory to CUDA Pinned memory, current "
                          "device context place should be GPU."));
    auto ctx_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, ctx_place);
    PADDLE_ENFORCE_EQ(src_gpu_place, ctx_gpu_place,
                      platform::errors::PreconditionNotMet(
                          "The source GPU device and current device context do "
                          "not match. The source GPU device number is %d, but "
                          "device context GPU number is %d.",
                          src_gpu_place.device, ctx_gpu_place.device));
    auto stream =
        reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream();
    memory::Copy(dst_cuda_pinned_place, dst_ptr, src_gpu_place, src_ptr, size,
                 stream);
  }
  else if (platform::is_cuda_pinned_place(src_place) &&  // NOLINT
           platform::is_gpu_place(dst_place)) {
    auto src_cuda_pinned_place =
        BOOST_GET_CONST(platform::CUDAPinnedPlace, src_place);
    auto dst_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, dst_place);
    auto ctx_place = ctx.GetPlace();
    PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx_place), true,
                      platform::errors::PreconditionNotMet(
                          "Device context place mismatch. When copying Tensor "
                          "data from CUDA Pinned memory to GPU memory, current "
                          "device context place should be GPU."));
    auto ctx_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, ctx_place);
    PADDLE_ENFORCE_EQ(dst_gpu_place, ctx_gpu_place,
                      platform::errors::PreconditionNotMet(
                          "The target GPU device and current device context do "
                          "not match. The target GPU device number is %d, but "
                          "device context GPU number is %d.",
                          dst_gpu_place.device, ctx_gpu_place.device));
    auto stream =
        reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream();
    memory::Copy(dst_gpu_place, dst_ptr, src_cuda_pinned_place, src_ptr, size,
                 stream);
  }
  else if (platform::is_gpu_place(src_place) &&  // NOLINT
           platform::is_gpu_place(dst_place)) {
    auto src_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, src_place);
    auto dst_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, dst_place);
    auto ctx_place = ctx.GetPlace();
    PADDLE_ENFORCE_EQ(
        platform::is_gpu_place(ctx_place), true,
        platform::errors::PreconditionNotMet(
            "Context place error, excepted GPUPlace, but actually %s.",
            ctx_place));
    auto stream =
        reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream();
    if (platform::is_same_place(src_place, dst_place)) {
      memory::Copy(dst_gpu_place, dst_ptr, src_gpu_place, src_ptr, size,
                   stream);
    } else {
      if (platform::is_same_place(ctx_place, src_place)) {
        memory::Copy(dst_gpu_place, dst_ptr, src_gpu_place, src_ptr, size,
                     stream);
        platform::DeviceContextPool::Instance().Get(src.place())->Wait();
      } else if (platform::is_same_place(ctx_place, dst_place)) {
        platform::DeviceContextPool::Instance().Get(src.place())->Wait();
        memory::Copy(dst_gpu_place, dst_ptr, src_gpu_place, src_ptr, size,
                     stream);
      } else {
        PADDLE_THROW(platform::errors::Unavailable(
            "Context place does not match the source and destination place."));
      }
    }
  }
  else {  // NOLINT
    PADDLE_THROW(platform::errors::Unimplemented(
        "Copying from %s to %s is not supported.", src_place, dst_place));
  }
#endif
}

void TensorCopy(const Tensor& src, const platform::Place& dst_place,
                Tensor* dst) {
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  const platform::DeviceContext* dev_ctx;
  if (platform::is_gpu_place(dst_place) || platform::is_npu_place(dst_place)) {
    dev_ctx = pool.Get(dst_place);
  } else {
    dev_ctx = pool.Get(src.place());
  }
  TensorCopy(src, dst_place, *dev_ctx, dst);
}
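// Usage sketch for TensorCopy (illustrative only; the tensors, place, and
// context below are hypothetical, not defined in this file):
//
//   framework::Tensor src, dst;
//   src.Resize({2, 3});
//   src.mutable_data<float>(platform::CPUPlace());
//   platform::CUDAPlace gpu(0);
//   auto* ctx = platform::DeviceContextPool::Instance().Get(gpu);
//   TensorCopy(src, gpu, *ctx, &dst);  // asynchronous w.r.t. ctx's stream
//   ctx->Wait();                       // synchronize before reading dst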

void TensorCopySync(const Tensor& src, const platform::Place& dst_place,
                    Tensor* dst) {
  if (&src == dst) {
    auto src_copy = src;
    TensorCopySync(src_copy, dst_place, dst);
    return;
  }

  VLOG(3) << "TensorCopySync " << src.dims() << " from " << src.place()
          << " to " << dst_place;
  src.check_memory_size();
  dst->Resize(src.dims());
  dst->set_layout(src.layout());
#ifdef PADDLE_WITH_MKLDNN
  dst->set_format(src.format());
#endif
  auto src_place = src.place();
  auto src_ptr = src.data<void>();
  auto dst_ptr = dst->mutable_data(dst_place, src.type());

  if (src_ptr == dst_ptr && src_place == dst_place) {
    VLOG(3) << "Skip copy the same data from " << src_place << " to "
            << dst_place;
    return;
  }

  auto size = src.numel() * SizeOfType(src.type());
  if (platform::is_cpu_place(src_place) && platform::is_cpu_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::CPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::CPUPlace, src_place), src_ptr, size);
  }
#ifdef PADDLE_WITH_XPU
  else if (platform::is_xpu_place(src_place) &&  // NOLINT
           platform::is_cpu_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::CPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::XPUPlace, src_place), src_ptr, size);
  } else if (platform::is_cpu_place(src_place) &&  // NOLINT
             platform::is_xpu_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::XPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::CPUPlace, src_place), src_ptr, size);
  } else if (platform::is_xpu_place(src_place) &&  // NOLINT
             platform::is_xpu_place(dst_place)) {
    if (src_ptr == dst_ptr) {
      VLOG(3) << "Skip copy the same data async from " << src_place << " to "
              << dst_place;
      return;
    }
    memory::Copy(BOOST_GET_CONST(platform::XPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::XPUPlace, src_place), src_ptr, size);
  } else {  // NOLINT
    PADDLE_THROW(platform::errors::Unimplemented(
        "Copy from %s to %s is not supported.", src_place, dst_place));
  }
#endif
#ifdef PADDLE_WITH_ASCEND_CL
  else if (platform::is_npu_place(src_place) &&  // NOLINT
           platform::is_cpu_place(dst_place)) {  /* npu -> cpu*/
    memory::Copy(BOOST_GET_CONST(platform::CPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::NPUPlace, src_place), src_ptr, size,
                 nullptr);
  }
  else if (platform::is_cpu_place(src_place) &&  // NOLINT
           platform::is_npu_place(dst_place)) {  /* cpu -> npu*/
    memory::Copy(BOOST_GET_CONST(platform::NPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::CPUPlace, src_place), src_ptr, size,
                 nullptr);
  }
  else if (platform::is_npu_place(src_place) &&  // NOLINT
           platform::is_npu_place(dst_place)) {  /* npu -> npu*/
    if (src_ptr == dst_ptr) {
      VLOG(3) << "Skip copy the same data sync from " << src_place << " to "
              << dst_place;
      return;
    }
    memory::Copy(BOOST_GET_CONST(platform::NPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::NPUPlace, src_place), src_ptr, size,
                 nullptr);
  }
  else {  // NOLINT
    PADDLE_THROW(platform::errors::Unimplemented(
        "Copy from %s to %s is not supported.", src_place, dst_place));
  }
#endif
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  else if (platform::is_cuda_pinned_place(src_place) &&  // NOLINT
           platform::is_cuda_pinned_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::CUDAPinnedPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::CUDAPinnedPlace, src_place), src_ptr,
                 size);
  }
  else if (platform::is_cuda_pinned_place(src_place) &&  // NOLINT
           platform::is_cpu_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::CPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::CUDAPinnedPlace, src_place), src_ptr,
                 size);
  }
  else if (platform::is_cpu_place(src_place) &&  // NOLINT
           platform::is_cuda_pinned_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::CUDAPinnedPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::CPUPlace, src_place), src_ptr, size);
  }
  else if (platform::is_gpu_place(src_place) &&  // NOLINT
           platform::is_cuda_pinned_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::CUDAPinnedPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::CUDAPlace, src_place), src_ptr, size,
                 nullptr);
  }
  else if (platform::is_gpu_place(src_place) &&  // NOLINT
           platform::is_cpu_place(dst_place)) {
    auto src_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, src_place);
    auto dst_cpu_place = BOOST_GET_CONST(platform::CPUPlace, dst_place);
    memory::Copy(dst_cpu_place, dst_ptr, src_gpu_place, src_ptr, size, nullptr);
  }
  else if (platform::is_cpu_place(src_place) &&  // NOLINT
           platform::is_gpu_place(dst_place)) {
    auto src_cpu_place = BOOST_GET_CONST(platform::CPUPlace, src_place);
    auto dst_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, dst_place);
    memory::Copy(dst_gpu_place, dst_ptr, src_cpu_place, src_ptr, size, nullptr);
  }
  else if (platform::is_gpu_place(src_place) &&  // NOLINT
           platform::is_gpu_place(dst_place)) {
    auto src_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, src_place);
    auto dst_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, dst_place);
    memory::Copy(dst_gpu_place, dst_ptr, src_gpu_place, src_ptr, size, nullptr);
  }
  else if (platform::is_cuda_pinned_place(src_place) &&  // NOLINT
           platform::is_gpu_place(dst_place)) {
    auto src_pinned_place =
        BOOST_GET_CONST(platform::CUDAPinnedPlace, src_place);
    auto dst_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, dst_place);
    memory::Copy(dst_gpu_place, dst_ptr, src_pinned_place, src_ptr, size,
                 nullptr);
  }
  else {  // NOLINT
    PADDLE_THROW(platform::errors::Unimplemented(
        "Copy from %s to %s is not supported.", src_place, dst_place));
  }
#endif
}
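// Unlike TensorCopy above, TensorCopySync passes a nullptr stream to
// memory::Copy, so the copy runs on the default, blocking path and the data
// is ready when the call returns. A minimal sketch (hypothetical tensors):
//
//   framework::Tensor cpu_t, gpu_t;
//   cpu_t.Resize({4});
//   cpu_t.mutable_data<float>(platform::CPUPlace());
//   TensorCopySync(cpu_t, platform::CUDAPlace(0), &gpu_t);  // blocks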

template <typename Predicate, typename DevCtx>
struct AnyDTypeVisitor {
  Predicate predicate_;
  const Tensor& tensor_;
  const DevCtx& ctx_;
  Tensor* out_;

  AnyDTypeVisitor(Predicate predicate, const Tensor& tensor, const DevCtx& ctx,
                  Tensor* out)
      : predicate_(predicate), tensor_(tensor), ctx_(ctx), out_(out) {}

  template <typename T>
  void apply() const {
    auto t = EigenVector<T>::Flatten(tensor_);
    auto o = EigenScalar<bool>::From(*out_);
    // Reduce to a single bool: true if predicate_(t) holds for any element.
    o.device(*ctx_.eigen_device()) = predicate_(t).any();
  }
};

template <typename Predicate, typename DevCtx>
inline void AnyImpl(Predicate predicate, const framework::Tensor& tensor,
                    const DevCtx& ctx, framework::Tensor* out) {
  VisitDataType(tensor.type(), AnyDTypeVisitor<Predicate, DevCtx>(
                                   predicate, tensor, ctx, out));
}

template <typename Predicate>
class AnyVisitor : public boost::static_visitor<bool> {
 private:
  const framework::Tensor& tensor_;
  Predicate predicate_;

  bool GetResultHelper(const framework::Tensor& out,
                       const platform::Place& place) const {
    platform::CPUPlace cpu;
    framework::Tensor tmp;
    tmp.Resize({1});
    tmp.mutable_data<bool>(cpu);
    auto ctx = platform::DeviceContextPool::Instance().Get(place);
    ctx->Wait();
    TensorCopy(out, cpu, *ctx, &tmp);
    ctx->Wait();
    return GetResult(tmp, cpu);
  }

 public:
  AnyVisitor(const framework::Tensor& tensor, Predicate predicate)
      : tensor_(tensor), predicate_(std::move(predicate)) {}

  template <typename Place>
  bool operator()(const Place& place) const {
    framework::Tensor out;
    out.Resize({1});
    out.mutable_data<bool>(place);
    auto* ctx = platform::DeviceContextPool::Instance().GetByPlace(place);
    AnyImpl(predicate_, tensor_, *ctx, &out);
    return this->GetResult(out, place);
  }

  bool GetResult(const framework::Tensor& out,
                 const platform::XPUPlace& xpu) const {
    return GetResultHelper(out, xpu);
  }

  bool GetResult(const framework::Tensor& out,
                 const platform::CUDAPlace& gpu) const {
    return GetResultHelper(out, gpu);
  }

  bool GetResult(const framework::Tensor& out,
                 const platform::NPUPlace& npu) const {
    PADDLE_THROW(
        platform::errors::Unimplemented("Not supported on place (%s) ", npu));
    // return GetResultHelper(out, npu);
  }

  bool GetResult(const framework::Tensor& out,
                 const platform::NPUPinnedPlace& cpu) const {
    return *out.data<bool>();
  }

  bool GetResult(const framework::Tensor& out,
                 const platform::CPUPlace& cpu) const {
    return *out.data<bool>();
  }

  bool GetResult(const framework::Tensor& out,
                 const platform::CUDAPinnedPlace& cpu) const {
    return *out.data<bool>();
  }
};

template <typename Predicate>
class AnyOutVisitor : public boost::static_visitor<> {
 private:
  const framework::Tensor& tensor_;
  mutable framework::Tensor* out_;
  Predicate predicate_;

 public:
  AnyOutVisitor(const framework::Tensor& tensor, Predicate predicate,
                framework::Tensor* out)
      : tensor_(tensor), out_(out), predicate_(std::move(predicate)) {}

  template <typename Place>
  void operator()(const Place& place) const {
    auto* ctx = platform::DeviceContextPool::Instance().GetByPlace(place);
    out_->Resize({1});
    out_->mutable_data<bool>(place);
    AnyImpl(predicate_, tensor_, *ctx, out_);
  }
};

template <typename Predicate>
inline bool Any(const framework::Tensor& tensor, Predicate predicate) {
  AnyVisitor<Predicate> visitor(tensor, predicate);
  auto place = tensor.place();
  return platform::VisitPlace(place, visitor);
}
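// Dispatch in Any is two-level: platform::VisitPlace selects the device
// branch (AnyVisitor::operator()), and VisitDataType inside AnyImpl
// instantiates the predicate for the tensor's element type
// (AnyDTypeVisitor::apply), reducing to a single bool via Eigen's any().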

template <typename Predicate>
inline void Any(const framework::Tensor& tensor, Predicate predicate,
                framework::Tensor* out) {
  AnyOutVisitor<Predicate> visitor(tensor, predicate, out);
  auto place = tensor.place();
  platform::VisitPlace(place, visitor);
}

template <typename Predicate, typename DevCtx>
struct AllDTypeVisitor {
  Predicate predicate_;
  const Tensor& tensor_;
  const DevCtx& ctx_;
  Tensor* out_;

  AllDTypeVisitor(Predicate predicate, const Tensor& tensor, const DevCtx& ctx,
                  Tensor* out)
      : predicate_(predicate), tensor_(tensor), ctx_(ctx), out_(out) {}

  template <typename T>
  void apply() const {
    auto t = EigenVector<T>::Flatten(tensor_);
    auto o = EigenVector<bool>::Flatten(*out_);
    o.device(*ctx_.eigen_device()) = predicate_(t);
  }
};

template <typename Predicate, typename DevCtx>
inline void AllImpl(Predicate predicate, const framework::Tensor& tensor,
                    const DevCtx& ctx, framework::Tensor* out) {
  VisitDataType(tensor.type(), AllDTypeVisitor<Predicate, DevCtx>(
                                   predicate, tensor, ctx, out));
}

template <typename Predicate>
class AllOutVisitor : public boost::static_visitor<> {
 private:
  const framework::Tensor& tensor_;
  mutable framework::Tensor* out_;
  Predicate predicate_;

 public:
  AllOutVisitor(const framework::Tensor& tensor, Predicate predicate,
                framework::Tensor* out)
      : tensor_(tensor), out_(out), predicate_(predicate) {}

  template <typename Place>
  void operator()(const Place& place) const {
    auto* ctx = platform::DeviceContextPool::Instance().GetByPlace(place);
    out_->Resize(tensor_.dims());
    out_->mutable_data<bool>(place);
    AllImpl(predicate_, tensor_, *ctx, out_);
  }
};

template <typename Predicate>
inline void All(const framework::Tensor& tensor, Predicate predicate,
                framework::Tensor* out) {
  AllOutVisitor<Predicate> visitor(tensor, predicate, out);
  auto place = tensor.place();
  platform::VisitPlace(place, visitor);
}
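// Note: unlike Any, All keeps the element-wise result. out is resized to
// tensor.dims() and out[i] = predicate(tensor[i]); no reduction is applied.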

struct ContainsNANPredicate {
  template <typename T>
  auto operator()(const T& eigen_vec) const
      -> decltype(std::declval<T>().isnan()) {
    // Cast eigen_vec to a vector of bool: true where the element is NaN.
    return eigen_vec.isnan();
  }
};

bool TensorContainsNAN(const framework::Tensor& tensor) {
  ContainsNANPredicate predicate;
  return Any(tensor, predicate);
}

void TensorContainsNAN(const framework::Tensor& tensor,
                       framework::Tensor* out) {
  ContainsNANPredicate predicate;
  Any(tensor, predicate, out);
}

void TensorContainsNANV2(const framework::Tensor& tensor,
                         framework::Tensor* out) {
  ContainsNANPredicate predicate;
  All(tensor, predicate, out);
}

struct ContainsInfPredicate {
  template <typename T>
  auto operator()(const T& eigen_vec) const
      -> decltype(std::declval<T>().isinf()) {
    // Cast eigen_vec to a vector of bool: true where the element is Inf.
    return eigen_vec.isinf();
  }
};

bool TensorContainsInf(const framework::Tensor& tensor) {
  ContainsInfPredicate predicate;
  return Any(tensor, predicate);
}

void TensorContainsInf(const framework::Tensor& tensor,
                       framework::Tensor* out) {
  ContainsInfPredicate predicate;
  Any(tensor, predicate, out);
}

void TensorContainsInfV2(const framework::Tensor& tensor,
                         framework::Tensor* out) {
  ContainsInfPredicate predicate;
  All(tensor, predicate, out);
}

// NOTE(dzhwinter):
// Isfinite needs an AllVisitor to loop through all the elements.
// We choose two CUDA calls instead of one AllVisitor. The AllVisitor
// should be implemented if the performance hurts.
bool TensorIsfinite(const framework::Tensor& tensor) {
  ContainsInfPredicate pred_inf;
  ContainsNANPredicate pred_nan;
  return !Any(tensor, pred_inf) && !Any(tensor, pred_nan);
}
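// Usage sketch (illustrative; `t` is hypothetical and NAN/INFINITY come
// from <cmath>):
//
//   framework::Tensor t;
//   t.Resize({3});
//   auto* p = t.mutable_data<float>(platform::CPUPlace());
//   p[0] = 1.0f; p[1] = NAN; p[2] = INFINITY;
//   bool finite = TensorIsfinite(t);       // false
//   bool has_nan = TensorContainsNAN(t);   // true
//   bool has_inf = TensorContainsInf(t);   // true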

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
template <typename T>
static inline void __global__ BothFalse(const T* cmp, T* out, int element_num) {
  CUDA_KERNEL_LOOP(i, element_num) { out[i] = (!cmp[i]) && (!out[i]); }
}
#endif

struct BothFalseVisitor : public boost::static_visitor<> {
  const framework::Tensor& in_;
  mutable framework::Tensor* out_;
  BothFalseVisitor(const framework::Tensor& in, framework::Tensor* out)
      : in_(in), out_(out) {}

  template <typename Place>
  void operator()(const Place& place) const {
    VisitorImpl(place);
  }

  void VisitorImpl(const platform::XPUPlace& xpu) const {
    PADDLE_THROW(platform::errors::Unimplemented("XPUPlace is not supported"));
  }

  void VisitorImpl(const platform::CUDAPlace& gpu) const {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    auto* ctx = platform::DeviceContextPool::Instance().GetByPlace(gpu);
    constexpr int MAX_BLOCK_DIM = 512;
    const int MAX_GRID_DIM = ctx->GetMaxPhysicalThreadCount() / MAX_BLOCK_DIM;
    int element_num = in_.numel();
    int block_size = (element_num >= MAX_BLOCK_DIM)
                         ? MAX_BLOCK_DIM
                         : (1 << static_cast<int>(std::log2(element_num)));
    int grid_size = element_num / block_size;
    grid_size = (grid_size >= MAX_GRID_DIM) ? MAX_GRID_DIM : grid_size;
    BothFalse<bool><<<grid_size, block_size, 0, ctx->stream()>>>(
        in_.data<bool>(), out_->mutable_data<bool>(gpu), element_num);
#endif
  }

  void VisitorImpl(const platform::NPUPlace& npu) const {
    // TODO(zhiqiu)
  }

  void VisitorImpl(const platform::CPUPlace& cpu) const {
    int num = in_.numel();
    const bool* in_ptr = in_.data<bool>();
    bool* out_ptr = out_->data<bool>();
    for (int i = 0; i < num; ++i) {
      bool lhs = !in_ptr[i];
      bool rhs = !out_ptr[i];
      out_ptr[i] = lhs && rhs;
    }
  }

  void VisitorImpl(
      const platform::CUDAPinnedPlace& cpu /* equals to cpu*/) const {
    int num = in_.numel();
    const bool* in_ptr = in_.data<bool>();
    bool* out_ptr = out_->data<bool>();
    for (int i = 0; i < num; ++i) {
      bool lhs = !in_ptr[i];
      bool rhs = !out_ptr[i];
      out_ptr[i] = lhs && rhs;
    }
  }

  void VisitorImpl(
      const platform::NPUPinnedPlace& cpu /* equals to cpu*/) const {
    int num = in_.numel();
    const bool* in_ptr = in_.data<bool>();
    bool* out_ptr = out_->data<bool>();
    for (int i = 0; i < num; ++i) {
      bool lhs = !in_ptr[i];
      bool rhs = !out_ptr[i];
      out_ptr[i] = lhs && rhs;
    }
  }
};

void TensorIsfinite(const framework::Tensor& tensor, framework::Tensor* out) {
  framework::Tensor tmp;
  TensorContainsInf(tensor, &tmp);
  TensorContainsNAN(tensor, out);
  BothFalseVisitor visitor(tmp, out);
  auto place = tensor.place();
  platform::VisitPlace(place, visitor);
}
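// The composition above yields a scalar isfinite flag: tmp holds "any
// element is Inf", out holds "any element is NaN", and BothFalseVisitor
// overwrites out with (!tmp) && (!out), i.e. true iff neither check fired.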

void TensorIsfiniteV2(const framework::Tensor& tensor, framework::Tensor* out) {
  framework::Tensor tmp;
  TensorContainsInfV2(tensor, &tmp);
  TensorContainsNANV2(tensor, out);
  BothFalseVisitor visitor(tmp, out);
  auto place = tensor.place();
  platform::VisitPlace(place, visitor);
}
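// The V2 variant is element-wise: TensorContainsInfV2/TensorContainsNANV2
// produce bool masks shaped like the input, and BothFalseVisitor combines
// them per element, so out[i] is true iff tensor[i] is finite.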

void TensorToStream(std::ostream& os, const Tensor& tensor,
                    const platform::DeviceContext& dev_ctx) {
  {  // the 1st field, uint32_t version
    constexpr uint32_t version = 0;
    os.write(reinterpret_cast<const char*>(&version), sizeof(version));
  }
  {  // the 2nd field, tensor description
     // int32_t  size
     // void*    protobuf message
    proto::VarType::TensorDesc desc;
    desc.set_data_type(tensor.type());
    auto dims = framework::vectorize(tensor.dims());
    auto* pb_dims = desc.mutable_dims();
    pb_dims->Resize(static_cast<int>(dims.size()), 0);
    std::copy(dims.begin(), dims.end(), pb_dims->begin());
    int32_t size = desc.ByteSize();
    os.write(reinterpret_cast<const char*>(&size), sizeof(size));
    auto out = desc.SerializeAsString();
    os.write(out.data(), size);
  }
  {  // the 3rd field, tensor data
    uint64_t size = tensor.numel() * framework::SizeOfType(tensor.type());

    auto* data_ptr = tensor.data<void>();
    PADDLE_ENFORCE_LT(size, (std::numeric_limits<std::streamsize>::max)(),
                      platform::errors::ResourceExhausted(
                          "tensor size %d overflow when writing tensor", size));
    if (platform::is_gpu_place(tensor.place())) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
      constexpr size_t kBufSize = 1024 * 1024 * 64;  // 64MB
      std::unique_ptr<char[]> buf(new char[kBufSize]);
      auto& gpu_dev_ctx =
          static_cast<const platform::CUDADeviceContext&>(dev_ctx);
      platform::CPUPlace cpu;
      uintptr_t data = reinterpret_cast<uintptr_t>(data_ptr);
      while (size != 0) {
        size_t size_to_write = std::min(kBufSize, static_cast<size_t>(size));
        memory::Copy(cpu, buf.get(),
                     BOOST_GET_CONST(platform::CUDAPlace, tensor.place()),
                     reinterpret_cast<const void*>(data), size_to_write,
                     gpu_dev_ctx.stream());
        gpu_dev_ctx.Wait();
        os.write(buf.get(), size_to_write);
        data += size_to_write;
        size -= size_to_write;
      }
#else
      PADDLE_THROW(platform::errors::Unimplemented(
          "CUDAPlace is not supported when not compiled with CUDA"));
#endif
    } else if (platform::is_xpu_place(tensor.place())) {
#ifdef PADDLE_WITH_XPU
      constexpr size_t kBufSize = 1024 * 1024 * 64;  // 64MB
      std::unique_ptr<char[]> buf(new char[kBufSize]);
      auto& xpu_dev_ctx =
          static_cast<const platform::XPUDeviceContext&>(dev_ctx);
      platform::CPUPlace cpu;
      uintptr_t data = reinterpret_cast<uintptr_t>(data_ptr);
      while (size != 0) {
        size_t size_to_write = std::min(kBufSize, static_cast<size_t>(size));
        memory::Copy(cpu, buf.get(),
                     BOOST_GET_CONST(platform::XPUPlace, tensor.place()),
                     reinterpret_cast<const void*>(data), size_to_write);
        xpu_dev_ctx.Wait();
        os.write(buf.get(), size_to_write);
        data += size_to_write;
        size -= size_to_write;
      }
#else
      PADDLE_THROW(platform::errors::Unimplemented(
          "XPUPlace is not supported when not compiled with XPU"));
#endif
    } else if (platform::is_npu_place(tensor.place())) {
#ifdef PADDLE_WITH_ASCEND_CL
      constexpr size_t kBufSize = 1024 * 1024 * 64;  // 64MB
      std::unique_ptr<char[]> buf(new char[kBufSize]);
      auto& npu_dev_ctx =
          static_cast<const platform::NPUDeviceContext&>(dev_ctx);
      platform::CPUPlace cpu;
      uintptr_t data = reinterpret_cast<uintptr_t>(data_ptr);
      while (size != 0) {
        size_t size_to_write = std::min(kBufSize, static_cast<size_t>(size));
        memory::Copy(cpu, buf.get(),
                     BOOST_GET_CONST(platform::NPUPlace, tensor.place()),
                     reinterpret_cast<const void*>(data), size_to_write,
                     npu_dev_ctx.stream());
        npu_dev_ctx.Wait();
        os.write(buf.get(), size_to_write);
        data += size_to_write;
        size -= size_to_write;
      }
#else
      PADDLE_THROW(platform::errors::Unimplemented(
          "NPUPlace is not supported when not compiled with NPU"));
#endif
    } else {
      os.write(static_cast<const char*>(data_ptr),
               static_cast<std::streamsize>(size));
    }
  }
}
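// Stream layout written by TensorToStream: a uint32_t version (currently 0),
// an int32_t byte length of the serialized TensorDesc proto followed by the
// proto itself, then the raw tensor bytes. A minimal sketch (the stream and
// tensor names are hypothetical):
//
//   std::ofstream fout("tensor.bin", std::ios::binary);
//   auto& ctx = *platform::DeviceContextPool::Instance().Get(t.place());
//   TensorToStream(fout, t, ctx);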

struct DeserializedDataFunctor {
  DeserializedDataFunctor(void** buf, Tensor* tensor,
                          const platform::Place& place)
      : buf_(buf), tensor_(tensor), place_(place) {}

  template <typename T>
  void apply() {
    *buf_ = tensor_->mutable_data<T>(place_);
  }

  void** buf_;
  Tensor* tensor_;
  platform::Place place_;
};

void TensorFromStream(std::istream& is, Tensor* tensor,
                      const platform::DeviceContext& dev_ctx,
                      const size_t& seek, const std::vector<int64_t>& shape) {
  uint32_t version;
  is.read(reinterpret_cast<char*>(&version), sizeof(version));

  PADDLE_ENFORCE_EQ(
      version, 0U,
      platform::errors::InvalidArgument(
          "tensor version %u is not supported, Only version 0 is supported",
          version));

  proto::VarType::TensorDesc desc;
  {  // int32_t size
    // proto buffer
    int32_t size;
    is.read(reinterpret_cast<char*>(&size), sizeof(size));
    std::unique_ptr<char[]> buf(new char[size]);
    is.read(reinterpret_cast<char*>(buf.get()), size);
    PADDLE_ENFORCE_EQ(
        desc.ParseFromArray(buf.get(), size), true,
        platform::errors::InvalidArgument("Cannot parse tensor desc"));
  }
  {  // read tensor
    tensor->Resize(framework::make_ddim(shape));
    size_t seekg = seek * framework::SizeOfType(desc.data_type());
    is.seekg(seekg, is.cur);

    void* buf;
    auto ctx = platform::CPUDeviceContext();
    size_t size = tensor->numel() * framework::SizeOfType(desc.data_type());
    if (platform::is_gpu_place(dev_ctx.GetPlace()) ||
        platform::is_xpu_place(dev_ctx.GetPlace()) ||
        platform::is_npu_place(dev_ctx.GetPlace())) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) || \
    defined(PADDLE_WITH_XPU) || defined(PADDLE_WITH_ASCEND_CL)
      Tensor cpu_tensor;
      cpu_tensor.Resize(framework::make_ddim(shape));
      framework::VisitDataType(
          desc.data_type(),
          DeserializedDataFunctor(&buf, &cpu_tensor, ctx.GetPlace()));
      is.read(static_cast<char*>(buf), size);
      auto dst_place = dev_ctx.GetPlace();
      framework::TensorCopy(cpu_tensor, dst_place, dev_ctx, tensor);
      if (platform::is_npu_place(dev_ctx.GetPlace())) {
        dev_ctx.Wait();
      }
#else
      if (platform::is_gpu_place(dev_ctx.GetPlace())) {
        PADDLE_THROW(platform::errors::Unimplemented(
            "CUDAPlace is not supported when not compiled with CUDA"));
      } else if (platform::is_xpu_place(dev_ctx.GetPlace())) {
        PADDLE_THROW(platform::errors::Unimplemented(
            "XPUPlace is not supported when not compiled with XPU"));
      } else {
        PADDLE_THROW(platform::errors::Unimplemented(
            "NPUPlace is not supported when not compiled with NPU"));
      }
#endif
    } else {
      framework::VisitDataType(
          desc.data_type(),
          DeserializedDataFunctor(&buf, tensor, ctx.GetPlace()));
      is.read(static_cast<char*>(buf), size);
    }
  }
}
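// Note: in the overload above, `seek` is an element offset into the stored
// payload (scaled to bytes with SizeOfType before seeking), and `shape`
// replaces the dims recorded in the TensorDesc.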

void TensorFromStream(std::istream& is, Tensor* tensor,
                      const platform::DeviceContext& dev_ctx) {
  uint32_t version;
  is.read(reinterpret_cast<char*>(&version), sizeof(version));
  PADDLE_ENFORCE_EQ(
      version, 0U,
      platform::errors::InvalidArgument(
          "tensor version %u is not supported, Only version 0 is supported",
          version));
  proto::VarType::TensorDesc desc;
  {  // int32_t size
     // proto buffer
    int32_t size;
    is.read(reinterpret_cast<char*>(&size), sizeof(size));
    std::unique_ptr<char[]> buf(new char[size]);
    is.read(reinterpret_cast<char*>(buf.get()), size);
    PADDLE_ENFORCE_EQ(
        desc.ParseFromArray(buf.get(), size), true,
        platform::errors::InvalidArgument("Cannot parse tensor desc"));
  }
  {  // read tensor
    std::vector<int64_t> dims;
    dims.reserve(static_cast<size_t>(desc.dims().size()));
    std::copy(desc.dims().begin(), desc.dims().end(), std::back_inserter(dims));
    tensor->Resize(framework::make_ddim(dims));
    void* buf;
    auto ctx = platform::CPUDeviceContext();
    size_t size = tensor->numel() * framework::SizeOfType(desc.data_type());
    if (platform::is_gpu_place(dev_ctx.GetPlace()) ||
        platform::is_xpu_place(dev_ctx.GetPlace()) ||
        platform::is_npu_place(dev_ctx.GetPlace())) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) || \
    defined(PADDLE_WITH_XPU) || defined(PADDLE_WITH_ASCEND_CL)
      Tensor cpu_tensor;
      cpu_tensor.Resize(framework::make_ddim(dims));
      framework::VisitDataType(
          desc.data_type(),
          DeserializedDataFunctor(&buf, &cpu_tensor, ctx.GetPlace()));
      is.read(static_cast<char*>(buf), size);
      auto dst_place = dev_ctx.GetPlace();
      framework::TensorCopy(cpu_tensor, dst_place, dev_ctx, tensor);
      if (platform::is_npu_place(dev_ctx.GetPlace())) {
        dev_ctx.Wait();
      }
#else
      if (platform::is_gpu_place(dev_ctx.GetPlace())) {
        PADDLE_THROW(platform::errors::Unimplemented(
            "CUDAPlace is not supported when not compiled with CUDA"));
      } else if (platform::is_xpu_place(dev_ctx.GetPlace())) {
        PADDLE_THROW(platform::errors::Unimplemented(
            "XPUPlace is not supported when not compiled with XPU"));
      } else {
        PADDLE_THROW(platform::errors::Unimplemented(
            "NPUPlace is not supported when not compiled with NPU"));
      }
#endif
    } else {
      framework::VisitDataType(
          desc.data_type(),
          DeserializedDataFunctor(&buf, tensor, ctx.GetPlace()));
      is.read(static_cast<char*>(buf), size);
    }
  }
}
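// Round-trip usage sketch (illustrative; the file name is hypothetical):
//
//   std::ifstream fin("tensor.bin", std::ios::binary);
//   framework::Tensor loaded;
//   auto& cpu_ctx = *platform::DeviceContextPool::Instance().Get(
//       platform::CPUPlace());
//   TensorFromStream(fin, &loaded, cpu_ctx);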

// get tensor data pointer by DLDataType
void* GetDstPtrByDLDataType(DLDataType type, framework::Tensor* dst,
                            const platform::Place& dst_place) {
  // vector types not currently supported
  PADDLE_ENFORCE_LE(type.lanes, 1,
                    platform::errors::Unimplemented(
                        "Vector type is not supported currently."));

  switch (type.bits) {
    case 8:
      if (type.code == kDLInt)
        return static_cast<void*>(dst->mutable_data<int8_t>(dst_place));
      if (type.code == kDLUInt)
        return static_cast<void*>(dst->mutable_data<uint8_t>(dst_place));
      PADDLE_THROW(platform::errors::Unimplemented(
          "DLDataType code <%d> is illegal when DLDataType.bits is <%d>.",
          type.code, type.bits));
    case 16:
      if (type.code == kDLInt)
        return static_cast<void*>(dst->mutable_data<int16_t>(dst_place));
      if (type.code == kDLFloat)
        return static_cast<void*>(
            dst->mutable_data<paddle::platform::float16>(dst_place));
      if (type.code == kDLBfloat)
        return static_cast<void*>(
            dst->mutable_data<paddle::platform::bfloat16>(dst_place));
      PADDLE_THROW(platform::errors::Unimplemented(
          "DLDataType code <%d> is illegal when DLDataType.bits is <%d>.",
          type.code, type.bits));
    case 32:
      if (type.code == kDLInt)
        return static_cast<void*>(dst->mutable_data<int32_t>(dst_place));
      if (type.code == kDLFloat)
        return static_cast<void*>(dst->mutable_data<float>(dst_place));
      PADDLE_THROW(platform::errors::Unimplemented(
          "DLDataType code <%d> is illegal when DLDataType.bits is <%d>.",
          type.code, type.bits));
    case 64:
      if (type.code == kDLInt)
        return static_cast<void*>(dst->mutable_data<int64_t>(dst_place));
      if (type.code == kDLFloat)
        return static_cast<void*>(dst->mutable_data<double>(dst_place));
      if (type.code == kDLComplex)
        return static_cast<void*>(
            dst->mutable_data<paddle::platform::complex<float>>(dst_place));
      PADDLE_THROW(platform::errors::Unimplemented(
          "DLDataType code <%d> is illegal when DLDataType.bits is <%d>.",
          type.code, type.bits));
    case 128:
      if (type.code == kDLComplex)
        return static_cast<void*>(
            dst->mutable_data<paddle::platform::complex<double>>(dst_place));
      PADDLE_THROW(platform::errors::Unimplemented(
          "DLDataType code <%d> is illegal when DLDataType.bits is <%d>.",
          type.code, type.bits));
    default:
      PADDLE_THROW(platform::errors::Unimplemented(
          "Unsupported DLDataType.bits %d.", type.bits));
  }
}

void TensorFromDLPack(const ::DLTensor& dl_tensor, framework::Tensor* dst) {
  platform::CPUPlace dst_place = platform::CPUPlace();
  platform::CPUPlace src_place = platform::CPUPlace();

  std::vector<int64_t> vec;
  std::copy(dl_tensor.shape, dl_tensor.shape + dl_tensor.ndim,
            std::back_inserter(vec));

  framework::DDim vddim = framework::make_ddim(vec);

  dst->Resize(vddim);
  ::DLDataType type = dl_tensor.dtype;
  void* dst_ptr = GetDstPtrByDLDataType(type, dst, dst_place);

  auto src_ptr = static_cast<const void*>(dl_tensor.data);
  auto size = paddle::framework::product(vddim) * type.bits / 8;

  if (dl_tensor.device.device_type == kDLCPU) {
    memory::Copy(dst_place, dst_ptr, src_place, src_ptr, size);
  }
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  if (dl_tensor.device.device_type == kDLGPU) {
    platform::CUDAPlace dst_place =
        platform::CUDAPlace(dl_tensor.device.device_id);
    platform::CUDAPlace src_place =
        platform::CUDAPlace(dl_tensor.device.device_id);
    dst_ptr = GetDstPtrByDLDataType(type, dst, dst_place);
    auto* ctx = platform::DeviceContextPool::Instance().GetByPlace(dst_place);
    memory::Copy(
        dst_place, dst_ptr, src_place, src_ptr, size,
        reinterpret_cast<const platform::CUDADeviceContext&>(*ctx).stream());
  }
#endif
#ifdef PADDLE_WITH_XPU
  PADDLE_THROW(platform::errors::Unimplemented("XPUPlace is not supported"));
#endif
}
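// Usage sketch (illustrative; `managed` is a hypothetical DLManagedTensor
// handed over by another framework via the DLPack protocol):
//
//   framework::Tensor dst;
//   TensorFromDLPack(managed->dl_tensor, &dst);
//   // The data is copied, so the caller still invokes managed->deleter.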

template <typename T>
std::string format_tensor(const framework::Tensor& tensor) {
  // TODO(zhiqiu): use the print option to format tensor.
  return "NOT IMPLEMENTED";
}

template <typename T>
std::ostream& print_tensor(std::ostream& os, const framework::Tensor& tensor) {
  auto inspect = tensor.data<T>();
  auto element_num = tensor.numel();

  os << "  - data: [";
  // Note: int8_t and uint8_t are typedefs of char; ostream prints char as a
  // character, so cast to a numeric type (signed) before printing.
  if (typeid(int8_t) == typeid(T) || typeid(uint8_t) == typeid(T)) {
    if (element_num > 0) {
      os << signed(inspect[0]);
      for (int j = 1; j < element_num; ++j) {
        os << " " << signed(inspect[j]);
      }
    }
  } else {
    if (element_num > 0) {
      os << inspect[0];
      for (int j = 1; j < element_num; ++j) {
        os << " " << inspect[j];
      }
    }
  }
  os << "]";
  return os;
}

template <>
std::ostream& print_tensor<paddle::platform::complex<float>>(
    std::ostream& os, const framework::Tensor& tensor) {
  auto inspect = tensor.data<paddle::platform::complex<float>>();
  auto element_num = tensor.numel();

  os << "  - data: [";
  if (element_num > 0) {
    os << inspect[0].real << "+" << inspect[0].imag << "j";
    for (int j = 1; j < element_num; ++j) {
      os << " " << inspect[j].real << "+" << inspect[j].imag << "j";
    }
  }
  os << "]";
  return os;
}

template <>
std::ostream& print_tensor<paddle::platform::complex<double>>(
    std::ostream& os, const framework::Tensor& tensor) {
  auto inspect = tensor.data<paddle::platform::complex<double>>();
  auto element_num = tensor.numel();

  os << "  - data: [";
  if (element_num > 0) {
    os << inspect[0].real << "+" << inspect[0].imag << "j";
    for (int j = 1; j < element_num; ++j) {
      os << " " << inspect[j].real << "+" << inspect[j].imag << "j";
    }
  }
  os << "]";
  return os;
}

std::ostream& operator<<(std::ostream& os, const Tensor& t) {
  os << "  - place: " << t.place() << "\n";
  os << "  - shape: [" << t.dims() << "]\n";
  os << "  - layout: " << DataLayoutToString(t.layout()) << "\n";

#ifdef PADDLE_WITH_MKLDNN
  os << "  - format: "
     << dnnl_fmt_tag2str(static_cast<dnnl_format_tag_t>(t.format())) << "\n";
#endif

  Tensor tensor;
  tensor.Resize(t.dims());
  if (platform::is_cpu_place(t.place())) {
    tensor.ShareDataWith(t);
  } else {
    platform::CPUPlace place;
    framework::TensorCopy(t, place, &tensor);
    platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
    auto& dev_ctx = *pool.Get(t.place());
    dev_ctx.Wait();
  }

#define PrintTensorCallback(cpp_type, proto_type) \
  do {                                            \
    if (tensor.type() == proto_type) {            \
      os << "  - dtype: " << proto_type << "\n";  \
      print_tensor<cpp_type>(os, tensor);         \
      return os;                                  \
    }                                             \
  } while (0)

  _ForEachDataType_(PrintTensorCallback);
  VLOG(1) << "PrintVar: unrecognized data type:" << t.type();
  return os;
}
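// Usage sketch: operator<< copies non-CPU tensors to the host first (with a
// device sync), so any tensor can be logged directly, e.g.
//
//   VLOG(3) << "grad tensor:\n" << my_tensor;  // `my_tensor` is hypothetical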

}  // namespace framework
}  // namespace paddle