/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/tensor_util.h"

#include <algorithm>
#include <limits>
#include <memory>
#include <string>
#include <utility>
#include <vector>

#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/platform/complex.h"
#include "paddle/fluid/platform/profiler.h"
#ifdef PADDLE_WITH_MKLDNN
#include "dnnl_debug.h"
#endif

namespace paddle {
namespace framework {

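// Copies src to dst on dst_place using the given device context. Device
// transfers are enqueued on the context's stream, so the copy may be
// asynchronous with respect to the host; copies where source and destination
// already share the same data are skipped.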
void TensorCopy(const Tensor& src, const platform::Place& dst_place,
                const platform::DeviceContext& ctx, Tensor* dst) {
  if (&src == dst) {
    auto src_copy = src;
    TensorCopy(src_copy, dst_place, ctx, dst);
    return;
  }

  VLOG(3) << "TensorCopy " << src.dims() << " from " << src.place() << " to "
          << dst_place;
  src.check_memory_size();

  dst->Resize(src.dims());
  dst->set_layout(src.layout());
  auto src_place = src.place();
  auto src_ptr = src.data<void>();
#ifdef PADDLE_WITH_MKLDNN
  dst->set_format(src.format());
  // Due to padding, oneDNN tensors may be bigger than
  // numel() * size(type()).
  auto dst_ptr =
      src.layout() == DataLayout::kMKLDNN
          ? dst->mutable_data(dst_place, src.type(), src.memory_size())
          : dst->mutable_data(dst_place, src.type());
#else
  auto dst_ptr = dst->mutable_data(dst_place, src.type());
#endif
  if (src_ptr == dst_ptr && src_place == dst_place) {
    VLOG(3) << "Skip copy the same data async from " << src_place << " to "
            << dst_place;
    return;
  }
  VLOG(4) << "src:" << src_ptr << ", dst:" << dst_ptr;

#ifdef PADDLE_WITH_MKLDNN
  auto size = src.layout() == DataLayout::kMKLDNN
                  ? src.memory_size()
                  : src.numel() * SizeOfType(src.type());
#else
  auto size = src.numel() * SizeOfType(src.type());
#endif

  if (platform::is_cpu_place(src_place) && platform::is_cpu_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::CPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::CPUPlace, src_place), src_ptr, size);
  }
#ifdef PADDLE_WITH_XPU
  else if (platform::is_xpu_place(src_place) &&  // NOLINT
           platform::is_cpu_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::CPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::XPUPlace, src_place), src_ptr, size);
  } else if (platform::is_cpu_place(src_place) &&
             platform::is_xpu_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::XPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::CPUPlace, src_place), src_ptr, size);
  } else if (platform::is_xpu_place(src_place) &&
             platform::is_xpu_place(dst_place)) {
    if (src_ptr == dst_ptr) {
      VLOG(3) << "Skip copy the same data async from " << src_place << " to "
              << dst_place;
      return;
    }
    memory::Copy(BOOST_GET_CONST(platform::XPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::XPUPlace, src_place), src_ptr, size);
  } else {
    PADDLE_THROW(platform::errors::Unimplemented(
        "Copy from %s to %s is not supported.", src_place, dst_place));
  }
#endif
#ifdef PADDLE_WITH_ASCEND_CL
  // TODO(zhiqiu): handle different condition like CUDA code below
  else if (platform::is_npu_place(src_place) &&  // NOLINT
           platform::is_cpu_place(dst_place)) {
    auto stream =
        reinterpret_cast<const platform::NPUDeviceContext&>(ctx).stream();
    memory::Copy(BOOST_GET_CONST(platform::CPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::NPUPlace, src_place), src_ptr, size,
                 stream);
  }
  else if (platform::is_cpu_place(src_place) &&  // NOLINT
           platform::is_npu_place(dst_place)) {
    auto stream =
        reinterpret_cast<const platform::NPUDeviceContext&>(ctx).stream();
    memory::Copy(BOOST_GET_CONST(platform::NPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::CPUPlace, src_place), src_ptr, size,
                 stream);
  }
  else if (platform::is_npu_place(src_place) &&  // NOLINT
           platform::is_npu_place(dst_place)) {
    if (src_ptr == dst_ptr) {
      VLOG(3) << "Skip copy the same data async from " << src_place << " to "
              << dst_place;
      return;
    }
    auto stream =
        reinterpret_cast<const platform::NPUDeviceContext&>(ctx).stream();
    memory::Copy(BOOST_GET_CONST(platform::NPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::NPUPlace, src_place), src_ptr, size,
                 stream);
  }
  else {  // NOLINT
    PADDLE_THROW(platform::errors::Unimplemented(
        "Copy from %s to %s is not supported.", src_place, dst_place));
  }
#endif
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  else if (platform::is_cuda_pinned_place(src_place) &&  // NOLINT
           platform::is_cuda_pinned_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::CUDAPinnedPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::CUDAPinnedPlace, src_place), src_ptr,
                 size);
  }
  else if (platform::is_cuda_pinned_place(src_place) &&  // NOLINT
           platform::is_cpu_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::CPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::CUDAPinnedPlace, src_place), src_ptr,
                 size);
  }
  else if (platform::is_cpu_place(src_place) &&  // NOLINT
           platform::is_cuda_pinned_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::CUDAPinnedPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::CPUPlace, src_place), src_ptr, size);
  }
  else if (platform::is_gpu_place(src_place) &&  // NOLINT
           platform::is_cpu_place(dst_place)) {
    auto src_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, src_place);
    auto dst_cpu_place = BOOST_GET_CONST(platform::CPUPlace, dst_place);
    auto ctx_place = ctx.GetPlace();
    PADDLE_ENFORCE_EQ(
        platform::is_gpu_place(ctx_place), true,
        platform::errors::PreconditionNotMet(
            "Context place error, excepted GPUPlace, but actually %s.",
            ctx_place));
    auto ctx_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, ctx_place);
    PADDLE_ENFORCE_EQ(src_gpu_place, ctx_gpu_place,
                      platform::errors::Unavailable(
                          "Source place and context place do not match, source "
                          "place is %s, context place is %s.",
                          src_gpu_place, ctx_gpu_place));
    auto stream =
        reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream();
    memory::Copy(dst_cpu_place, dst_ptr, src_gpu_place, src_ptr, size, stream);
  }
  else if (platform::is_cpu_place(src_place) &&  // NOLINT
           platform::is_gpu_place(dst_place)) {
    auto src_cpu_place = BOOST_GET_CONST(platform::CPUPlace, src_place);
    auto dst_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, dst_place);
    auto ctx_place = ctx.GetPlace();
    PADDLE_ENFORCE_EQ(
        platform::is_gpu_place(ctx_place), true,
        platform::errors::PreconditionNotMet(
            "Context place error, excepted GPUPlace, but actually %s.",
            ctx_place));
    auto ctx_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, ctx_place);
    PADDLE_ENFORCE_EQ(dst_gpu_place, ctx_gpu_place,
                      platform::errors::Unavailable(
                          "Destination place and context place do not match, "
                          "destination place is %s, context place is %s.",
                          dst_gpu_place, ctx_gpu_place));
    auto stream =
        reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream();
    memory::Copy(dst_gpu_place, dst_ptr, src_cpu_place, src_ptr, size, stream);
  }
  else if (platform::is_gpu_place(src_place) &&  // NOLINT
           platform::is_cuda_pinned_place(dst_place)) {
    auto src_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, src_place);
    auto dst_cuda_pinned_place =
        BOOST_GET_CONST(platform::CUDAPinnedPlace, dst_place);
    auto ctx_place = ctx.GetPlace();
    PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx_place), true,
                      platform::errors::PreconditionNotMet(
                          "Device context place mismatch. When copying Tensor "
                          "data from GPU memory to CUDA Pinned memory, current "
                          "device context place should be GPU."));
    auto ctx_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, ctx_place);
    PADDLE_ENFORCE_EQ(src_gpu_place, ctx_gpu_place,
                      platform::errors::PreconditionNotMet(
                          "The source GPU device and current device context do "
                          "not match. The source GPU device number is %d, but "
                          "device context GPU number is %d.",
                          src_gpu_place.device, ctx_gpu_place.device));
    auto stream =
        reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream();
    memory::Copy(dst_cuda_pinned_place, dst_ptr, src_gpu_place, src_ptr, size,
                 stream);
  }
  else if (platform::is_cuda_pinned_place(src_place) &&  // NOLINT
           platform::is_gpu_place(dst_place)) {
    auto src_cuda_pinned_place =
        BOOST_GET_CONST(platform::CUDAPinnedPlace, src_place);
    auto dst_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, dst_place);
    auto ctx_place = ctx.GetPlace();
    PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx_place), true,
                      platform::errors::PreconditionNotMet(
                          "Device context place mismatch. When copying Tensor "
                          "data from CUDA Pinned memory to GPU memory, current "
                          "device context place should be GPU."));
    auto ctx_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, ctx_place);
    PADDLE_ENFORCE_EQ(dst_gpu_place, ctx_gpu_place,
                      platform::errors::PreconditionNotMet(
                          "The target GPU device and current device context do "
                          "not match. The target GPU device number is %d, but "
                          "device context GPU number is %d.",
                          dst_gpu_place.device, ctx_gpu_place.device));
    auto stream =
        reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream();
    memory::Copy(dst_gpu_place, dst_ptr, src_cuda_pinned_place, src_ptr, size,
                 stream);
  }
  else if (platform::is_gpu_place(src_place) &&  // NOLINT
           platform::is_gpu_place(dst_place)) {
    auto src_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, src_place);
    auto dst_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, dst_place);
    auto ctx_place = ctx.GetPlace();
    PADDLE_ENFORCE_EQ(
        platform::is_gpu_place(ctx_place), true,
        platform::errors::PreconditionNotMet(
            "Context place error, excepted GPUPlace, but actually %s.",
            ctx_place));
    auto stream =
        reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream();
    if (platform::is_same_place(src_place, dst_place)) {
      memory::Copy(dst_gpu_place, dst_ptr, src_gpu_place, src_ptr, size,
                   stream);
    } else {
      if (platform::is_same_place(ctx_place, src_place)) {
        memory::Copy(dst_gpu_place, dst_ptr, src_gpu_place, src_ptr, size,
                     stream);
        platform::DeviceContextPool::Instance().Get(src.place())->Wait();
      } else if (platform::is_same_place(ctx_place, dst_place)) {
        platform::DeviceContextPool::Instance().Get(src.place())->Wait();
        memory::Copy(dst_gpu_place, dst_ptr, src_gpu_place, src_ptr, size,
                     stream);
      } else {
        PADDLE_THROW(platform::errors::Unavailable(
            "Context place dose not match the source and destination place."));
      }
    }
  }
  else {  // NOLINT
    PADDLE_THROW(platform::errors::Unimplemented(
        "Copying from %s to %s is not supported.", src_place, dst_place));
  }
#endif
}

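// Convenience overload that resolves a device context from the global pool:
// the destination's context for GPU/NPU targets, otherwise the source's.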
void TensorCopy(const Tensor& src, const platform::Place& dst_place,
                Tensor* dst) {
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  const platform::DeviceContext* dev_ctx;
  if (platform::is_gpu_place(dst_place) || platform::is_npu_place(dst_place)) {
    dev_ctx = pool.Get(dst_place);
  } else {
    dev_ctx = pool.Get(src.place());
  }
  TensorCopy(src, dst_place, *dev_ctx, dst);
}

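// Synchronous variant of TensorCopy: device transfers are issued without a
// stream (nullptr), so the data has been fully copied when the call returns.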
void TensorCopySync(const Tensor& src, const platform::Place& dst_place,
                    Tensor* dst) {
  if (&src == dst) {
    auto src_copy = src;
    TensorCopySync(src_copy, dst_place, dst);
    return;
  }

  VLOG(3) << "TensorCopySync " << src.dims() << " from " << src.place()
          << " to " << dst_place;
  src.check_memory_size();
  dst->Resize(src.dims());
  dst->set_layout(src.layout());
#ifdef PADDLE_WITH_MKLDNN
  dst->set_format(src.format());
#endif
  auto src_place = src.place();
  auto src_ptr = src.data<void>();
  auto dst_ptr = dst->mutable_data(dst_place, src.type());

  if (src_ptr == dst_ptr && src_place == dst_place) {
    VLOG(3) << "Skip copy the same data from " << src_place << " to "
            << dst_place;
    return;
  }

  auto size = src.numel() * SizeOfType(src.type());
  if (platform::is_cpu_place(src_place) && platform::is_cpu_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::CPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::CPUPlace, src_place), src_ptr, size);
  }
#ifdef PADDLE_WITH_XPU
  else if (platform::is_xpu_place(src_place) &&  // NOLINT
           platform::is_cpu_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::CPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::XPUPlace, src_place), src_ptr, size);
  } else if (platform::is_cpu_place(src_place) &&  // NOLINT
             platform::is_xpu_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::XPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::CPUPlace, src_place), src_ptr, size);
  } else if (platform::is_xpu_place(src_place) &&  // NOLINT
             platform::is_xpu_place(dst_place)) {
    if (src_ptr == dst_ptr) {
      VLOG(3) << "Skip copy the same data async from " << src_place << " to "
              << dst_place;
      return;
    }
    memory::Copy(BOOST_GET_CONST(platform::XPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::XPUPlace, src_place), src_ptr, size);
  } else {  // NOLINT
    PADDLE_THROW(platform::errors::Unimplemented(
        "Copy from %s to %s is not supported.", src_place, dst_place));
  }
#endif
#ifdef PADDLE_WITH_ASCEND_CL
  else if (platform::is_npu_place(src_place) &&  // NOLINT
           platform::is_cpu_place(dst_place)) {  /* npu -> cpu*/
    memory::Copy(BOOST_GET_CONST(platform::CPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::NPUPlace, src_place), src_ptr, size,
                 nullptr);
  }
  else if (platform::is_cpu_place(src_place) &&  // NOLINT
           platform::is_npu_place(dst_place)) {  /* cpu -> npu*/
    memory::Copy(BOOST_GET_CONST(platform::NPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::CPUPlace, src_place), src_ptr, size,
                 nullptr);
  }
  else if (platform::is_npu_place(src_place) &&  // NOLINT
           platform::is_npu_place(dst_place)) {  /* npu -> npu*/
    if (src_ptr == dst_ptr) {
      VLOG(3) << "Skip copy the same data sync from " << src_place << " to "
              << dst_place;
      return;
    }
    memory::Copy(BOOST_GET_CONST(platform::NPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::NPUPlace, src_place), src_ptr, size,
                 nullptr);
  }
  else {  // NOLINT
    PADDLE_THROW(platform::errors::Unimplemented(
        "Copy from %s to %s is not supported.", src_place, dst_place));
  }
#endif
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  else if (platform::is_cuda_pinned_place(src_place) &&  // NOLINT
           platform::is_cuda_pinned_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::CUDAPinnedPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::CUDAPinnedPlace, src_place), src_ptr,
                 size);
  }
  else if (platform::is_cuda_pinned_place(src_place) &&  // NOLINT
           platform::is_cpu_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::CPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::CUDAPinnedPlace, src_place), src_ptr,
                 size);
  }
  else if (platform::is_cpu_place(src_place) &&  // NOLINT
           platform::is_cuda_pinned_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::CUDAPinnedPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::CPUPlace, src_place), src_ptr, size);
  }
  else if (platform::is_gpu_place(src_place) &&  // NOLINT
           platform::is_cuda_pinned_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::CUDAPinnedPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::CUDAPlace, src_place), src_ptr, size,
                 nullptr);
  }
  else if (platform::is_gpu_place(src_place) &&  // NOLINT
           platform::is_cpu_place(dst_place)) {
    auto src_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, src_place);
    auto dst_cpu_place = BOOST_GET_CONST(platform::CPUPlace, dst_place);
    memory::Copy(dst_cpu_place, dst_ptr, src_gpu_place, src_ptr, size, nullptr);
  }
  else if (platform::is_cpu_place(src_place) &&  // NOLINT
           platform::is_gpu_place(dst_place)) {
    auto src_cpu_place = BOOST_GET_CONST(platform::CPUPlace, src_place);
    auto dst_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, dst_place);
    memory::Copy(dst_gpu_place, dst_ptr, src_cpu_place, src_ptr, size, nullptr);
  }
  else if (platform::is_gpu_place(src_place) &&  // NOLINT
           platform::is_gpu_place(dst_place)) {
    auto src_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, src_place);
    auto dst_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, dst_place);
    memory::Copy(dst_gpu_place, dst_ptr, src_gpu_place, src_ptr, size, nullptr);
  }
  else if (platform::is_cuda_pinned_place(src_place) &&  // NOLINT
           platform::is_gpu_place(dst_place)) {
    auto src_pinned_place =
        BOOST_GET_CONST(platform::CUDAPinnedPlace, src_place);
    auto dst_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, dst_place);
    memory::Copy(dst_gpu_place, dst_ptr, src_pinned_place, src_ptr, size,
                 nullptr);
  }
  else {  // NOLINT
    PADDLE_THROW(platform::errors::Unimplemented(
        "Copy from %s to %s is not supported.", src_place, dst_place));
  }
#endif
}
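
// Usage sketch (illustrative only; assumes a CUDA build and a tensor that
// already holds data on platform::CUDAPlace(0)):
//
//   framework::Tensor gpu_tensor, cpu_tensor;
//   // ... fill gpu_tensor on platform::CUDAPlace(0) ...
//   TensorCopySync(gpu_tensor, platform::CPUPlace(), &cpu_tensor);
//   // cpu_tensor now holds a host copy; no extra synchronization is needed.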

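// AnyDTypeVisitor evaluates predicate_(t).any() over the flattened tensor for
// whichever data type the tensor holds, writing a single bool into out_.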
template <typename Predicate, typename DevCtx>
struct AnyDTypeVisitor {
  Predicate predicate_;
  const Tensor& tensor_;
  const DevCtx& ctx_;
  Tensor* out_;

  AnyDTypeVisitor(Predicate predicate, const Tensor& tensor, const DevCtx& ctx,
                  Tensor* out)
      : predicate_(predicate), tensor_(tensor), ctx_(ctx), out_(out) {}

  template <typename T>
  void apply() const {
    auto t = EigenVector<T>::Flatten(tensor_);
    auto o = EigenScalar<bool>::From(*out_);
    // Return true if predicate_(t) holds for any element.
    o.device(*ctx_.eigen_device()) = predicate_(t).any();
  }
};

template <typename Predicate, typename DevCtx>
inline void AnyImpl(Predicate predicate, const framework::Tensor& tensor,
                    const DevCtx& ctx, framework::Tensor* out) {
  VisitDataType(tensor.type(), AnyDTypeVisitor<Predicate, DevCtx>(
                                   predicate, tensor, ctx, out));
}

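// AnyVisitor runs the reduction on the tensor's own place and, for device
// places, copies the one-element bool result back to the CPU.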
template <typename Predicate>
class AnyVisitor : public boost::static_visitor<bool> {
 private:
  const framework::Tensor& tensor_;
  Predicate predicate_;

  bool GetResultHelper(const framework::Tensor& out,
                       const platform::Place& place) const {
    platform::CPUPlace cpu;
    framework::Tensor tmp;
    tmp.Resize({1});
    tmp.mutable_data<bool>(cpu);
    auto ctx = platform::DeviceContextPool::Instance().Get(place);
    ctx->Wait();
    TensorCopy(out, cpu, *ctx, &tmp);
    ctx->Wait();
    return GetResult(tmp, cpu);
  }

 public:
  AnyVisitor(const framework::Tensor& tensor, Predicate predicate)
      : tensor_(tensor), predicate_(std::move(predicate)) {}

  template <typename Place>
  bool operator()(const Place& place) const {
    framework::Tensor out;
    out.Resize({1});
    out.mutable_data<bool>(place);
    auto* ctx = platform::DeviceContextPool::Instance().GetByPlace(place);
    AnyImpl(predicate_, tensor_, *ctx, &out);
    return this->GetResult(out, place);
  }

  bool GetResult(const framework::Tensor& out,
                 const platform::XPUPlace& xpu) const {
    return GetResultHelper(out, xpu);
  }

  bool GetResult(const framework::Tensor& out,
                 const platform::CUDAPlace& gpu) const {
    return GetResultHelper(out, gpu);
  }

  bool GetResult(const framework::Tensor& out,
                 const platform::NPUPlace& npu) const {
    PADDLE_THROW(
        platform::errors::Unimplemented("Not supported on place (%s) ", npu));
    // return GetResultHelper(out, npu);
  }

  bool GetResult(const framework::Tensor& out,
                 const platform::NPUPinnedPlace& cpu) const {
    return *out.data<bool>();
  }

  bool GetResult(const framework::Tensor& out,
                 const platform::CPUPlace& cpu) const {
    return *out.data<bool>();
  }

  bool GetResult(const framework::Tensor& out,
                 const platform::CUDAPinnedPlace& cpu) const {
    return *out.data<bool>();
  }
};

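// AnyOutVisitor is like AnyVisitor but stores the result in a caller-provided
// one-element tensor instead of returning it.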
template <typename Predicate>
class AnyOutVisitor : public boost::static_visitor<> {
 private:
  const framework::Tensor& tensor_;
  mutable framework::Tensor* out_;
  Predicate predicate_;

 public:
  AnyOutVisitor(const framework::Tensor& tensor, Predicate predicate,
                framework::Tensor* out)
      : tensor_(tensor), out_(out), predicate_(std::move(predicate)) {}

  template <typename Place>
  void operator()(const Place& place) const {
    auto* ctx = platform::DeviceContextPool::Instance().GetByPlace(place);
    out_->Resize({1});
    out_->mutable_data<bool>(place);
    AnyImpl(predicate_, tensor_, *ctx, out_);
  }
};

template <typename Predicate>
inline bool Any(const framework::Tensor& tensor, Predicate predicate) {
  AnyVisitor<Predicate> visitor(tensor, predicate);
  auto place = tensor.place();
  return platform::VisitPlace(place, visitor);
}

template <typename Predicate>
inline void Any(const framework::Tensor& tensor, Predicate predicate,
                framework::Tensor* out) {
  AnyOutVisitor<Predicate> visitor(tensor, predicate, out);
  auto place = tensor.place();
  platform::VisitPlace(place, visitor);
}

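// The All* counterparts below keep an element-wise bool output instead of
// reducing to a single scalar: out[i] = predicate(in[i]).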
template <typename Predicate, typename DevCtx>
struct AllDTypeVisitor {
  Predicate predicate_;
  const Tensor& tensor_;
  const DevCtx& ctx_;
  Tensor* out_;

  AllDTypeVisitor(Predicate predicate, const Tensor& tensor, const DevCtx& ctx,
                  Tensor* out)
      : predicate_(predicate), tensor_(tensor), ctx_(ctx), out_(out) {}

  template <typename T>
  void apply() const {
    auto t = EigenVector<T>::Flatten(tensor_);
    auto o = EigenVector<bool>::Flatten(*out_);
    o.device(*ctx_.eigen_device()) = predicate_(t);
  }
};

template <typename Predicate, typename DevCtx>
inline void AllImpl(Predicate predicate, const framework::Tensor& tensor,
                    const DevCtx& ctx, framework::Tensor* out) {
  VisitDataType(tensor.type(), AllDTypeVisitor<Predicate, DevCtx>(
                                   predicate, tensor, ctx, out));
}

template <typename Predicate>
class AllOutVisitor : public boost::static_visitor<> {
 private:
  const framework::Tensor& tensor_;
  mutable framework::Tensor* out_;
  Predicate predicate_;

 public:
  AllOutVisitor(const framework::Tensor& tensor, Predicate predicate,
                framework::Tensor* out)
      : tensor_(tensor), out_(out), predicate_(predicate) {}

  template <typename Place>
  void operator()(const Place& place) const {
    auto* ctx = platform::DeviceContextPool::Instance().GetByPlace(place);
    out_->Resize(tensor_.dims());
    out_->mutable_data<bool>(place);
    AllImpl(predicate_, tensor_, *ctx, out_);
  }
};

template <typename Predicate>
inline void All(const framework::Tensor& tensor, Predicate predicate,
                framework::Tensor* out) {
  AllOutVisitor<Predicate> visitor(tensor, predicate, out);
  auto place = tensor.place();
  platform::VisitPlace(place, visitor);
}

struct ContainsNANPredicate {
  template <typename T>
  auto operator()(const T& eigen_vec) const
      -> decltype(std::declval<T>().isnan()) {
    // Cast eigen_vec to a vector of bool: true where the element is NaN.
    return eigen_vec.isnan();
  }
};

bool TensorContainsNAN(const framework::Tensor& tensor) {
  ContainsNANPredicate predicate;
  return Any(tensor, predicate);
}

void TensorContainsNAN(const framework::Tensor& tensor,
                       framework::Tensor* out) {
  ContainsNANPredicate predicate;
  Any(tensor, predicate, out);
}

void TensorContainsNANV2(const framework::Tensor& tensor,
                         framework::Tensor* out) {
  ContainsNANPredicate predicate;
  All(tensor, predicate, out);
}

struct ContainsInfPredicate {
  template <typename T>
  auto operator()(const T& eigen_vec) const
      -> decltype(std::declval<T>().isinf()) {
    // Cast eigen_vec to a vector of bool: true where the element is Inf.
    return eigen_vec.isinf();
  }
};

bool TensorContainsInf(const framework::Tensor& tensor) {
  ContainsInfPredicate predicate;
  return Any(tensor, predicate);
}

void TensorContainsInf(const framework::Tensor& tensor,
                       framework::Tensor* out) {
  ContainsInfPredicate predicate;
  Any(tensor, predicate, out);
}

void TensorContainsInfV2(const framework::Tensor& tensor,
                         framework::Tensor* out) {
  ContainsInfPredicate predicate;
  All(tensor, predicate, out);
}

// NOTE(dzhwinter):
// Isfinite needs an AllVisitor to loop through all the elements.
// We choose two CUDA calls instead of one AllVisitor. The AllVisitor
// should be implemented if the performance hurts.
bool TensorIsfinite(const framework::Tensor& tensor) {
  ContainsInfPredicate pred_inf;
  ContainsNANPredicate pred_nan;
  return !Any(tensor, pred_inf) && !Any(tensor, pred_nan);
}

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
template <typename T>
static inline void __global__ BothFalse(const T* cmp, T* out, int element_num) {
  CUDA_KERNEL_LOOP(i, element_num) { out[i] = (!cmp[i]) && (!out[i]); }
}
#endif

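// BothFalseVisitor combines the Inf mask (in_) and the NaN mask (out_):
// out[i] = !in[i] && !out[i], i.e. true only where the element is finite.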
struct BothFalseVisitor : public boost::static_visitor<> {
  const framework::Tensor& in_;
  mutable framework::Tensor* out_;
  BothFalseVisitor(const framework::Tensor& in, framework::Tensor* out)
      : in_(in), out_(out) {}

  template <typename Place>
  void operator()(const Place& place) const {
    VisitorImpl(place);
  }

  void VisitorImpl(const platform::XPUPlace& xpu) const {
    PADDLE_THROW(platform::errors::Unimplemented("XPUPlace is not supported"));
  }

  void VisitorImpl(const platform::CUDAPlace& gpu) const {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    auto* ctx = platform::DeviceContextPool::Instance().GetByPlace(gpu);
    constexpr int MAX_BLOCK_DIM = 512;
    const int MAX_GRID_DIM = ctx->GetMaxPhysicalThreadCount() / MAX_BLOCK_DIM;
    int element_num = in_.numel();
    int block_size = (element_num >= MAX_BLOCK_DIM)
                         ? MAX_BLOCK_DIM
                         : (1 << static_cast<int>(std::log2(element_num)));
    int grid_size = element_num / block_size;
    grid_size = (grid_size >= MAX_GRID_DIM) ? MAX_GRID_DIM : grid_size;
    BothFalse<bool><<<grid_size, block_size, 0, ctx->stream()>>>(
        in_.data<bool>(), out_->mutable_data<bool>(gpu), element_num);
#endif
  }

  void VisitorImpl(const platform::NPUPlace& npu) const {
    // TODO(zhiqiu)
  }

  void VisitorImpl(const platform::CPUPlace& cpu) const {
    int num = in_.numel();
    const bool* in_ptr = in_.data<bool>();
    bool* out_ptr = out_->data<bool>();
    for (int i = 0; i < num; ++i) {
      bool lhs = !in_ptr[i];
      bool rhs = !out_ptr[i];
      out_ptr[i] = lhs && rhs;
    }
  }

  void VisitorImpl(
      const platform::CUDAPinnedPlace& cpu /* equals to cpu*/) const {
    int num = in_.numel();
    const bool* in_ptr = in_.data<bool>();
    bool* out_ptr = out_->data<bool>();
    for (int i = 0; i < num; ++i) {
      bool lhs = !in_ptr[i];
      bool rhs = !out_ptr[i];
      out_ptr[i] = lhs && rhs;
    }
  }

  void VisitorImpl(
      const platform::NPUPinnedPlace& cpu /* equals to cpu*/) const {
    int num = in_.numel();
    const bool* in_ptr = in_.data<bool>();
    bool* out_ptr = out_->data<bool>();
    for (int i = 0; i < num; ++i) {
      bool lhs = !in_ptr[i];
      bool rhs = !out_ptr[i];
      out_ptr[i] = lhs && rhs;
    }
  }
};

void TensorIsfinite(const framework::Tensor& tensor, framework::Tensor* out) {
  framework::Tensor tmp;
  TensorContainsInf(tensor, &tmp);
  TensorContainsNAN(tensor, out);
  BothFalseVisitor visitor(tmp, out);
  auto place = tensor.place();
  platform::VisitPlace(place, visitor);
}
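
// Usage sketch (illustrative): after the call below, `flag` is a one-element
// bool tensor that is true iff `t` contains neither Inf nor NaN.
//
//   framework::Tensor flag;
//   TensorIsfinite(t, &flag);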

void TensorIsfiniteV2(const framework::Tensor& tensor, framework::Tensor* out) {
  framework::Tensor tmp;
  TensorContainsInfV2(tensor, &tmp);
  TensorContainsNANV2(tensor, out);
  BothFalseVisitor visitor(tmp, out);
  auto place = tensor.place();
  platform::VisitPlace(place, visitor);
}

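// Serialization layout: a uint32_t version (currently 0), an int32_t byte
// size followed by the protobuf-encoded TensorDesc, then the raw tensor data.
// Device-resident tensors are staged through a 64MB host buffer.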
void TensorToStream(std::ostream& os, const Tensor& tensor,
                    const platform::DeviceContext& dev_ctx) {
  {  // the 1st field, uint32_t version
    constexpr uint32_t version = 0;
    os.write(reinterpret_cast<const char*>(&version), sizeof(version));
  }
  {  // the 2nd field, tensor description
     // int32_t  size
     // void*    protobuf message
    proto::VarType::TensorDesc desc;
    desc.set_data_type(tensor.type());
    auto dims = framework::vectorize(tensor.dims());
    auto* pb_dims = desc.mutable_dims();
    pb_dims->Resize(static_cast<int>(dims.size()), 0);
    std::copy(dims.begin(), dims.end(), pb_dims->begin());
    int32_t size = desc.ByteSize();
    os.write(reinterpret_cast<const char*>(&size), sizeof(size));
    auto out = desc.SerializeAsString();
    os.write(out.data(), size);
  }
  {  // the 3rd field, tensor data
    uint64_t size = tensor.numel() * framework::SizeOfType(tensor.type());

    auto* data_ptr = tensor.data<void>();
    PADDLE_ENFORCE_LT(size, (std::numeric_limits<std::streamsize>::max)(),
                      platform::errors::ResourceExhausted(
                          "tensor size %d overflow when writing tensor", size));
    if (platform::is_gpu_place(tensor.place())) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
      constexpr size_t kBufSize = 1024 * 1024 * 64;  // 64MB
      std::unique_ptr<char[]> buf(new char[kBufSize]);
      auto& gpu_dev_ctx =
          static_cast<const platform::CUDADeviceContext&>(dev_ctx);
      platform::CPUPlace cpu;
      uintptr_t data = reinterpret_cast<uintptr_t>(data_ptr);
      while (size != 0) {
        size_t size_to_write = std::min(kBufSize, static_cast<size_t>(size));
        memory::Copy(cpu, buf.get(),
                     BOOST_GET_CONST(platform::CUDAPlace, tensor.place()),
                     reinterpret_cast<const void*>(data), size_to_write,
                     gpu_dev_ctx.stream());
        gpu_dev_ctx.Wait();
        os.write(buf.get(), size_to_write);
        data += size_to_write;
        size -= size_to_write;
      }
#else
      PADDLE_THROW(platform::errors::Unimplemented(
          "CUDAPlace is not supported when not compiled with CUDA"));
#endif
    } else if (platform::is_xpu_place(tensor.place())) {
#ifdef PADDLE_WITH_XPU
      constexpr size_t kBufSize = 1024 * 1024 * 64;  // 64MB
      std::unique_ptr<char[]> buf(new char[kBufSize]);
      auto& xpu_dev_ctx =
          static_cast<const platform::XPUDeviceContext&>(dev_ctx);
      platform::CPUPlace cpu;
      uintptr_t data = reinterpret_cast<uintptr_t>(data_ptr);
      while (size != 0) {
        size_t size_to_write = std::min(kBufSize, static_cast<size_t>(size));
        memory::Copy(cpu, buf.get(),
                     BOOST_GET_CONST(platform::XPUPlace, tensor.place()),
                     reinterpret_cast<const void*>(data), size_to_write);
        xpu_dev_ctx.Wait();
        os.write(buf.get(), size_to_write);
        data += size_to_write;
        size -= size_to_write;
      }
#else
      PADDLE_THROW(platform::errors::Unimplemented(
          "XPUPlace is not supported when not compiled with XPU"));
845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867
#endif
    } else if (platform::is_npu_place(tensor.place())) {
#ifdef PADDLE_WITH_ASCEND_CL
      constexpr size_t kBufSize = 1024 * 1024 * 64;  // 64MB
      std::unique_ptr<char[]> buf(new char[kBufSize]);
      auto& npu_dev_ctx =
          static_cast<const platform::NPUDeviceContext&>(dev_ctx);
      platform::CPUPlace cpu;
      uintptr_t data = reinterpret_cast<uintptr_t>(data_ptr);
      while (size != 0) {
        size_t size_to_write = std::min(kBufSize, static_cast<size_t>(size));
        memory::Copy(cpu, buf.get(),
                     BOOST_GET_CONST(platform::NPUPlace, tensor.place()),
                     reinterpret_cast<const void*>(data), size_to_write,
                     npu_dev_ctx.stream());
        npu_dev_ctx.Wait();
        os.write(buf.get(), size_to_write);
        data += size_to_write;
        size -= size_to_write;
      }
#else
      PADDLE_THROW(platform::errors::Unimplemented(
          "NPUPlace is not supported when not compiled with NPU"));
#endif
    } else {
      os.write(static_cast<const char*>(data_ptr),
               static_cast<std::streamsize>(size));
    }
  }
}

struct DeserializedDataFunctor {
  DeserializedDataFunctor(void** buf, Tensor* tensor,
                          const platform::Place& place)
      : buf_(buf), tensor_(tensor), place_(place) {}

  template <typename T>
  void apply() {
    *buf_ = tensor_->mutable_data<T>(place_);
  }

  void** buf_;
  Tensor* tensor_;
  platform::Place place_;
};

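// Deserializes a tensor slice: `seek` is the element offset at which reading
// starts, and `shape` overrides the shape recorded in the stream.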
void TensorFromStream(std::istream& is, Tensor* tensor,
                      const platform::DeviceContext& dev_ctx,
                      const size_t& seek, const std::vector<int64_t>& shape) {
  uint32_t version;
  is.read(reinterpret_cast<char*>(&version), sizeof(version));

  PADDLE_ENFORCE_EQ(
      version, 0U,
      platform::errors::InvalidArgument(
          "tensor version %u is not supported, Only version 0 is supported",
          version));

  proto::VarType::TensorDesc desc;
  {  // int32_t size
    // proto buffer
    int32_t size;
    is.read(reinterpret_cast<char*>(&size), sizeof(size));
    std::unique_ptr<char[]> buf(new char[size]);
    is.read(reinterpret_cast<char*>(buf.get()), size);
    PADDLE_ENFORCE_EQ(
        desc.ParseFromArray(buf.get(), size), true,
        platform::errors::InvalidArgument("Cannot parse tensor desc"));
  }
  {  // read tensor
    tensor->Resize(framework::make_ddim(shape));
    size_t seekg = seek * framework::SizeOfType(desc.data_type());
    is.seekg(seekg, is.cur);

    void* buf;
    auto ctx = platform::CPUDeviceContext();
    size_t size = tensor->numel() * framework::SizeOfType(desc.data_type());
    if (platform::is_gpu_place(dev_ctx.GetPlace()) ||
        platform::is_xpu_place(dev_ctx.GetPlace()) ||
        platform::is_npu_place(dev_ctx.GetPlace())) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) || \
    defined(PADDLE_WITH_XPU) || defined(PADDLE_WITH_ASCEND_CL)
      Tensor cpu_tensor;
      cpu_tensor.Resize(framework::make_ddim(shape));
      framework::VisitDataType(
          desc.data_type(),
          DeserializedDataFunctor(&buf, &cpu_tensor, ctx.GetPlace()));
      is.read(static_cast<char*>(buf), size);
      auto dst_place = dev_ctx.GetPlace();
      framework::TensorCopy(cpu_tensor, dst_place, dev_ctx, tensor);
      if (platform::is_npu_place(dev_ctx.GetPlace())) {
        dev_ctx.Wait();
      }
#else
      if (platform::is_gpu_place(dev_ctx.GetPlace())) {
        PADDLE_THROW(platform::errors::Unimplemented(
            "CUDAPlace is not supported when not compiled with CUDA"));
      } else if (platform::is_xpu_place(dev_ctx.GetPlace())) {
        PADDLE_THROW(platform::errors::Unimplemented(
            "XPUPlace is not supported when not compiled with XPU"));
      } else {
        PADDLE_THROW(platform::errors::Unimplemented(
            "NPUPlace is not supported when not compiled with NPU"));
      }
#endif
    } else {
      framework::VisitDataType(
          desc.data_type(),
          DeserializedDataFunctor(&buf, tensor, ctx.GetPlace()));
      is.read(static_cast<char*>(buf), size);
    }
  }
}

void TensorFromStream(std::istream& is, Tensor* tensor,
                      const platform::DeviceContext& dev_ctx) {
  uint32_t version;
  is.read(reinterpret_cast<char*>(&version), sizeof(version));
  PADDLE_ENFORCE_EQ(
      version, 0U,
      platform::errors::InvalidArgument(
          "tensor version %u is not supported, Only version 0 is supported",
          version));
  proto::VarType::TensorDesc desc;
  {  // int32_t size
     // proto buffer
    int32_t size;
    is.read(reinterpret_cast<char*>(&size), sizeof(size));
    std::unique_ptr<char[]> buf(new char[size]);
    is.read(reinterpret_cast<char*>(buf.get()), size);
    PADDLE_ENFORCE_EQ(
        desc.ParseFromArray(buf.get(), size), true,
        platform::errors::InvalidArgument("Cannot parse tensor desc"));
  }
  {  // read tensor
    std::vector<int64_t> dims;
    dims.reserve(static_cast<size_t>(desc.dims().size()));
    std::copy(desc.dims().begin(), desc.dims().end(), std::back_inserter(dims));
    tensor->Resize(framework::make_ddim(dims));
    void* buf;
    auto ctx = platform::CPUDeviceContext();
    size_t size = tensor->numel() * framework::SizeOfType(desc.data_type());
    if (platform::is_gpu_place(dev_ctx.GetPlace()) ||
        platform::is_xpu_place(dev_ctx.GetPlace()) ||
        platform::is_npu_place(dev_ctx.GetPlace())) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) || \
    defined(PADDLE_WITH_XPU) || defined(PADDLE_WITH_ASCEND_CL)
      Tensor cpu_tensor;
      cpu_tensor.Resize(framework::make_ddim(dims));
      framework::VisitDataType(
          desc.data_type(),
          DeserializedDataFunctor(&buf, &cpu_tensor, ctx.GetPlace()));
      is.read(static_cast<char*>(buf), size);
      auto dst_place = dev_ctx.GetPlace();
      framework::TensorCopy(cpu_tensor, dst_place, dev_ctx, tensor);
      if (platform::is_npu_place(dev_ctx.GetPlace())) {
        dev_ctx.Wait();
      }
#else
      if (platform::is_gpu_place(dev_ctx.GetPlace())) {
        PADDLE_THROW(platform::errors::Unimplemented(
            "CUDAPlace is not supported when not compiled with CUDA"));
      } else if (platform::is_xpu_place(dev_ctx.GetPlace())) {
        PADDLE_THROW(platform::errors::Unimplemented(
            "XPUPlace is not supported when not compiled with XPU"));
      } else {
        PADDLE_THROW(platform::errors::Unimplemented(
            "NPUPlace is not supported when not compiled with NPU"));
      }
#endif
    } else {
      framework::VisitDataType(
          desc.data_type(),
          DeserializedDataFunctor(&buf, tensor, ctx.GetPlace()));
      is.read(static_cast<char*>(buf), size);
    }
  }
}

// Get the tensor data pointer corresponding to the given DLDataType.
void* GetDstPtrByDLDataType(DLDataType type, framework::Tensor* dst,
                            const platform::Place& dst_place) {
  // vector types not currently supported
  PADDLE_ENFORCE_LE(type.lanes, 1,
                    platform::errors::Unimplemented(
                        "Vector type is not supported currently."));

  switch (type.bits) {
    case 8:
      if (type.code == kDLInt)
        return static_cast<void*>(dst->mutable_data<int8_t>(dst_place));
      if (type.code == kDLUInt)
        return static_cast<void*>(dst->mutable_data<uint8_t>(dst_place));
      PADDLE_THROW(platform::errors::Unimplemented(
          "DLDataType code <%d> is illegal when DLDataType.bits is <%d>.",
          type.code, type.bits));
    case 16:
      if (type.code == kDLInt)
        return static_cast<void*>(dst->mutable_data<int16_t>(dst_place));
      if (type.code == kDLFloat)
        return static_cast<void*>(
            dst->mutable_data<paddle::platform::float16>(dst_place));
      PADDLE_THROW(platform::errors::Unimplemented(
          "DLDataType code <%d> is illegal when DLDataType.bits is <%d>.",
          type.code, type.bits));
    case 32:
      if (type.code == kDLInt)
        return static_cast<void*>(dst->mutable_data<int32_t>(dst_place));
      if (type.code == kDLFloat)
        return static_cast<void*>(dst->mutable_data<float>(dst_place));
      PADDLE_THROW(platform::errors::Unimplemented(
          "DLDataType code <%d> is illegal when DLDataType.bits is <%d>.",
          type.code, type.bits));
    case 64:
      if (type.code == kDLInt)
        return static_cast<void*>(dst->mutable_data<int64_t>(dst_place));
      if (type.code == kDLFloat)
        return static_cast<void*>(dst->mutable_data<double>(dst_place));
      PADDLE_THROW(platform::errors::Unimplemented(
          "DLDataType code <%d> is illegal when DLDataType.bits is <%d>.",
          type.code, type.bits));
    default:
      PADDLE_THROW(platform::errors::Unimplemented(
          "Unsupported DLDataType.bits %d.", type.bits));
  }
}

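// Copies a DLPack tensor into a framework::Tensor, dispatching on the device
// type recorded in dl_tensor.ctx.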
void TensorFromDLPack(const ::DLTensor& dl_tensor, framework::Tensor* dst) {
  platform::CPUPlace dst_place = platform::CPUPlace();
  platform::CPUPlace src_place = platform::CPUPlace();

  std::vector<int64_t> vec;
  std::copy(dl_tensor.shape, dl_tensor.shape + dl_tensor.ndim,
            std::back_inserter(vec));

  framework::DDim vddim = framework::make_ddim(vec);

  dst->Resize(vddim);
  ::DLDataType type = dl_tensor.dtype;
  void* dst_ptr = GetDstPtrByDLDataType(type, dst, dst_place);

  auto src_ptr = static_cast<const void*>(dl_tensor.data);
  auto size = paddle::framework::product(vddim) * type.bits / 8;

  if (dl_tensor.ctx.device_type == kDLCPU) {
    memory::Copy(dst_place, dst_ptr, src_place, src_ptr, size);
  }
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  if (dl_tensor.ctx.device_type == kDLGPU) {
    platform::CUDAPlace dst_place =
        platform::CUDAPlace(dl_tensor.ctx.device_id);
    platform::CUDAPlace src_place =
        platform::CUDAPlace(dl_tensor.ctx.device_id);
    dst_ptr = GetDstPtrByDLDataType(type, dst, dst_place);
    auto* ctx = platform::DeviceContextPool::Instance().GetByPlace(dst_place);
    memory::Copy(
        dst_place, dst_ptr, src_place, src_ptr, size,
        reinterpret_cast<const platform::CUDADeviceContext&>(*ctx).stream());
  }
#endif
#ifdef PADDLE_WITH_XPU
  PADDLE_THROW(platform::errors::Unimplemented("XPUPlace is not supported"));
#endif
}

template <typename T>
std::string format_tensor(const framework::Tensor& tensor) {
  // TODO(zhiqiu): use the print option to format tensor.
  return "NOT IMPLEMENTED";
}

template <typename T>
std::ostream& print_tensor(std::ostream& os, const framework::Tensor& tensor) {
  auto inspect = tensor.data<T>();
  auto element_num = tensor.numel();

  os << "  - data: [";
  // Note: int8_t and uint8_t are typedefs of char; ostream cannot print them properly.
  if (typeid(int8_t) == typeid(T) || typeid(uint8_t) == typeid(T)) {
    if (element_num > 0) {
      os << signed(inspect[0]);
      for (int j = 1; j < element_num; ++j) {
        os << " " << signed(inspect[j]);
      }
    }
  } else {
    if (element_num > 0) {
      os << inspect[0];
      for (int j = 1; j < element_num; ++j) {
        os << " " << inspect[j];
      }
    }
  }
  os << "]";
  return os;
}

template <>
std::ostream& print_tensor<paddle::platform::complex<float>>(
    std::ostream& os, const framework::Tensor& tensor) {
  auto inspect = tensor.data<paddle::platform::complex<float>>();
  auto element_num = tensor.numel();

  os << "  - data: [";
  if (element_num > 0) {
    os << signed(inspect[0].real) << "+" << signed(inspect[0].imag) << "j";
    for (int j = 1; j < element_num; ++j) {
      os << " " << signed(inspect[j].real) << "+" << signed(inspect[j].imag)
         << "j";
    }
  }
  os << "]";
  return os;
}

template <>
std::ostream& print_tensor<paddle::platform::complex<double>>(
    std::ostream& os, const framework::Tensor& tensor) {
  auto inspect = tensor.data<paddle::platform::complex<double>>();
  auto element_num = tensor.numel();

  os << "  - data: [";
  if (element_num > 0) {
    os << signed(inspect[0].real) << "+" << signed(inspect[0].imag) << "j";
    for (int j = 1; j < element_num; ++j) {
      os << " " << signed(inspect[j].real) << "+" << signed(inspect[j].imag)
         << "j";
    }
  }
  os << "]";
  return os;
}

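// Pretty-prints a tensor: place, shape, layout (plus the oneDNN format when
// available), dtype, and data. Non-CPU tensors are first copied to the host.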
std::ostream& operator<<(std::ostream& os, const Tensor& t) {
  os << "  - place: " << t.place() << "\n";
  os << "  - shape: [" << t.dims() << "]\n";
  os << "  - layout: " << DataLayoutToString(t.layout()) << "\n";

#ifdef PADDLE_WITH_MKLDNN
  os << "  - format: "
     << dnnl_fmt_tag2str(static_cast<dnnl_format_tag_t>(t.format())) << "\n";
#endif

  Tensor tensor;
  tensor.Resize(t.dims());
  if (platform::is_cpu_place(t.place())) {
    tensor.ShareDataWith(t);
  } else {
    platform::CPUPlace place;
    framework::TensorCopy(t, place, &tensor);
    platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
    auto& dev_ctx = *pool.Get(t.place());
    dev_ctx.Wait();
  }

#define PrintTensorCallback(cpp_type, proto_type) \
  do {                                            \
    if (tensor.type() == proto_type) {            \
      os << "  - dtype: " << proto_type << "\n";  \
      print_tensor<cpp_type>(os, tensor);         \
      return os;                                  \
    }                                             \
  } while (0)

  _ForEachDataType_(PrintTensorCallback);
  VLOG(1) << "PrintVar: unrecognized data type:" << t.type();
  return os;
}

}  // namespace framework
}  // namespace paddle