/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/tensor_util.h"

#include <algorithm>
#include <limits>
#include <memory>
#include <string>
#include <utility>
#include <vector>

#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/platform/complex.h"
#include "paddle/fluid/platform/profiler.h"

namespace paddle {
namespace framework {

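// Copies src to dst using the device context ctx; on device places the copy
// is issued asynchronously on the stream owned by ctx, so callers that need
// the data immediately must synchronize the context themselves.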
void TensorCopy(const Tensor& src, const platform::Place& dst_place,
                const platform::DeviceContext& ctx, Tensor* dst) {
  if (&src == dst) {
    auto src_copy = src;
    TensorCopy(src_copy, dst_place, ctx, dst);
    return;
  }

  VLOG(3) << "TensorCopy " << src.dims() << " from " << src.place() << " to "
          << dst_place;
  src.check_memory_size();

  dst->Resize(src.dims());
  dst->set_layout(src.layout());
  auto src_place = src.place();
  auto src_ptr = src.data<void>();
#ifdef PADDLE_WITH_MKLDNN
  dst->set_format(src.format());
  // Because of padding, oneDNN tensors may be bigger than
  // numel() * size(type()).
  auto dst_ptr =
      src.layout() == DataLayout::kMKLDNN
          ? dst->mutable_data(dst_place, src.type(), src.memory_size())
          : dst->mutable_data(dst_place, src.type());
#else
  auto dst_ptr = dst->mutable_data(dst_place, src.type());
#endif
  if (src_ptr == dst_ptr && src_place == dst_place) {
    VLOG(3) << "Skip copy the same data async from " << src_place << " to "
            << dst_place;
    return;
  }

#ifdef PADDLE_WITH_MKLDNN
  auto size = src.layout() == DataLayout::kMKLDNN
                  ? src.memory_size()
                  : src.numel() * SizeOfType(src.type());
#else
  auto size = src.numel() * SizeOfType(src.type());
#endif

  if (platform::is_cpu_place(src_place) && platform::is_cpu_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::CPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::CPUPlace, src_place), src_ptr, size);
  }
#ifdef PADDLE_WITH_XPU
  else if (platform::is_xpu_place(src_place) &&  // NOLINT
           platform::is_cpu_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::CPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::XPUPlace, src_place), src_ptr, size);
  } else if (platform::is_cpu_place(src_place) &&
             platform::is_xpu_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::XPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::CPUPlace, src_place), src_ptr, size);
  } else if (platform::is_xpu_place(src_place) &&
             platform::is_xpu_place(dst_place)) {
    if (src_ptr == dst_ptr) {
      VLOG(3) << "Skip copy the same data async from " << src_place << " to "
              << dst_place;
      return;
    }
    memory::Copy(BOOST_GET_CONST(platform::XPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::XPUPlace, src_place), src_ptr, size);
  } else {
    PADDLE_THROW(platform::errors::Unimplemented(
        "Copy from %s to %s is not supported.", src_place, dst_place));
  }
#endif
#ifdef PADDLE_WITH_ASCEND_CL
  // TODO(zhiqiu): handle different condition like CUDA code below
  else if (platform::is_npu_place(src_place) &&  // NOLINT
           platform::is_cpu_place(dst_place)) {
    auto stream =
        reinterpret_cast<const platform::NPUDeviceContext&>(ctx).stream();
    memory::Copy(BOOST_GET_CONST(platform::CPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::NPUPlace, src_place), src_ptr, size,
                 stream);
  }
  else if (platform::is_cpu_place(src_place) &&  // NOLINT
           platform::is_npu_place(dst_place)) {
    auto stream =
        reinterpret_cast<const platform::NPUDeviceContext&>(ctx).stream();
    memory::Copy(BOOST_GET_CONST(platform::NPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::CPUPlace, src_place), src_ptr, size,
                 stream);
  }
  else if (platform::is_npu_place(src_place) &&  // NOLINT
           platform::is_npu_place(dst_place)) {
    if (src_ptr == dst_ptr) {
      VLOG(3) << "Skip copy the same data async from " << src_place << " to "
              << dst_place;
      return;
    }
    auto stream =
        reinterpret_cast<const platform::NPUDeviceContext&>(ctx).stream();
    memory::Copy(BOOST_GET_CONST(platform::NPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::NPUPlace, src_place), src_ptr, size,
                 stream);
  }
  else {  // NOLINT
    PADDLE_THROW(platform::errors::Unimplemented(
        "Copy from %s to %s is not supported.", src_place, dst_place));
  }
#endif
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  else if (platform::is_cuda_pinned_place(src_place) &&  // NOLINT
           platform::is_cuda_pinned_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::CUDAPinnedPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::CUDAPinnedPlace, src_place), src_ptr,
                 size);
  }
  else if (platform::is_cuda_pinned_place(src_place) &&  // NOLINT
           platform::is_cpu_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::CPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::CUDAPinnedPlace, src_place), src_ptr,
                 size);
  }
  else if (platform::is_cpu_place(src_place) &&  // NOLINT
           platform::is_cuda_pinned_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::CUDAPinnedPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::CPUPlace, src_place), src_ptr, size);
  }
  else if (platform::is_gpu_place(src_place) &&  // NOLINT
           platform::is_cpu_place(dst_place)) {
    auto src_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, src_place);
    auto dst_cpu_place = BOOST_GET_CONST(platform::CPUPlace, dst_place);
    auto ctx_place = ctx.GetPlace();
    PADDLE_ENFORCE_EQ(
        platform::is_gpu_place(ctx_place), true,
        platform::errors::PreconditionNotMet(
            "Context place error, expected GPUPlace, but actually %s.",
            ctx_place));
    auto ctx_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, ctx_place);
    PADDLE_ENFORCE_EQ(src_gpu_place, ctx_gpu_place,
                      platform::errors::Unavailable(
                          "Source place and context place do not match, source "
                          "place is %s, context place is %s.",
                          src_gpu_place, ctx_gpu_place));
    auto stream =
        reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream();
    memory::Copy(dst_cpu_place, dst_ptr, src_gpu_place, src_ptr, size, stream);
  }
  else if (platform::is_cpu_place(src_place) &&  // NOLINT
           platform::is_gpu_place(dst_place)) {
    auto src_cpu_place = BOOST_GET_CONST(platform::CPUPlace, src_place);
    auto dst_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, dst_place);
    auto ctx_place = ctx.GetPlace();
    PADDLE_ENFORCE_EQ(
        platform::is_gpu_place(ctx_place), true,
        platform::errors::PreconditionNotMet(
            "Context place error, expected GPUPlace, but actually %s.",
            ctx_place));
    auto ctx_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, ctx_place);
    PADDLE_ENFORCE_EQ(dst_gpu_place, ctx_gpu_place,
                      platform::errors::Unavailable(
                          "Destination place and context place do not match, "
                          "destination place is %s, context place is %s.",
                          dst_gpu_place, ctx_gpu_place));
    auto stream =
        reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream();
    memory::Copy(dst_gpu_place, dst_ptr, src_cpu_place, src_ptr, size, stream);
  }
  else if (platform::is_gpu_place(src_place) &&  // NOLINT
           platform::is_cuda_pinned_place(dst_place)) {
    auto src_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, src_place);
    auto dst_cuda_pinned_place =
        BOOST_GET_CONST(platform::CUDAPinnedPlace, dst_place);
    auto ctx_place = ctx.GetPlace();
    PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx_place), true,
                      platform::errors::PreconditionNotMet(
                          "Device context place mismatch. When copying Tensor "
                          "data from GPU memory to CUDA Pinned memory, current "
                          "device context place should be GPU."));
    auto ctx_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, ctx_place);
    PADDLE_ENFORCE_EQ(src_gpu_place, ctx_gpu_place,
                      platform::errors::PreconditionNotMet(
                          "The source GPU device and current device context do "
                          "not match. The source GPU device number is %d, but "
                          "device context GPU number is %d.",
                          src_gpu_place.device, ctx_gpu_place.device));
    auto stream =
        reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream();
    memory::Copy(dst_cuda_pinned_place, dst_ptr, src_gpu_place, src_ptr, size,
                 stream);
  }
  else if (platform::is_cuda_pinned_place(src_place) &&  // NOLINT
           platform::is_gpu_place(dst_place)) {
    auto src_cuda_pinned_place =
        BOOST_GET_CONST(platform::CUDAPinnedPlace, src_place);
    auto dst_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, dst_place);
    auto ctx_place = ctx.GetPlace();
    PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx_place), true,
                      platform::errors::PreconditionNotMet(
                          "Device context place mismatch. When copying Tensor "
                          "data from CUDA Pinned memory to GPU memory, current "
                          "device context place should be GPU."));
    auto ctx_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, ctx_place);
    PADDLE_ENFORCE_EQ(dst_gpu_place, ctx_gpu_place,
                      platform::errors::PreconditionNotMet(
                          "The target GPU device and current device context do "
                          "not match. The target GPU device number is %d, but "
                          "device context GPU number is %d.",
                          dst_gpu_place.device, ctx_gpu_place.device));
    auto stream =
        reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream();
    memory::Copy(dst_gpu_place, dst_ptr, src_cuda_pinned_place, src_ptr, size,
                 stream);
  }
  else if (platform::is_gpu_place(src_place) &&  // NOLINT
           platform::is_gpu_place(dst_place)) {
    auto src_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, src_place);
    auto dst_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, dst_place);
    auto ctx_place = ctx.GetPlace();
    PADDLE_ENFORCE_EQ(
        platform::is_gpu_place(ctx_place), true,
        platform::errors::PreconditionNotMet(
            "Context place error, expected GPUPlace, but actually %s.",
            ctx_place));
    auto stream =
        reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream();
    if (platform::is_same_place(src_place, dst_place)) {
      memory::Copy(dst_gpu_place, dst_ptr, src_gpu_place, src_ptr, size,
                   stream);
    } else {
      if (platform::is_same_place(ctx_place, src_place)) {
        memory::Copy(dst_gpu_place, dst_ptr, src_gpu_place, src_ptr, size,
                     stream);
        platform::DeviceContextPool::Instance().Get(src.place())->Wait();
      } else if (platform::is_same_place(ctx_place, dst_place)) {
        platform::DeviceContextPool::Instance().Get(src.place())->Wait();
        memory::Copy(dst_gpu_place, dst_ptr, src_gpu_place, src_ptr, size,
                     stream);
      } else {
        PADDLE_THROW(platform::errors::Unavailable(
            "Context place does not match the source and destination place."));
      }
    }
  }
  else {  // NOLINT
    PADDLE_THROW(platform::errors::Unimplemented(
        "Copying from %s to %s is not supported.", src_place, dst_place));
  }
#endif
}

void TensorCopy(const Tensor& src, const platform::Place& dst_place,
                Tensor* dst) {
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  const platform::DeviceContext* dev_ctx;
  if (platform::is_gpu_place(dst_place)) {
    dev_ctx = pool.Get(dst_place);
  } else {
    dev_ctx = pool.Get(src.place());
  }
  TensorCopy(src, dst_place, *dev_ctx, dst);
}

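// Blocking variant of TensorCopy: device copies are issued with a null
// stream and the data is usable when this function returns.
//
// A minimal usage sketch (hypothetical shapes and places, assuming a CUDA
// build):
//
//   framework::Tensor cpu_t, gpu_t;
//   cpu_t.Resize({2, 3});
//   cpu_t.mutable_data<float>(platform::CPUPlace());
//   TensorCopySync(cpu_t, platform::CUDAPlace(0), &gpu_t);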
void TensorCopySync(const Tensor& src, const platform::Place& dst_place,
                    Tensor* dst) {
  if (&src == dst) {
    auto src_copy = src;
    TensorCopySync(src_copy, dst_place, dst);
    return;
  }

  VLOG(3) << "TensorCopySync " << src.dims() << " from " << src.place()
          << " to " << dst_place;
  src.check_memory_size();
  dst->Resize(src.dims());
  dst->set_layout(src.layout());
#ifdef PADDLE_WITH_MKLDNN
  dst->set_format(src.format());
#endif
  auto src_place = src.place();
  auto src_ptr = src.data<void>();
  auto dst_ptr = dst->mutable_data(dst_place, src.type());

  if (src_ptr == dst_ptr && src_place == dst_place) {
    VLOG(3) << "Skip copy the same data from " << src_place << " to "
            << dst_place;
    return;
  }

  auto size = src.numel() * SizeOfType(src.type());
  if (platform::is_cpu_place(src_place) && platform::is_cpu_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::CPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::CPUPlace, src_place), src_ptr, size);
  }
#ifdef PADDLE_WITH_XPU
  else if (platform::is_xpu_place(src_place) &&  // NOLINT
           platform::is_cpu_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::CPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::XPUPlace, src_place), src_ptr, size);
  } else if (platform::is_cpu_place(src_place) &&  // NOLINT
             platform::is_xpu_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::XPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::CPUPlace, src_place), src_ptr, size);
  } else if (platform::is_xpu_place(src_place) &&  // NOLINT
             platform::is_xpu_place(dst_place)) {
    if (src_ptr == dst_ptr) {
      VLOG(3) << "Skip copy the same data async from " << src_place << " to "
              << dst_place;
      return;
    }
    memory::Copy(BOOST_GET_CONST(platform::XPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::XPUPlace, src_place), src_ptr, size);
  } else {  // NOLINT
    PADDLE_THROW(platform::errors::Unimplemented(
        "Copy from %s to %s is not supported.", src_place, dst_place));
  }
#endif
#ifdef PADDLE_WITH_ASCEND_CL
  else if (platform::is_npu_place(src_place) &&  // NOLINT
           platform::is_cpu_place(dst_place)) {  /* npu -> cpu*/
    memory::Copy(BOOST_GET_CONST(platform::CPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::NPUPlace, src_place), src_ptr, size,
                 nullptr);
  }
  else if (platform::is_cpu_place(src_place) &&  // NOLINT
           platform::is_npu_place(dst_place)) {  /* cpu -> npu*/
    memory::Copy(BOOST_GET_CONST(platform::NPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::CPUPlace, src_place), src_ptr, size,
                 nullptr);
  }
  else if (platform::is_npu_place(src_place) &&  // NOLINT
           platform::is_npu_place(dst_place)) {  /* npu -> npu*/
    if (src_ptr == dst_ptr) {
      VLOG(3) << "Skip copy the same data sync from " << src_place << " to "
              << dst_place;
      return;
    }
    memory::Copy(BOOST_GET_CONST(platform::NPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::NPUPlace, src_place), src_ptr, size,
                 nullptr);
  }
  else {  // NOLINT
    PADDLE_THROW(platform::errors::Unimplemented(
        "Copy from %s to %s is not supported.", src_place, dst_place));
  }
#endif
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  else if (platform::is_cuda_pinned_place(src_place) &&  // NOLINT
           platform::is_cuda_pinned_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::CUDAPinnedPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::CUDAPinnedPlace, src_place), src_ptr,
                 size);
  }
  else if (platform::is_cuda_pinned_place(src_place) &&  // NOLINT
           platform::is_cpu_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::CPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::CUDAPinnedPlace, src_place), src_ptr,
                 size);
  }
  else if (platform::is_cpu_place(src_place) &&  // NOLINT
           platform::is_cuda_pinned_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::CUDAPinnedPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::CPUPlace, src_place), src_ptr, size);
  }
  else if (platform::is_gpu_place(src_place) &&  // NOLINT
           platform::is_cuda_pinned_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::CUDAPinnedPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::CUDAPlace, src_place), src_ptr, size,
                 nullptr);
  }
  else if (platform::is_gpu_place(src_place) &&  // NOLINT
           platform::is_cpu_place(dst_place)) {
    auto src_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, src_place);
    auto dst_cpu_place = BOOST_GET_CONST(platform::CPUPlace, dst_place);
    memory::Copy(dst_cpu_place, dst_ptr, src_gpu_place, src_ptr, size, nullptr);
  }
  else if (platform::is_cpu_place(src_place) &&  // NOLINT
           platform::is_gpu_place(dst_place)) {
    auto src_cpu_place = BOOST_GET_CONST(platform::CPUPlace, src_place);
    auto dst_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, dst_place);
    memory::Copy(dst_gpu_place, dst_ptr, src_cpu_place, src_ptr, size, nullptr);
  }
  else if (platform::is_gpu_place(src_place) &&  // NOLINT
           platform::is_gpu_place(dst_place)) {
    auto src_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, src_place);
    auto dst_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, dst_place);
    memory::Copy(dst_gpu_place, dst_ptr, src_gpu_place, src_ptr, size, nullptr);
  }
  else if (platform::is_cuda_pinned_place(src_place) &&  // NOLINT
           platform::is_gpu_place(dst_place)) {
    auto src_pinned_place =
        BOOST_GET_CONST(platform::CUDAPinnedPlace, src_place);
    auto dst_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, dst_place);
    memory::Copy(dst_gpu_place, dst_ptr, src_pinned_place, src_ptr, size,
                 nullptr);
  }
  else {  // NOLINT
    PADDLE_THROW(platform::errors::Unimplemented(
        "Copy from %s to %s is not supported.", src_place, dst_place));
  }
#endif
}

template <typename Predicate, typename DevCtx>
struct AnyDTypeVisitor {
  Predicate predicate_;
  const Tensor& tensor_;
  const DevCtx& ctx_;
  Tensor* out_;

  AnyDTypeVisitor(Predicate predicate, const Tensor& tensor, const DevCtx& ctx,
                  Tensor* out)
      : predicate_(predicate), tensor_(tensor), ctx_(ctx), out_(out) {}

  template <typename T>
  void apply() const {
    auto t = EigenVector<T>::Flatten(tensor_);
    auto o = EigenScalar<bool>::From(*out_);
    // True if predicate_(t) holds for any element.
    o.device(*ctx_.eigen_device()) = predicate_(t).any();
  }
};

template <typename Predicate, typename DevCtx>
inline void AnyImpl(Predicate predicate, const framework::Tensor& tensor,
                    const DevCtx& ctx, framework::Tensor* out) {
  VisitDataType(tensor.type(), AnyDTypeVisitor<Predicate, DevCtx>(
                                   predicate, tensor, ctx, out));
}

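// Place visitor that runs AnyImpl on the tensor's device and then fetches
// the single boolean result back to the host, staging it through a CPU
// tensor when the data lives on a device.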
template <typename Predicate>
class AnyVisitor : public boost::static_visitor<bool> {
 private:
  const framework::Tensor& tensor_;
  Predicate predicate_;

  bool GetResultHelper(const framework::Tensor& out,
                       const platform::Place& place) const {
    platform::CPUPlace cpu;
    framework::Tensor tmp;
    tmp.Resize({1});
    tmp.mutable_data<bool>(cpu);
    auto ctx = platform::DeviceContextPool::Instance().Get(place);
    ctx->Wait();
    TensorCopy(out, cpu, *ctx, &tmp);
    ctx->Wait();
    return GetResult(tmp, cpu);
  }

 public:
  AnyVisitor(const framework::Tensor& tensor, Predicate predicate)
      : tensor_(tensor), predicate_(std::move(predicate)) {}

  template <typename Place>
  bool operator()(const Place& place) const {
    framework::Tensor out;
    out.Resize({1});
    out.mutable_data<bool>(place);
    auto* ctx = platform::DeviceContextPool::Instance().GetByPlace(place);
    AnyImpl(predicate_, tensor_, *ctx, &out);
    return this->GetResult(out, place);
  }

  bool GetResult(const framework::Tensor& out,
                 const platform::XPUPlace& xpu) const {
    return GetResultHelper(out, xpu);
  }

  bool GetResult(const framework::Tensor& out,
                 const platform::CUDAPlace& gpu) const {
    return GetResultHelper(out, gpu);
  }

  bool GetResult(const framework::Tensor& out,
                 const platform::NPUPlace& npu) const {
    PADDLE_THROW(
        platform::errors::Unimplemented("Not supported on place (%s) ", npu));
    // return GetResultHelper(out, npu);
  }

  bool GetResult(const framework::Tensor& out,
                 const platform::NPUPinnedPlace& cpu) const {
    return *out.data<bool>();
  }

  bool GetResult(const framework::Tensor& out,
                 const platform::CPUPlace& cpu) const {
    return *out.data<bool>();
  }

  bool GetResult(const framework::Tensor& out,
                 const platform::CUDAPinnedPlace& cpu) const {
    return *out.data<bool>();
  }
};

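// Variant of AnyVisitor that leaves the 1-element boolean result in a tensor
// on the same place instead of returning it to the host.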
template <typename Predicate>
class AnyOutVisitor : public boost::static_visitor<> {
 private:
  const framework::Tensor& tensor_;
  mutable framework::Tensor* out_;
  Predicate predicate_;

 public:
  AnyOutVisitor(const framework::Tensor& tensor, Predicate predicate,
                framework::Tensor* out)
      : tensor_(tensor), out_(out), predicate_(std::move(predicate)) {}

  template <typename Place>
  void operator()(const Place& place) const {
    auto* ctx = platform::DeviceContextPool::Instance().GetByPlace(place);
    out_->Resize({1});
    out_->mutable_data<bool>(place);
    AnyImpl(predicate_, tensor_, *ctx, out_);
  }
};

template <typename Predicate>
inline bool Any(const framework::Tensor& tensor, Predicate predicate) {
  AnyVisitor<Predicate> visitor(tensor, predicate);
  auto place = tensor.place();
  return platform::VisitPlace(place, visitor);
}

template <typename Predicate>
inline void Any(const framework::Tensor& tensor, Predicate predicate,
                framework::Tensor* out) {
  AnyOutVisitor<Predicate> visitor(tensor, predicate, out);
  auto place = tensor.place();
  platform::VisitPlace(place, visitor);
}

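// Elementwise counterpart of the Any* machinery above: instead of reducing
// with any(), these visitors write one boolean per element of the input.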
template <typename Predicate, typename DevCtx>
struct AllDTypeVisitor {
  Predicate predicate_;
  const Tensor& tensor_;
  const DevCtx& ctx_;
  Tensor* out_;

  AllDTypeVisitor(Predicate predicate, const Tensor& tensor, const DevCtx& ctx,
                  Tensor* out)
      : predicate_(predicate), tensor_(tensor), ctx_(ctx), out_(out) {}

  template <typename T>
  void apply() const {
    auto t = EigenVector<T>::Flatten(tensor_);
    auto o = EigenVector<bool>::Flatten(*out_);
    o.device(*ctx_.eigen_device()) = predicate_(t);
  }
};

template <typename Predicate, typename DevCtx>
inline void AllImpl(Predicate predicate, const framework::Tensor& tensor,
                    const DevCtx& ctx, framework::Tensor* out) {
  VisitDataType(tensor.type(), AllDTypeVisitor<Predicate, DevCtx>(
                                   predicate, tensor, ctx, out));
}

template <typename Predicate>
class AllOutVisitor : public boost::static_visitor<> {
 private:
  const framework::Tensor& tensor_;
  mutable framework::Tensor* out_;
  Predicate predicate_;

 public:
  AllOutVisitor(const framework::Tensor& tensor, Predicate predicate,
                framework::Tensor* out)
      : tensor_(tensor), out_(out), predicate_(predicate) {}

  template <typename Place>
  void operator()(const Place& place) const {
    auto* ctx = platform::DeviceContextPool::Instance().GetByPlace(place);
    out_->Resize(tensor_.dims());
    out_->mutable_data<bool>(place);
    AllImpl(predicate_, tensor_, *ctx, out_);
  }
};

template <typename Predicate>
inline void All(const framework::Tensor& tensor, Predicate predicate,
                framework::Tensor* out) {
  AllOutVisitor<Predicate> visitor(tensor, predicate, out);
  auto place = tensor.place();
  platform::VisitPlace(place, visitor);
}

struct ContainsNANPredicate {
  template <typename T>
  auto operator()(const T& eigen_vec) const
      -> decltype(std::declval<T>().isnan()) {
    // Cast the eigen vector to a vector of bool: true where the element is
    // NaN.
    return eigen_vec.isnan();
  }
};

bool TensorContainsNAN(const framework::Tensor& tensor) {
  ContainsNANPredicate predicate;
  return Any(tensor, predicate);
}

void TensorContainsNAN(const framework::Tensor& tensor,
                       framework::Tensor* out) {
  ContainsNANPredicate predicate;
  Any(tensor, predicate, out);
}

void TensorContainsNANV2(const framework::Tensor& tensor,
                         framework::Tensor* out) {
  ContainsNANPredicate predicate;
  All(tensor, predicate, out);
}

struct ContainsInfPredicate {
  template <typename T>
  auto operator()(const T& eigen_vec) const
      -> decltype(std::declval<T>().isinf()) {
    // Cast the eigen vector to a vector of bool: true where the element is
    // Inf.
    return eigen_vec.isinf();
  }
};

bool TensorContainsInf(const framework::Tensor& tensor) {
  ContainsInfPredicate predicate;
  return Any(tensor, predicate);
}

void TensorContainsInf(const framework::Tensor& tensor,
                       framework::Tensor* out) {
  ContainsInfPredicate predicate;
  Any(tensor, predicate, out);
}

void TensorContainsInfV2(const framework::Tensor& tensor,
                         framework::Tensor* out) {
  ContainsInfPredicate predicate;
  All(tensor, predicate, out);
}

// NOTE(dzhwinter):
// Isfinite needs an AllVisitor to loop through all the elements.
// We choose two CUDA calls instead of one AllVisitor. The AllVisitor
// should be implemented if the performance hurts.
bool TensorIsfinite(const framework::Tensor& tensor) {
  ContainsInfPredicate pred_inf;
  ContainsNANPredicate pred_nan;
  return !Any(tensor, pred_inf) && !Any(tensor, pred_nan);
}

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
template <typename T>
static __global__ void BothFalse(const T* cmp, T* out, int element_num) {
  CUDA_KERNEL_LOOP(i, element_num) { out[i] = (!cmp[i]) && (!out[i]); }
}
#endif

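// Computes out[i] = !in[i] && !out[i] elementwise; used below to combine the
// "contains inf" and "contains nan" masks into an "is finite" mask.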
struct BothFalseVisitor : public boost::static_visitor<> {
  const framework::Tensor& in_;
  mutable framework::Tensor* out_;
  BothFalseVisitor(const framework::Tensor& in, framework::Tensor* out)
      : in_(in), out_(out) {}

  template <typename Place>
  void operator()(const Place& place) const {
    VisitorImpl(place);
  }

  void VisitorImpl(const platform::XPUPlace& xpu) const {
    PADDLE_THROW(platform::errors::Unimplemented("XPUPlace is not supported"));
  }

  void VisitorImpl(const platform::CUDAPlace& gpu) const {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    auto* ctx = platform::DeviceContextPool::Instance().GetByPlace(gpu);
    constexpr int MAX_BLOCK_DIM = 512;
    const int MAX_GRID_DIM = ctx->GetMaxPhysicalThreadCount() / MAX_BLOCK_DIM;
    int element_num = in_.numel();
    int block_size = (element_num >= MAX_BLOCK_DIM)
                         ? MAX_BLOCK_DIM
                         : (1 << static_cast<int>(std::log2(element_num)));
    int grid_size = element_num / block_size;
    grid_size = (grid_size >= MAX_GRID_DIM) ? MAX_GRID_DIM : grid_size;
    BothFalse<bool><<<grid_size, block_size, 0, ctx->stream()>>>(
        in_.data<bool>(), out_->mutable_data<bool>(gpu), element_num);
#endif
  }

  void VisitorImpl(const platform::NPUPlace& npu) const {
    // TODO(zhiqiu)
  }

  void VisitorImpl(const platform::CPUPlace& cpu) const {
    int num = in_.numel();
    const bool* in_ptr = in_.data<bool>();
    bool* out_ptr = out_->data<bool>();
    for (int i = 0; i < num; ++i) {
      bool lhs = !in_ptr[i];
      bool rhs = !out_ptr[i];
      out_ptr[i] = lhs && rhs;
    }
  }

  void VisitorImpl(
      const platform::CUDAPinnedPlace& cpu /* equals to cpu */) const {
    int num = in_.numel();
    const bool* in_ptr = in_.data<bool>();
    bool* out_ptr = out_->data<bool>();
    for (int i = 0; i < num; ++i) {
      bool lhs = !in_ptr[i];
      bool rhs = !out_ptr[i];
      out_ptr[i] = lhs && rhs;
    }
  }

  void VisitorImpl(
      const platform::NPUPinnedPlace& cpu /* equals to cpu */) const {
    int num = in_.numel();
    const bool* in_ptr = in_.data<bool>();
    bool* out_ptr = out_->data<bool>();
    for (int i = 0; i < num; ++i) {
      bool lhs = !in_ptr[i];
      bool rhs = !out_ptr[i];
      out_ptr[i] = lhs && rhs;
    }
  }
};

void TensorIsfinite(const framework::Tensor& tensor, framework::Tensor* out) {
  framework::Tensor tmp;
  TensorContainsInf(tensor, &tmp);
  TensorContainsNAN(tensor, out);
  BothFalseVisitor visitor(tmp, out);
  auto place = tensor.place();
  platform::VisitPlace(place, visitor);
}

void TensorIsfiniteV2(const framework::Tensor& tensor, framework::Tensor* out) {
  framework::Tensor tmp;
  TensorContainsInfV2(tensor, &tmp);
  TensorContainsNANV2(tensor, out);
  BothFalseVisitor visitor(tmp, out);
  auto place = tensor.place();
  platform::VisitPlace(place, visitor);
}

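// Serialization format: a uint32_t version (currently 0), then an int32_t
// byte count followed by the protobuf-encoded TensorDesc, then the raw
// tensor data (staged through a 64MB host buffer for device tensors).
//
// A round-trip sketch (hypothetical tensor t, CPU context assumed):
//
//   std::ostringstream os;
//   platform::CPUDeviceContext cpu_ctx;
//   TensorToStream(os, t, cpu_ctx);
//   std::istringstream is(os.str());
//   framework::Tensor t2;
//   TensorFromStream(is, &t2, cpu_ctx);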
void TensorToStream(std::ostream& os, const Tensor& tensor,
                    const platform::DeviceContext& dev_ctx) {
  {  // the 1st field, uint32_t version
    constexpr uint32_t version = 0;
    os.write(reinterpret_cast<const char*>(&version), sizeof(version));
  }
  {  // the 2nd field, tensor description
     // int32_t  size
     // void*    protobuf message
    proto::VarType::TensorDesc desc;
    desc.set_data_type(tensor.type());
    auto dims = framework::vectorize(tensor.dims());
    auto* pb_dims = desc.mutable_dims();
    pb_dims->Resize(static_cast<int>(dims.size()), 0);
    std::copy(dims.begin(), dims.end(), pb_dims->begin());
    int32_t size = desc.ByteSize();
    os.write(reinterpret_cast<const char*>(&size), sizeof(size));
    auto out = desc.SerializeAsString();
    os.write(out.data(), size);
  }
  {  // the 3rd field, tensor data
    uint64_t size = tensor.numel() * framework::SizeOfType(tensor.type());

    auto* data_ptr = tensor.data<void>();
    PADDLE_ENFORCE_LT(size, (std::numeric_limits<std::streamsize>::max)(),
                      platform::errors::ResourceExhausted(
                          "tensor size %d overflow when writing tensor", size));
    if (platform::is_gpu_place(tensor.place())) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
      constexpr size_t kBufSize = 1024 * 1024 * 64;  // 64MB
      std::unique_ptr<char[]> buf(new char[kBufSize]);
      auto& gpu_dev_ctx =
          static_cast<const platform::CUDADeviceContext&>(dev_ctx);
      platform::CPUPlace cpu;
      uintptr_t data = reinterpret_cast<uintptr_t>(data_ptr);
      while (size != 0) {
        size_t size_to_write = std::min(kBufSize, static_cast<size_t>(size));
        memory::Copy(cpu, buf.get(),
                     BOOST_GET_CONST(platform::CUDAPlace, tensor.place()),
                     reinterpret_cast<const void*>(data), size_to_write,
                     gpu_dev_ctx.stream());
        gpu_dev_ctx.Wait();
        os.write(buf.get(), size_to_write);
        data += size_to_write;
        size -= size_to_write;
      }
#else
      PADDLE_THROW(platform::errors::Unimplemented(
          "CUDAPlace is not supported when not compiled with CUDA"));
#endif
    } else if (platform::is_xpu_place(tensor.place())) {
#ifdef PADDLE_WITH_XPU
      constexpr size_t kBufSize = 1024 * 1024 * 64;  // 64MB
      std::unique_ptr<char[]> buf(new char[kBufSize]);
      auto& xpu_dev_ctx =
          static_cast<const platform::XPUDeviceContext&>(dev_ctx);
      platform::CPUPlace cpu;
      uintptr_t data = reinterpret_cast<uintptr_t>(data_ptr);
      while (size != 0) {
        size_t size_to_write = std::min(kBufSize, static_cast<size_t>(size));
        memory::Copy(cpu, buf.get(),
                     BOOST_GET_CONST(platform::XPUPlace, tensor.place()),
                     reinterpret_cast<const void*>(data), size_to_write);
        xpu_dev_ctx.Wait();
        os.write(buf.get(), size_to_write);
        data += size_to_write;
        size -= size_to_write;
      }
#else
      PADDLE_THROW(platform::errors::Unimplemented(
          "XPUPlace is not supported when not compiled with XPU"));
#endif
    } else if (platform::is_npu_place(tensor.place())) {
#ifdef PADDLE_WITH_ASCEND_CL
      constexpr size_t kBufSize = 1024 * 1024 * 64;  // 64MB
      std::unique_ptr<char[]> buf(new char[kBufSize]);
      auto& npu_dev_ctx =
          static_cast<const platform::NPUDeviceContext&>(dev_ctx);
      platform::CPUPlace cpu;
      uintptr_t data = reinterpret_cast<uintptr_t>(data_ptr);
      while (size != 0) {
        size_t size_to_write = std::min(kBufSize, static_cast<size_t>(size));
        memory::Copy(cpu, buf.get(),
                     BOOST_GET_CONST(platform::NPUPlace, tensor.place()),
                     reinterpret_cast<const void*>(data), size_to_write,
                     npu_dev_ctx.stream());
        npu_dev_ctx.Wait();
        os.write(buf.get(), size_to_write);
        data += size_to_write;
        size -= size_to_write;
      }
#else
      PADDLE_THROW(platform::errors::Unimplemented(
          "NPUPlace is not supported when not compiled with NPU"));
#endif
    } else {
      os.write(static_cast<const char*>(data_ptr),
               static_cast<std::streamsize>(size));
    }
  }
}

struct DeserializedDataFunctor {
  DeserializedDataFunctor(void** buf, Tensor* tensor,
                          const platform::Place& place)
      : buf_(buf), tensor_(tensor), place_(place) {}

  template <typename T>
  void apply() {
    *buf_ = tensor_->mutable_data<T>(place_);
  }

  void** buf_;
  Tensor* tensor_;
  platform::Place place_;
};

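// Seekable variant of TensorFromStream: skips `seek` elements of the stored
// data and reads a tensor of the given shape starting at that offset.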
void TensorFromStream(std::istream& is, Tensor* tensor,
                      const platform::DeviceContext& dev_ctx,
                      const size_t& seek, const std::vector<int64_t>& shape) {
  uint32_t version;
  is.read(reinterpret_cast<char*>(&version), sizeof(version));

  PADDLE_ENFORCE_EQ(
      version, 0U,
      platform::errors::InvalidArgument(
          "tensor version %u is not supported, only version 0 is supported",
          version));

  proto::VarType::TensorDesc desc;
  {  // int32_t size
    // proto buffer
    int32_t size;
    is.read(reinterpret_cast<char*>(&size), sizeof(size));
    std::unique_ptr<char[]> buf(new char[size]);
    is.read(reinterpret_cast<char*>(buf.get()), size);
    PADDLE_ENFORCE_EQ(
        desc.ParseFromArray(buf.get(), size), true,
        platform::errors::InvalidArgument("Cannot parse tensor desc"));
  }
  {  // read tensor
    tensor->Resize(framework::make_ddim(shape));
    size_t seekg = seek * framework::SizeOfType(desc.data_type());
    is.seekg(seekg, is.cur);

    void* buf;
    auto ctx = platform::CPUDeviceContext();
    size_t size = tensor->numel() * framework::SizeOfType(desc.data_type());
    if (platform::is_gpu_place(dev_ctx.GetPlace()) ||
        platform::is_xpu_place(dev_ctx.GetPlace()) ||
        platform::is_npu_place(dev_ctx.GetPlace())) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) || \
    defined(PADDLE_WITH_XPU) || defined(PADDLE_WITH_ASCEND_CL)
      Tensor cpu_tensor;
      cpu_tensor.Resize(framework::make_ddim(shape));
      framework::VisitDataType(
          desc.data_type(),
          DeserializedDataFunctor(&buf, &cpu_tensor, ctx.GetPlace()));
      is.read(static_cast<char*>(buf), size);
      auto dst_place = dev_ctx.GetPlace();
      framework::TensorCopy(cpu_tensor, dst_place, dev_ctx, tensor);
      if (platform::is_npu_place(dev_ctx.GetPlace())) {
        dev_ctx.Wait();
      }
#else
      if (platform::is_gpu_place(dev_ctx.GetPlace())) {
        PADDLE_THROW(platform::errors::Unimplemented(
            "CUDAPlace is not supported when not compiled with CUDA"));
      } else if (platform::is_xpu_place(dev_ctx.GetPlace())) {
        PADDLE_THROW(platform::errors::Unimplemented(
            "XPUPlace is not supported when not compiled with XPU"));
      } else {
        PADDLE_THROW(platform::errors::Unimplemented(
            "NPUPlace is not supported when not compiled with NPU"));
      }
#endif
    } else {
      framework::VisitDataType(
          desc.data_type(),
          DeserializedDataFunctor(&buf, tensor, ctx.GetPlace()));
      is.read(static_cast<char*>(buf), size);
    }
  }
}

void TensorFromStream(std::istream& is, Tensor* tensor,
                      const platform::DeviceContext& dev_ctx) {
  uint32_t version;
  is.read(reinterpret_cast<char*>(&version), sizeof(version));
  PADDLE_ENFORCE_EQ(
      version, 0U,
      platform::errors::InvalidArgument(
          "tensor version %u is not supported, only version 0 is supported",
          version));
  proto::VarType::TensorDesc desc;
  {  // int32_t size
     // proto buffer
    int32_t size;
    is.read(reinterpret_cast<char*>(&size), sizeof(size));
    std::unique_ptr<char[]> buf(new char[size]);
    is.read(reinterpret_cast<char*>(buf.get()), size);
    PADDLE_ENFORCE_EQ(
        desc.ParseFromArray(buf.get(), size), true,
        platform::errors::InvalidArgument("Cannot parse tensor desc"));
  }
  {  // read tensor
    std::vector<int64_t> dims;
    dims.reserve(static_cast<size_t>(desc.dims().size()));
    std::copy(desc.dims().begin(), desc.dims().end(), std::back_inserter(dims));
    tensor->Resize(framework::make_ddim(dims));
    void* buf;
    auto ctx = platform::CPUDeviceContext();
    size_t size = tensor->numel() * framework::SizeOfType(desc.data_type());
    if (platform::is_gpu_place(dev_ctx.GetPlace()) ||
        platform::is_xpu_place(dev_ctx.GetPlace()) ||
        platform::is_npu_place(dev_ctx.GetPlace())) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) || \
    defined(PADDLE_WITH_XPU) || defined(PADDLE_WITH_ASCEND_CL)
      Tensor cpu_tensor;
      cpu_tensor.Resize(framework::make_ddim(dims));
      framework::VisitDataType(
          desc.data_type(),
          DeserializedDataFunctor(&buf, &cpu_tensor, ctx.GetPlace()));
      is.read(static_cast<char*>(buf), size);
      auto dst_place = dev_ctx.GetPlace();
      framework::TensorCopy(cpu_tensor, dst_place, dev_ctx, tensor);
      if (platform::is_npu_place(dev_ctx.GetPlace())) {
        dev_ctx.Wait();
      }
#else
      if (platform::is_gpu_place(dev_ctx.GetPlace())) {
        PADDLE_THROW(platform::errors::Unimplemented(
            "CUDAPlace is not supported when not compiled with CUDA"));
      } else if (platform::is_xpu_place(dev_ctx.GetPlace())) {
        PADDLE_THROW(platform::errors::Unimplemented(
            "XPUPlace is not supported when not compiled with XPU"));
      } else {
        PADDLE_THROW(platform::errors::Unimplemented(
            "NPUPlace is not supported when not compiled with NPU"));
      }
#endif
    } else {
      framework::VisitDataType(
          desc.data_type(),
          DeserializedDataFunctor(&buf, tensor, ctx.GetPlace()));
      is.read(static_cast<char*>(buf), size);
    }
  }
}

// Get the tensor data pointer according to the DLDataType.
void* GetDstPtrByDLDataType(DLDataType type, framework::Tensor* dst,
                            const platform::Place& dst_place) {
  // vector types are not currently supported
  PADDLE_ENFORCE_LE(type.lanes, 1,
                    platform::errors::Unimplemented(
                        "Vector type is not supported currently."));

  switch (type.bits) {
    case 8:
      if (type.code == kDLInt)
        return static_cast<void*>(dst->mutable_data<int8_t>(dst_place));
      if (type.code == kDLUInt)
        return static_cast<void*>(dst->mutable_data<uint8_t>(dst_place));
      PADDLE_THROW(platform::errors::Unimplemented(
          "DLDataType code <%d> is illegal when DLDataType.bits is <%d>.",
          type.code, type.bits));
    case 16:
      if (type.code == kDLInt)
        return static_cast<void*>(dst->mutable_data<int16_t>(dst_place));
      if (type.code == kDLFloat)
        return static_cast<void*>(
            dst->mutable_data<paddle::platform::float16>(dst_place));
      PADDLE_THROW(platform::errors::Unimplemented(
          "DLDataType code <%d> is illegal when DLDataType.bits is <%d>.",
          type.code, type.bits));
    case 32:
      if (type.code == kDLInt)
        return static_cast<void*>(dst->mutable_data<int32_t>(dst_place));
      if (type.code == kDLFloat)
        return static_cast<void*>(dst->mutable_data<float>(dst_place));
      PADDLE_THROW(platform::errors::Unimplemented(
          "DLDataType code <%d> is illegal when DLDataType.bits is <%d>.",
          type.code, type.bits));
    case 64:
      if (type.code == kDLInt)
        return static_cast<void*>(dst->mutable_data<int64_t>(dst_place));
      if (type.code == kDLFloat)
        return static_cast<void*>(dst->mutable_data<double>(dst_place));
      PADDLE_THROW(platform::errors::Unimplemented(
          "DLDataType code <%d> is illegal when DLDataType.bits is <%d>.",
          type.code, type.bits));
    default:
      PADDLE_THROW(platform::errors::Unimplemented(
          "Unsupported DLDataType.bits %d.", type.bits));
  }
}

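// Imports a DLPack tensor by copying its buffer into dst on the matching
// place (CPU, or the same CUDA device for kDLGPU tensors).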
void TensorFromDLPack(const ::DLTensor& dl_tensor, framework::Tensor* dst) {
  platform::CPUPlace dst_place = platform::CPUPlace();
  platform::CPUPlace src_place = platform::CPUPlace();

  std::vector<int64_t> vec;
  std::copy(dl_tensor.shape, dl_tensor.shape + dl_tensor.ndim,
            std::back_inserter(vec));

  framework::DDim vddim = framework::make_ddim(vec);

  dst->Resize(vddim);
  ::DLDataType type = dl_tensor.dtype;
  void* dst_ptr = GetDstPtrByDLDataType(type, dst, dst_place);

  auto src_ptr = static_cast<const void*>(dl_tensor.data);
  auto size = paddle::framework::product(vddim) * type.bits / 8;

  if (dl_tensor.ctx.device_type == kDLCPU) {
    memory::Copy(dst_place, dst_ptr, src_place, src_ptr, size);
  }
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  if (dl_tensor.ctx.device_type == kDLGPU) {
    platform::CUDAPlace dst_place =
        platform::CUDAPlace(dl_tensor.ctx.device_id);
    platform::CUDAPlace src_place =
        platform::CUDAPlace(dl_tensor.ctx.device_id);
    dst_ptr = GetDstPtrByDLDataType(type, dst, dst_place);
    auto* ctx = platform::DeviceContextPool::Instance().GetByPlace(dst_place);
    memory::Copy(
        dst_place, dst_ptr, src_place, src_ptr, size,
        reinterpret_cast<const platform::CUDADeviceContext&>(*ctx).stream());
  }
#endif
#ifdef PADDLE_WITH_XPU
  PADDLE_THROW(platform::errors::Unimplemented("XPUPlace is not supported"));
#endif
}

template <typename T>
std::string format_tensor(const framework::Tensor& tensor) {
  // TODO(zhiqiu): use the print option to format tensor.
  return "NOT IMPLEMENTED";
}

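// Prints the flattened tensor data; 8-bit integer types are cast so they are
// printed as numbers rather than characters.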
template <typename T>
std::ostream& print_tensor(std::ostream& os, const framework::Tensor& tensor) {
  auto inspect = tensor.data<T>();
  auto element_num = tensor.numel();

  os << "  - data: [";
  // Note: int8_t and uint8_t are typedefs of char types, which ostream
  // prints as characters rather than numbers unless cast.
  if (typeid(int8_t) == typeid(T) || typeid(uint8_t) == typeid(T)) {
    if (element_num > 0) {
      os << signed(inspect[0]);
      for (int j = 1; j < element_num; ++j) {
        os << " " << signed(inspect[j]);
      }
    }
  } else {
    if (element_num > 0) {
      os << inspect[0];
      for (int j = 1; j < element_num; ++j) {
        os << " " << inspect[j];
      }
    }
  }
  os << "]";
  return os;
}

template <>
std::ostream& print_tensor<paddle::platform::complex<float>>(
    std::ostream& os, const framework::Tensor& tensor) {
  auto inspect = tensor.data<paddle::platform::complex<float>>();
  auto element_num = tensor.numel();

  os << "  - data: [";
  if (element_num > 0) {
    os << signed(inspect[0].real) << "+" << signed(inspect[0].imag) << "j";
    for (int j = 1; j < element_num; ++j) {
      os << " " << signed(inspect[j].real) << "+" << signed(inspect[j].imag)
         << "j";
    }
  }
  os << "]";
  return os;
}

template <>
std::ostream& print_tensor<paddle::platform::complex<double>>(
    std::ostream& os, const framework::Tensor& tensor) {
  auto inspect = tensor.data<paddle::platform::complex<double>>();
  auto element_num = tensor.numel();

  os << "  - data: [";
  if (element_num > 0) {
    os << signed(inspect[0].real) << "+" << signed(inspect[0].imag) << "j";
    for (int j = 1; j < element_num; ++j) {
      os << " " << signed(inspect[j].real) << "+" << signed(inspect[j].imag)
         << "j";
    }
  }
  os << "]";
  return os;
}

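// Streams a human-readable summary (place, shape, layout, dtype, data),
// copying the tensor to the CPU first when it lives on a device.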
std::ostream& operator<<(std::ostream& os, const Tensor& t) {
  os << "  - place: " << t.place() << "\n";
  os << "  - shape: [" << t.dims() << "]\n";
  os << "  - layout: " << DataLayoutToString(t.layout()) << "\n";

  Tensor tensor;
  tensor.Resize(t.dims());
  if (platform::is_cpu_place(t.place())) {
    tensor.ShareDataWith(t);
  } else {
    platform::CPUPlace place;
    framework::TensorCopy(t, place, &tensor);
    platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
    auto& dev_ctx = *pool.Get(t.place());
    dev_ctx.Wait();
  }

#define PrintTensorCallback(cpp_type, proto_type) \
  do {                                            \
    if (tensor.type() == proto_type) {            \
      os << "  - dtype: " << proto_type << "\n";  \
      print_tensor<cpp_type>(os, tensor);         \
      return os;                                  \
    }                                             \
  } while (0)

  _ForEachDataType_(PrintTensorCallback);
  VLOG(1) << "PrintVar: unrecognized data type:" << t.type();
  return os;
}

}  // namespace framework
}  // namespace paddle