/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/phi/kernels/copy_kernel.h"

#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/common/data_type.h"
#include "paddle/phi/core/compat/convert_utils.h"
#include "paddle/phi/core/kernel_registry.h"

// See Note [ Why still include the fluid headers? ]
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/platform/device_context.h"

namespace phi {

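// Copies `src` into `dst`, resizing `dst` to `src`'s dims and allocating it
// on the destination place. When `blocking` is false, GPU copies are enqueued
// on the context's stream and may return before the transfer completes.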
template <typename Context>
void Copy(const Context& dev_ctx,
          const DenseTensor& src,
          Place dst_place,
          bool blocking,
          DenseTensor* dst) {
  auto* src_ptr = src.data();
  const auto& src_place = src.place();

  if (src_place == dst_place && paddle::platform::is_cpu_place(src_place)) {
    PADDLE_THROW(phi::errors::InvalidArgument(
        "The src and dst tensors are both CPU tensors; the copy function "
        "should be called in CPU mode."));
  }

  VLOG(3) << "TensorCopy " << src.dims() << " from " << src.place() << " to "
          << dst_place;

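  // Resize `dst` to `src`'s shape and allocate it with `src`'s dtype: host
  // memory for CPU destinations, device memory otherwise.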
  dst->Resize(src.dims());

  void* dst_ptr = nullptr;
  if (paddle::platform::is_cpu_place(dst_place)) {
    dst_ptr = dev_ctx.HostAlloc(dst, src.dtype());
  } else {
    dst_ptr = dev_ctx.Alloc(dst, src.dtype());
  }

  if (src_ptr == dst_ptr && src_place == dst_place) {
    VLOG(3) << "Skip copy the same data async from " << src_place << " to "
            << dst_place;
    return;
  }
  VLOG(4) << "src:" << src_ptr << ", dst:" << dst_ptr;

  PADDLE_ENFORCE_EQ(
      dst->layout(),
      src.layout(),
      phi::errors::PreconditionNotMet(
          "The layout of the src and dst tensors must match."));

  auto size = src.numel() * paddle::experimental::SizeOf(src.dtype());

  if (paddle::platform::is_gpu_place(src_place) &&  // NOLINT
      paddle::platform::is_cpu_place(dst_place)) {
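    // Case 1: device-to-host copy (GPU -> CPU).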
    auto src_gpu_place = src_place;
    auto dst_cpu_place = dst_place;
    auto ctx_place = dev_ctx.GetPlace();
    PADDLE_ENFORCE_EQ(
        paddle::platform::is_gpu_place(ctx_place),
        true,
        phi::errors::PreconditionNotMet(
            "Context place error, expected GPUPlace, but actually %s.",
            ctx_place));
    auto ctx_gpu_place = ctx_place;
    PADDLE_ENFORCE_EQ(src_gpu_place,
                      ctx_gpu_place,
                      phi::errors::Unavailable(
                          "Source place and context place do not match, source "
                          "place is %s, context place is %s.",
                          src_gpu_place,
                          ctx_gpu_place));
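    // A null stream makes paddle::memory::Copy synchronous; otherwise the
    // copy is issued asynchronously on this context's stream.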
    auto stream =
        blocking ? nullptr
                 : reinterpret_cast<const phi::GPUContext&>(dev_ctx).stream();
    paddle::memory::Copy(
        dst_cpu_place, dst_ptr, src_gpu_place, src_ptr, size, stream);
  } else if ((paddle::platform::is_cpu_place(src_place) ||
              paddle::platform::is_cuda_pinned_place(src_place)) &&  // NOLINT
             paddle::platform::is_gpu_place(dst_place)) {
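    // Case 2: host-to-device copy (CPU or CUDA-pinned memory -> GPU).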
    auto src_cpu_place = src_place;
    auto dst_gpu_place = dst_place;
    auto ctx_place = dev_ctx.GetPlace();
    PADDLE_ENFORCE_EQ(
        paddle::platform::is_gpu_place(ctx_place),
        true,
        phi::errors::PreconditionNotMet(
            "Context place error, expected GPUPlace, but actually %s.",
            ctx_place));
    auto ctx_gpu_place = ctx_place;
    PADDLE_ENFORCE_EQ(dst_gpu_place,
                      ctx_gpu_place,
                      phi::errors::Unavailable(
                          "Destination place and context place do not match, "
                          "destination place is %s, context place is %s.",
                          dst_gpu_place,
                          ctx_gpu_place));
    auto stream =
        blocking ? nullptr
                 : reinterpret_cast<const phi::GPUContext&>(dev_ctx).stream();
    paddle::memory::Copy(
        dst_gpu_place, dst_ptr, src_cpu_place, src_ptr, size, stream);
  } else if (paddle::platform::is_gpu_place(src_place) &&  // NOLINT
             paddle::platform::is_gpu_place(dst_place)) {
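    // Case 3: device-to-device copy (GPU -> GPU), possibly across devices.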
    auto src_gpu_place = src_place;
    auto dst_gpu_place = dst_place;
    auto ctx_place = dev_ctx.GetPlace();
    PADDLE_ENFORCE_EQ(
        paddle::platform::is_gpu_place(ctx_place),
        true,
        phi::errors::PreconditionNotMet(
            "Context place error, expected GPUPlace, but actually %s.",
            ctx_place));
    auto stream =
        blocking ? nullptr
                 : reinterpret_cast<const phi::GPUContext&>(dev_ctx).stream();
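    // Same-device copies are simply issued on `stream`. Cross-device copies
    // synchronize with the source device's context via Wait(): after the copy
    // when this context lives on the source device, before it when it lives
    // on the destination device.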
    if (paddle::platform::is_same_place(src_place, dst_place)) {
      paddle::memory::Copy(
          dst_gpu_place, dst_ptr, src_gpu_place, src_ptr, size, stream);
    } else {
      if (paddle::platform::is_same_place(ctx_place, src_place)) {
        paddle::memory::Copy(
            dst_gpu_place, dst_ptr, src_gpu_place, src_ptr, size, stream);
        paddle::platform::DeviceContextPool::Instance()
            .Get(src.place())
            ->Wait();
      } else if (paddle::platform::is_same_place(ctx_place, dst_place)) {
        paddle::platform::DeviceContextPool::Instance()
            .Get(src.place())
            ->Wait();
        paddle::memory::Copy(
            dst_gpu_place, dst_ptr, src_gpu_place, src_ptr, size, stream);
      } else {
        PADDLE_THROW(phi::errors::Unavailable(
            "Context place does not match the source and destination place."));
      }
    }
  } else {
    PADDLE_THROW(phi::errors::InvalidArgument(
        "Place type error. Please check the place of src and dst Tensor."));
  }
}

}  // namespace phi

PD_REGISTER_GENERAL_KERNEL(
    copy, GPU, ALL_LAYOUT, phi::Copy<phi::GPUContext>, ALL_DTYPE) {}
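
// A minimal usage sketch (illustrative only; how `dev_ctx` and the tensors
// are obtained is assumed here, not defined in this file):
//
//   phi::DenseTensor src;  // filled elsewhere, e.g. on the host
//   phi::DenseTensor dst;
//   // dev_ctx: a phi::GPUContext bound to the target device
//   phi::Copy(dev_ctx, src, dev_ctx.GetPlace(), /*blocking=*/true, &dst);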