/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/pten/kernels/copy_kernel.h"

#include "paddle/pten/backends/gpu/gpu_context.h"
#include "paddle/pten/common/data_type.h"
#include "paddle/pten/core/convert_utils.h"
#include "paddle/pten/core/kernel_registry.h"

// See Note [ Why still include the fluid headers? ]
#include "paddle/fluid/memory/memcpy.h"

namespace pten {

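// Copies src into dst, resizing and allocating dst as needed. When
// `blocking` is true, a null stream is passed to paddle::memory::Copy and
// the copy is synchronous; otherwise the copy is enqueued asynchronously
// on the device context's CUDA stream.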
template <typename Context>
void Copy(const Context& dev_ctx,
          const DenseTensor& src,
          bool blocking,
          DenseTensor* dst) {
  auto* src_ptr = src.data();
  const auto& src_place = src.place();
  auto dst_place = dst->place();

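  // A GPU copy kernel cannot service a pure CPU -> CPU copy; that case
  // belongs to the CPU copy kernel.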
  if (src_place == dst_place && paddle::platform::is_cpu_place(src_place)) {
    PADDLE_THROW(paddle::platform::errors::InvalidArgument(
        "The src and dst tensor are all CPU tensor, you should call copy "
        "function in CPU mode."));
  }

  VLOG(3) << "TensorCopy " << src.dims() << " from " << src.place() << " to "
          << dst_place;

  dst->ResizeAndAllocate(src.dims());
  auto* dst_ptr = dst->mutable_data(dst_place);

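  // Nothing to do when src and dst already alias the same memory.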
  if (src_ptr == dst_ptr && src_place == dst_place) {
    VLOG(3) << "Skip copy the same data async from " << src_place << " to "
            << dst_place;
    return;
  }
  VLOG(4) << "src:" << src_ptr << ", dst:" << dst_ptr;

  // Copy preserves layout; the src and dst layouts must already match.
  CHECK(dst->layout() == src.layout());

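  // Number of bytes to copy: element count times the size of one element.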
  auto size = src.numel() *
              paddle::framework::SizeOfType(TransToProtoVarType(src.dtype()));

  if (paddle::platform::is_cuda_pinned_place(src_place) &&  // NOLINT
      paddle::platform::is_cuda_pinned_place(dst_place)) {
    paddle::memory::Copy(dst_place, dst_ptr, src_place, src_ptr, size);
  } else if (paddle::platform::is_cuda_pinned_place(src_place) &&  // NOLINT
             paddle::platform::is_cpu_place(dst_place)) {
    paddle::memory::Copy(dst_place, dst_ptr, src_place, src_ptr, size);
  } else if (paddle::platform::is_cpu_place(src_place) &&  // NOLINT
             paddle::platform::is_cuda_pinned_place(dst_place)) {
    paddle::memory::Copy(dst_place, dst_ptr, src_place, src_ptr, size);
  } else if (paddle::platform::is_gpu_place(src_place) &&  // NOLINT
             paddle::platform::is_cpu_place(dst_place)) {
    auto src_gpu_place = src_place;
    auto dst_cpu_place = dst_place;
    auto ctx_place = dev_ctx.GetPlace();
    PADDLE_ENFORCE_EQ(
        paddle::platform::is_gpu_place(ctx_place),
        true,
        paddle::platform::errors::PreconditionNotMet(
            "Context place error, excepted GPUPlace, but actually %s.",
            ctx_place));
    auto ctx_gpu_place = ctx_place;
    PADDLE_ENFORCE_EQ(src_gpu_place,
                      ctx_gpu_place,
                      paddle::platform::errors::Unavailable(
                          "Source place and context place do not match, source "
                          "place is %s, context place is %s.",
                          src_gpu_place,
                          ctx_gpu_place));
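    // A null stream makes paddle::memory::Copy synchronous; otherwise the
    // copy is enqueued on this context's CUDA stream. The same pattern is
    // repeated in every GPU-involved branch below.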
    auto stream =
        blocking ? nullptr
                 : reinterpret_cast<const paddle::platform::CUDADeviceContext&>(
                       dev_ctx)
                       .stream();
    paddle::memory::Copy(
        dst_cpu_place, dst_ptr, src_gpu_place, src_ptr, size, stream);
  } else if (paddle::platform::is_cpu_place(src_place) &&  // NOLINT
             paddle::platform::is_gpu_place(dst_place)) {
    auto src_cpu_place = src_place;
    auto dst_gpu_place = dst_place;
    auto ctx_place = dev_ctx.GetPlace();
    PADDLE_ENFORCE_EQ(
        paddle::platform::is_gpu_place(ctx_place),
        true,
        paddle::platform::errors::PreconditionNotMet(
            "Context place error, excepted GPUPlace, but actually %s.",
            ctx_place));
    auto ctx_gpu_place = ctx_place;
    PADDLE_ENFORCE_EQ(dst_gpu_place,
                      ctx_gpu_place,
                      paddle::platform::errors::Unavailable(
                          "Destination place and context place do not match, "
                          "destination place is %s, context place is %s.",
                          dst_gpu_place,
                          ctx_gpu_place));
    auto stream =
        blocking ? nullptr
                 : reinterpret_cast<const paddle::platform::CUDADeviceContext&>(
                       dev_ctx)
                       .stream();
    paddle::memory::Copy(
        dst_gpu_place, dst_ptr, src_cpu_place, src_ptr, size, stream);
  } else if (paddle::platform::is_gpu_place(src_place) &&  // NOLINT
             paddle::platform::is_cuda_pinned_place(dst_place)) {
    auto src_gpu_place = src_place;
    auto dst_cuda_pinned_place = dst_place;
    auto ctx_place = dev_ctx.GetPlace();
    PADDLE_ENFORCE_EQ(paddle::platform::is_gpu_place(ctx_place),
                      true,
                      paddle::platform::errors::PreconditionNotMet(
                          "Device context place mismatch. When copying Tensor "
                          "data from GPU memory to CUDA Pinned memory, current "
                          "device context place should be GPU."));
    auto ctx_gpu_place = ctx_place;
    PADDLE_ENFORCE_EQ(src_gpu_place,
                      ctx_gpu_place,
                      paddle::platform::errors::PreconditionNotMet(
                          "The source GPU device and current device context do "
                          "not match. The source GPU device number is %d, but "
                          "device context GPU number is %d.",
                          src_gpu_place.device,
                          ctx_gpu_place.device));
    auto stream =
        blocking ? nullptr
                 : reinterpret_cast<const paddle::platform::CUDADeviceContext&>(
                       dev_ctx)
                       .stream();
    paddle::memory::Copy(
        dst_cuda_pinned_place, dst_ptr, src_gpu_place, src_ptr, size, stream);
  } else if (paddle::platform::is_cuda_pinned_place(src_place) &&  // NOLINT
             paddle::platform::is_gpu_place(dst_place)) {
    auto src_cuda_pinned_place = src_place;
    auto dst_gpu_place = dst_place;
    auto ctx_place = dev_ctx.GetPlace();
    PADDLE_ENFORCE_EQ(paddle::platform::is_gpu_place(ctx_place),
                      true,
                      paddle::platform::errors::PreconditionNotMet(
                          "Device context place mismatch. When copying Tensor "
                          "data from CUDA Pinned memory to GPU memory, current "
                          "device context place should be GPU."));
    auto ctx_gpu_place = ctx_place;
    PADDLE_ENFORCE_EQ(dst_gpu_place,
                      ctx_gpu_place,
                      paddle::platform::errors::PreconditionNotMet(
                          "The target GPU device and current device context do "
                          "not match. The target GPU device number is %d, but "
                          "device context GPU number is %d.",
                          dst_gpu_place.device,
                          ctx_gpu_place.device));
    auto stream =
        blocking ? nullptr
                 : reinterpret_cast<const paddle::platform::CUDADeviceContext&>(
                       dev_ctx)
                       .stream();
    paddle::memory::Copy(
        dst_gpu_place, dst_ptr, src_cuda_pinned_place, src_ptr, size, stream);
  } else if (paddle::platform::is_gpu_place(src_place) &&  // NOLINT
             paddle::platform::is_gpu_place(dst_place)) {
    auto src_gpu_place = src_place;
    auto dst_gpu_place = dst_place;
    auto ctx_place = dev_ctx.GetPlace();
    PADDLE_ENFORCE_EQ(
        paddle::platform::is_gpu_place(ctx_place),
        true,
        paddle::platform::errors::PreconditionNotMet(
            "Context place error, excepted GPUPlace, but actually %s.",
            ctx_place));
    auto stream =
        blocking ? nullptr
                 : reinterpret_cast<const paddle::platform::CUDADeviceContext&>(
                       dev_ctx)
                       .stream();
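    // A same-device copy is issued directly on the stream. A cross-device
    // (peer) copy additionally synchronizes the source device's context so
    // the two devices are ordered correctly: after the copy when this
    // context drives the source device, and before it when this context
    // drives the destination device.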
    if (paddle::platform::is_same_place(src_place, dst_place)) {
      paddle::memory::Copy(
          dst_gpu_place, dst_ptr, src_gpu_place, src_ptr, size, stream);
    } else {
      if (paddle::platform::is_same_place(ctx_place, src_place)) {
        paddle::memory::Copy(
            dst_gpu_place, dst_ptr, src_gpu_place, src_ptr, size, stream);
        paddle::platform::DeviceContextPool::Instance()
            .Get(src.place())
            ->Wait();
      } else if (paddle::platform::is_same_place(ctx_place, dst_place)) {
        paddle::platform::DeviceContextPool::Instance()
            .Get(src.place())
            ->Wait();
        paddle::memory::Copy(
            dst_gpu_place, dst_ptr, src_gpu_place, src_ptr, size, stream);
      } else {
        PADDLE_THROW(paddle::platform::errors::Unavailable(
            "Context place dose not match the source and destination place."));
      }
    }
  } else {
    PADDLE_THROW(paddle::platform::errors::InvalidArgument(
        "Place type error. Please check the place of src and dst Tensor."));
  }
}

}  // namespace pten

PT_REGISTER_GENERAL_KERNEL(
    copy, GPU, ALL_LAYOUT, pten::Copy<pten::GPUContext>, ALL_DTYPE) {}
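
// A minimal usage sketch (illustrative only; how the context and the source
// tensor are obtained is an assumption, not part of this file):
//
//   pten::GPUContext* dev_ctx = ...;  // a live GPU device context
//   pten::DenseTensor src = ...;      // e.g. a GPU-resident tensor
//   pten::DenseTensor dst;
//   // Asynchronous copy on dev_ctx's stream:
//   pten::Copy(*dev_ctx, src, /*blocking=*/false, &dst);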