/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/pten/kernels/copy_kernel.h"

#include "paddle/pten/backends/gpu/gpu_context.h"
#include "paddle/pten/common/data_type.h"
#include "paddle/pten/core/compat/convert_utils.h"
#include "paddle/pten/core/kernel_registry.h"

// See Note [ Why still include the fluid headers? ]
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/platform/device_context.h"

namespace pten {

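// Copy the contents of `src` into `dst` for every combination of CPU, GPU,
// and CUDA pinned memory that this GPU kernel handles. When `blocking` is
// false, device transfers are enqueued asynchronously on the GPU context's
// stream; when it is true, a null stream is passed and each transfer
// completes before the call returns.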
template <typename Context>
void Copy(const Context& dev_ctx,
          const DenseTensor& src,
          bool blocking,
          DenseTensor* dst) {
  auto* src_ptr = src.data();
  const auto& src_place = src.place();
  auto dst_place = dst->place();

  if (src_place == dst_place && paddle::platform::is_cpu_place(src_place)) {
    PADDLE_THROW(paddle::platform::errors::InvalidArgument(
        "The src and dst tensor are all CPU tensor, you should call copy "
        "function in CPU mode."));
  }

  VLOG(3) << "TensorCopy " << src.dims() << " from " << src.place() << " to "
          << dst_place;

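  // Match dst's shape to src, then allocate (or reuse) its buffer at
  // dst_place and grab the raw destination pointer.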
  dst->ResizeAndAllocate(src.dims());
  auto* dst_ptr = dst->mutable_data(dst_place);

  if (src_ptr == dst_ptr && src_place == dst_place) {
    VLOG(3) << "Skip copy the same data async from " << src_place << " to "
            << dst_place;
    return;
  }
  VLOG(4) << "src:" << src_ptr << ", dst:" << dst_ptr;

  CHECK(dst->layout() == src.layout());

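  // Total bytes to transfer: element count times the width of one element.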
  auto size = src.numel() * paddle::experimental::SizeOf(src.dtype());

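  // CUDA pinned memory is host-addressable, so the pinned<->pinned and
  // pinned<->CPU transfers below are plain host copies that take no stream.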
  if (paddle::platform::is_cuda_pinned_place(src_place) &&  // NOLINT
      paddle::platform::is_cuda_pinned_place(dst_place)) {
    paddle::memory::Copy(dst_place, dst_ptr, src_place, src_ptr, size);
  } else if (paddle::platform::is_cuda_pinned_place(src_place) &&  // NOLINT
             paddle::platform::is_cpu_place(dst_place)) {
    paddle::memory::Copy(dst_place, dst_ptr, src_place, src_ptr, size);
  } else if (paddle::platform::is_cpu_place(src_place) &&  // NOLINT
             paddle::platform::is_cuda_pinned_place(dst_place)) {
    paddle::memory::Copy(dst_place, dst_ptr, src_place, src_ptr, size);
  } else if (paddle::platform::is_gpu_place(src_place) &&  // NOLINT
             paddle::platform::is_cpu_place(dst_place)) {
    auto src_gpu_place = src_place;
    auto dst_cpu_place = dst_place;
    auto ctx_place = dev_ctx.GetPlace();
    PADDLE_ENFORCE_EQ(
        paddle::platform::is_gpu_place(ctx_place),
        true,
        paddle::platform::errors::PreconditionNotMet(
            "Context place error, excepted GPUPlace, but actually %s.",
            ctx_place));
    auto ctx_gpu_place = ctx_place;
    PADDLE_ENFORCE_EQ(src_gpu_place,
                      ctx_gpu_place,
                      paddle::platform::errors::Unavailable(
                          "Source place and context place do not match, source "
                          "place is %s, context place is %s.",
                          src_gpu_place,
                          ctx_gpu_place));
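    // A null stream makes paddle::memory::Copy perform the transfer
    // synchronously; otherwise the memcpy is enqueued on the GPU context's
    // stream and may still be in flight when this function returns.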
    auto stream =
        blocking ? nullptr
                 : reinterpret_cast<const pten::GPUContext&>(dev_ctx).stream();
    paddle::memory::Copy(
        dst_cpu_place, dst_ptr, src_gpu_place, src_ptr, size, stream);
  } else if (paddle::platform::is_cpu_place(src_place) &&  // NOLINT
             paddle::platform::is_gpu_place(dst_place)) {
    auto src_cpu_place = src_place;
    auto dst_gpu_place = dst_place;
    auto ctx_place = dev_ctx.GetPlace();
    PADDLE_ENFORCE_EQ(
        paddle::platform::is_gpu_place(ctx_place),
        true,
        paddle::platform::errors::PreconditionNotMet(
            "Context place error, excepted GPUPlace, but actually %s.",
            ctx_place));
    auto ctx_gpu_place = ctx_place;
    PADDLE_ENFORCE_EQ(dst_gpu_place,
                      ctx_gpu_place,
                      paddle::platform::errors::Unavailable(
                          "Destination place and context place do not match, "
                          "destination place is %s, context place is %s.",
                          dst_gpu_place,
                          ctx_gpu_place));
    auto stream =
        blocking ? nullptr
                 : reinterpret_cast<const pten::GPUContext&>(dev_ctx).stream();
    paddle::memory::Copy(
        dst_gpu_place, dst_ptr, src_cpu_place, src_ptr, size, stream);
  } else if (paddle::platform::is_gpu_place(src_place) &&  // NOLINT
             paddle::platform::is_cuda_pinned_place(dst_place)) {
    auto src_gpu_place = src_place;
    auto dst_cuda_pinned_place = dst_place;
    auto ctx_place = dev_ctx.GetPlace();
    PADDLE_ENFORCE_EQ(paddle::platform::is_gpu_place(ctx_place),
                      true,
                      paddle::platform::errors::PreconditionNotMet(
                          "Device context place mismatch. When copying Tensor "
                          "data from GPU memory to CUDA Pinned memory, current "
                          "device context place should be GPU."));
    auto ctx_gpu_place = ctx_place;
    PADDLE_ENFORCE_EQ(src_gpu_place,
                      ctx_gpu_place,
                      paddle::platform::errors::PreconditionNotMet(
                          "The source GPU device and current device context do "
                          "not match. The source GPU device number is %d, but "
                          "device context GPU number is %d.",
                          src_gpu_place.device,
                          ctx_gpu_place.device));
    auto stream =
        blocking ? nullptr
                 : reinterpret_cast<const pten::GPUContext&>(dev_ctx).stream();
    paddle::memory::Copy(
        dst_cuda_pinned_place, dst_ptr, src_gpu_place, src_ptr, size, stream);
  } else if (paddle::platform::is_cuda_pinned_place(src_place) &&  // NOLINT
             paddle::platform::is_gpu_place(dst_place)) {
    auto src_cuda_pinned_place = src_place;
    auto dst_gpu_place = dst_place;
    auto ctx_place = dev_ctx.GetPlace();
    PADDLE_ENFORCE_EQ(paddle::platform::is_gpu_place(ctx_place),
                      true,
                      paddle::platform::errors::PreconditionNotMet(
                          "Device context place mismatch. When copying Tensor "
                          "data from CUDA Pinned memory to GPU memory, current "
                          "device context place should be GPU."));
    auto ctx_gpu_place = ctx_place;
    PADDLE_ENFORCE_EQ(dst_gpu_place,
                      ctx_gpu_place,
                      paddle::platform::errors::PreconditionNotMet(
                          "The target GPU device and current device context do "
                          "not match. The target GPU device number is %d, but "
                          "device context GPU number is %d.",
                          dst_gpu_place.device,
                          ctx_gpu_place.device));
    auto stream =
        blocking ? nullptr
                 : reinterpret_cast<const pten::GPUContext&>(dev_ctx).stream();
    paddle::memory::Copy(
        dst_gpu_place, dst_ptr, src_cuda_pinned_place, src_ptr, size, stream);
  } else if (paddle::platform::is_gpu_place(src_place) &&  // NOLINT
             paddle::platform::is_gpu_place(dst_place)) {
    auto src_gpu_place = src_place;
    auto dst_gpu_place = dst_place;
    auto ctx_place = dev_ctx.GetPlace();
    PADDLE_ENFORCE_EQ(
        paddle::platform::is_gpu_place(ctx_place),
        true,
        paddle::platform::errors::PreconditionNotMet(
            "Context place error, excepted GPUPlace, but actually %s.",
            ctx_place));
    auto stream =
        blocking ? nullptr
                 : reinterpret_cast<const pten::GPUContext&>(dev_ctx).stream();
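    // A same-device copy runs directly on the stream. A cross-device
    // (peer-to-peer) copy also waits on the source device's context, before
    // or after the memcpy depending on which place dev_ctx owns, so both
    // devices see consistent data.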
    if (paddle::platform::is_same_place(src_place, dst_place)) {
      paddle::memory::Copy(
          dst_gpu_place, dst_ptr, src_gpu_place, src_ptr, size, stream);
    } else {
      if (paddle::platform::is_same_place(ctx_place, src_place)) {
        paddle::memory::Copy(
            dst_gpu_place, dst_ptr, src_gpu_place, src_ptr, size, stream);
        paddle::platform::DeviceContextPool::Instance()
            .Get(src.place())
            ->Wait();
      } else if (paddle::platform::is_same_place(ctx_place, dst_place)) {
        paddle::platform::DeviceContextPool::Instance()
            .Get(src.place())
            ->Wait();
        paddle::memory::Copy(
            dst_gpu_place, dst_ptr, src_gpu_place, src_ptr, size, stream);
      } else {
        PADDLE_THROW(paddle::platform::errors::Unavailable(
            "Context place dose not match the source and destination place."));
      }
    }
  } else {
    PADDLE_THROW(paddle::platform::errors::InvalidArgument(
        "Place type error. Please check the place of src and dst Tensor."));
  }
}

}  // namespace pten

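// Register pten::Copy as the `copy` kernel on GPU for all layouts and all
// data types.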
PT_REGISTER_GENERAL_KERNEL(
    copy, GPU, ALL_LAYOUT, pten::Copy<pten::GPUContext>, ALL_DTYPE) {}
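
// Illustrative usage (a minimal sketch, not part of this file; it assumes
// `ctx` is an initialized pten::GPUContext and `src`/`dst` are DenseTensors
// on places this kernel supports):
//
//   pten::Copy(ctx, src, /*blocking=*/false, &dst);  // async, on ctx's stream
//   pten::Copy(ctx, src, /*blocking=*/true, &dst);   // synchronous copy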