/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include "paddle/fluid/operators/detail/strided_memcpy.h"

namespace paddle {
namespace operators {

// Strided memory copy from src to dst.
//
// The src and dst should both be on dev_ctx.GetPlace(); otherwise, the copy
// will trigger a segmentation fault.
//
// The stride of an array (also referred to as increment, pitch or step size) is
// the number of locations in memory between the beginnings of successive
// array elements.
//
// For example, for a tensor of shape [1, 3, 300, 300] with no padding, the
// stride is [270000, 90000, 300, 1].
//
// NOTE: On GPU, the memcpy is asynchronous. To synchronize it, invoke
// `dev_ctx.Wait()`.
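//
// A minimal usage sketch (the names `ctx`, `src`, and `dst` are hypothetical,
// and both buffers are assumed to live on `ctx.GetPlace()`): copy a packed
// [2, 3] block out of a row-major [2, 4] source:
//
//   // src: shape [2, 4], stride [4, 1]; dst: packed [2, 3], stride [3, 1].
//   StridedMemcpy<float>(ctx, src, framework::make_ddim({4, 1}),
//                        framework::make_ddim({2, 3}),
//                        framework::make_ddim({3, 1}), dst);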
template <typename T>
inline void StridedMemcpy(const platform::DeviceContext& dev_ctx, const T* src,
                          const framework::DDim& src_stride,
                          const framework::DDim& dst_dim,
                          const framework::DDim& dst_stride, T* dst) {
  using namespace detail;
  StridedCopyDimVisitor<T> func(dev_ctx, src, src_stride, dst_stride, dst);
  boost::apply_visitor(func, dst_dim);
}

// Strided numel memory copy from src to dst along the specified axis
//
// For example, for a tensor with dims [4, 20, 100], the strided numel is
// [8000, 2000, 100].
//
// NOTE: The src and dst tensors should have the same number of elements
// along every axis except the specified one.
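//
// A usage sketch (names hypothetical, shapes made up for illustration):
// copy a [4, 20, 100] src into the first 20 positions along axis 1 of a
// [4, 50, 100] dst, as a concat-style operator would:
//
//   // src stride numel: [8000, 2000, 100]; dst stride numel: [20000, 5000, 100].
//   // size = src_stride_numel[axis] = 2000 elements per outer slice.
//   StridedNumelCopyWithAxis<float>(
//       ctx, /*axis=*/1, dst, framework::make_ddim({20000, 5000, 100}),
//       src, framework::make_ddim({8000, 2000, 100}), /*size=*/2000);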
template <typename T>
inline void StridedNumelCopyWithAxis(const platform::DeviceContext& ctx,
                                     int64_t axis, T* dst,
                                     const framework::DDim& dst_stride_numel,
                                     const T* src,
                                     const framework::DDim& src_stride_numel,
                                     int64_t size) {
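  // Decompose the copy into `before` outer slices; each slice copies `size`
  // contiguous elements, while src and dst advance by their own per-slice
  // strides (src_after / dst_after).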
  int64_t before = dst_stride_numel[0] / dst_stride_numel[axis];
  int64_t src_after = src_stride_numel[axis];
  int64_t dst_after = dst_stride_numel[axis];
  auto place = ctx.GetPlace();

  PADDLE_ENFORCE_EQ(src_stride_numel.size(), dst_stride_numel.size(),
                    "src and dst tensors should have the same number of dims.");

  // Check that src and dst agree on every dim except `axis`.
  for (int64_t i = 0; i < dst_stride_numel.size(); ++i) {
    if (i < axis) {
      PADDLE_ENFORCE_EQ(src_stride_numel[i] / src_stride_numel[axis],
                        dst_stride_numel[i] / dst_stride_numel[axis],
                        "src and dst should have the same number of elements "
                        "except along the specified axis.");
    } else if (i == axis) {
      continue;
    } else {
      PADDLE_ENFORCE_EQ(src_stride_numel[i], dst_stride_numel[i],
                        "src and dst should have the same number of elements "
                        "except along the specified axis.");
    }
  }

  for (int64_t i = 0; i < before; ++i) {
    if (platform::is_cpu_place(place)) {
      auto& cpu_place = boost::get<platform::CPUPlace>(place);
      memory::Copy(cpu_place, dst + i * dst_after, cpu_place,
                   src + i * src_after, sizeof(T) * size);
    } else {
#ifdef PADDLE_WITH_CUDA
      auto& gpu_place = boost::get<platform::CUDAPlace>(place);
      auto& cuda_ctx =
          reinterpret_cast<const platform::CUDADeviceContext&>(ctx);
      memory::Copy(gpu_place, dst + i * dst_after, gpu_place,
                   src + i * src_after, sizeof(T) * size, cuda_ctx.stream());
#else
      PADDLE_THROW("Paddle is not compiled with GPU");
#endif
    }
  }
}

}  // namespace operators
}  // namespace paddle