/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/phi/kernels/sparse/conv_grad_kernel.h"

#include "glog/logging.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_info.h"
#include "paddle/phi/backends/gpu/gpu_launch_config.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/tensor_utils.h"
#include "paddle/phi/core/visit_type.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/sparse/gpu/conv.cu.h"

namespace phi {
namespace sparse {

// rulebook[3, rulebook_len]:
//[
//  [kernel_index],
//  [in_i],
//  [out_i],
//]
// x_grad = out_grad * transpose(kernel)
// kernel_grad = transpose(x) * out_grad
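//
// Shape sketch for the per-offset GEMMs below (notation only, where
// M = counter_ptr[i] is the number of rulebook entries for kernel offset i):
//   kernel_grad[i] (in_channels x out_channels) =
//       transpose(in_features[i]) (in_channels x M) * out_grad_features[i] (M x out_channels)
//   d_x_features[i] (M x in_channels) =
//       out_grad_features[i] (M x out_channels) * transpose(kernel[i]) (out_channels x in_channels)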
template <typename T, typename IntT>
void Conv3dCooGradGPUKernel(const GPUContext& dev_ctx,
                            const SparseCooTensor& x,
                            const DenseTensor& kernel,
                            const SparseCooTensor& out,
                            const DenseTensor& rulebook,
                            const DenseTensor& counter,
                            const SparseCooTensor& out_grad,
                            const std::vector<int>& paddings,
                            const std::vector<int>& dilations,
                            const std::vector<int>& strides,
                            const int groups,
                            const bool subm,
                            const std::string& key,
                            SparseCooTensor* x_grad,
                            DenseTensor* kernel_grad) {
  const auto& kernel_dims = kernel.dims();
  const int kernel_size = kernel_dims[0] * kernel_dims[1] * kernel_dims[2];
  const int in_channels = kernel_dims[3];
  const int out_channels = kernel_dims[4];

  int rulebook_len = 0;
  const IntT* rulebook_ptr = phi::funcs::sparse::GetRulebookPtr<IntT>(
      out, rulebook, key, &rulebook_len);
  const int* counter_ptr = phi::funcs::sparse::GetCounterPtr(out, counter, key);

  phi::DenseTensor in_features =
      phi::Empty<T>(dev_ctx, {rulebook_len, in_channels});
  phi::DenseTensor d_x_features =
      phi::Empty<T>(dev_ctx, {rulebook_len, in_channels});
  phi::DenseTensor out_grad_features =
      phi::Empty<T>(dev_ctx, {rulebook_len, out_channels});

  T* in_features_ptr = in_features.data<T>();
  T* d_x_features_ptr = d_x_features.data<T>();
  T* out_grad_features_ptr = out_grad_features.data<T>();
  *kernel_grad = phi::EmptyLike<T>(dev_ctx, kernel);
  T* d_kernel_ptr = kernel_grad->data<T>();
  phi::backends::gpu::GpuMemsetAsync(
      d_kernel_ptr, 0, sizeof(T) * kernel_grad->numel(), dev_ctx.stream());

  int half_kernel_size = kernel_size / 2;
  auto blas = phi::funcs::GetBlas<GPUContext, T>(dev_ctx);
  DenseTensor x_grad_indices = phi::EmptyLike<IntT>(dev_ctx, x.indices());
  DenseTensor x_grad_values = phi::EmptyLike<T>(dev_ctx, x.values());
  T* x_grad_values_ptr = x_grad_values.data<T>();
  phi::backends::gpu::GpuMemsetAsync(x_grad_values_ptr,
                                     0,
                                     sizeof(T) * x_grad_values.numel(),
                                     dev_ctx.stream());
  phi::backends::gpu::GpuMemsetAsync(
      d_x_features_ptr, 0, sizeof(T) * d_x_features.numel(), dev_ctx.stream());
  phi::Copy<GPUContext>(
      dev_ctx, x.indices(), dev_ctx.GetPlace(), false, &x_grad_indices);
  x_grad->SetMember(x_grad_indices, x_grad_values, x.dims(), true);

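  // offsets[i] is the exclusive prefix sum of counter_ptr: rulebook entries
  // for kernel offset i occupy positions [offsets[i], offsets[i + 1]) in the
  // gathered feature buffers.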
  std::vector<int> offsets(kernel_size + 1);

  int offset = 0, max_count = 0;
  for (int i = 0; i < kernel_size; i++) {
    offsets[i] = offset;
    offset += counter_ptr[i];
    if (i < half_kernel_size) {
      max_count = std::max(max_count, counter_ptr[i]);
    }
  }
  offsets[kernel_size] = offset;

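  // Submanifold convolution: the kernel center maps every input nonzero to
  // itself, so SubmPreProcess handles that identity contribution to x_grad and
  // kernel_grad directly; if no other kernel offsets have rulebook entries
  // (max_count == 0), the remaining work can be skipped.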
  if (subm) {
    phi::funcs::sparse::SubmPreProcess<T, GPUContext>(dev_ctx,
                                                      x,
                                                      kernel,
                                                      out_grad.values(),
                                                      in_channels,
                                                      out_channels,
                                                      half_kernel_size,
                                                      kernel_grad,
                                                      &x_grad_values);
    if (max_count == 0) {
      return;
    }
  }

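  // Group the rulebook entries and gather x.values() and out_grad.values()
  // into dense buffers with one contiguous segment per kernel offset, ready
  // for the per-offset GEMMs below.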
  auto config =
      phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, rulebook_len, 1);
  DenseTensor unique_value = phi::Empty<int>(
      dev_ctx, {static_cast<int>(x_grad->nnz() * kernel_size * 2)});
  DenseTensor out_index =
      phi::Empty<int>(dev_ctx, {static_cast<int>(x.nnz() * 2)});
  int* out_index_ptr = out_index.data<int>();
  int* unique_value_ptr = unique_value.data<int>();
  phi::backends::gpu::GpuMemsetAsync(
      out_index_ptr, 0, sizeof(int) * x.nnz() * 2, dev_ctx.stream());

  GroupIndexsV2<<<config.block_per_grid,
                  config.thread_per_block,
                  0,
                  dev_ctx.stream()>>>(rulebook_len,
                                      x.nnz(),
                                      kernel_size,
                                      offsets[kernel_size / 2],
                                      rulebook_ptr,
                                      out_index_ptr,
                                      unique_value_ptr);

  GatherV2<T, IntT>(dev_ctx,
                    x.values().data<T>(),
                    out_index_ptr,
                    unique_value_ptr,
                    x.nnz(),
                    kernel_size,
                    in_channels,
                    2,
                    in_features_ptr);

  Gather<T, IntT>(dev_ctx,
                  out_grad.values().data<T>(),
                  rulebook_ptr + rulebook_len,
                  rulebook_len,
                  out_channels,
                  out_grad_features_ptr);

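  // For each kernel offset i, run two dense GEMMs on the gathered segments:
  // kernel_grad[i] = transpose(in_features[i]) * out_grad_features[i], and
  // d_x_features[i] = out_grad_features[i] * transpose(kernel[i]).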
  const T* kernel_ptr = kernel.data<T>();
  for (int i = 0; i < kernel_size; i++) {
    if (counter_ptr[i] <= 0 || (subm && i == half_kernel_size)) {
      continue;
    }

    const int M = counter_ptr[i];
    const int K = in_channels;
    const int N = out_channels;
    T* tmp_in_ptr = in_features_ptr + offsets[i] * in_channels;
    T* tmp_out_grad_ptr = out_grad_features_ptr + offsets[i] * out_channels;
    const T* tmp_kernel_ptr = kernel_ptr + i * in_channels * out_channels;
    T* tmp_d_x_ptr = d_x_features_ptr + offsets[i] * in_channels;
    T* tmp_d_kernel_ptr = d_kernel_ptr + i * in_channels * out_channels;

    // call gemm: d_kernel = transpose(x) * out_grad
    // (in_channels, n) * (n, out_channels)
    blas.GEMM(CblasTrans,
              CblasNoTrans,
              K,
              N,
              M,
              static_cast<T>(1),
              tmp_in_ptr,
              tmp_out_grad_ptr,
              static_cast<T>(0),
              tmp_d_kernel_ptr);

    // call gemm: d_x = out_grad * transpose(kernel)
    // (n, out_channels) * (out_channels, in_channels)
    blas.GEMM(CblasNoTrans,
              CblasTrans,
              M,
              K,
              N,
              static_cast<T>(1),
              tmp_out_grad_ptr,
              tmp_kernel_ptr,
              static_cast<T>(0),
              tmp_d_x_ptr);
  }

  // 4. scatter: accumulate the per-offset input gradients back into x_grad
  phi::funcs::sparse::ScatterV2<T>(dev_ctx,
                                   d_x_features_ptr,
                                   out_index.data<int>(),
                                   unique_value.data<int>(),
                                   x_grad->nnz(),
                                   kernel_size,
                                   in_channels,
                                   2,
                                   x_grad_values_ptr);
}

template <typename T, typename Context>
void Conv3dCooGradKernel(const Context& dev_ctx,
                         const SparseCooTensor& x,
                         const DenseTensor& kernel,
                         const SparseCooTensor& out,
                         const DenseTensor& rulebook,
                         const DenseTensor& counter,
                         const SparseCooTensor& out_grad,
                         const std::vector<int>& paddings,
                         const std::vector<int>& dilations,
                         const std::vector<int>& strides,
                         const int groups,
                         const bool subm,
                         const std::string& key,
                         SparseCooTensor* x_grad,
                         DenseTensor* kernel_grad) {
  PD_VISIT_BASE_INTEGRAL_TYPES(
      x.indices().dtype(), "Conv3dCooGradGPUKernel", ([&] {
        Conv3dCooGradGPUKernel<T, data_t>(dev_ctx,
                                          x,
                                          kernel,
                                          out,
                                          rulebook,
                                          counter,
                                          out_grad,
                                          paddings,
                                          dilations,
                                          strides,
                                          groups,
                                          subm,
                                          key,
                                          x_grad,
                                          kernel_grad);
      }));
}

}  // namespace sparse
}  // namespace phi

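// Register the GPU grad kernel for float, double and float16; input 0 (x) is
// a sparse COO tensor.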
PD_REGISTER_KERNEL(conv3d_coo_grad,
                   GPU,
                   ALL_LAYOUT,
                   phi::sparse::Conv3dCooGradKernel,
                   float,
                   double,
                   phi::dtype::float16) {
  kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
}