// top_k_op.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include <cstdio>
#include <vector>
#ifdef __NVCC__
#include "cub/cub.cuh"
#endif
#ifdef __HIPCC__
#include <hipcub/hipcub.hpp>
#endif
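// cub (NVIDIA builds) and hipcub (ROCm builds) provide the device-wide sort
// that backs the SortTopk fast path used in the forward kernel below.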
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/top_k_function_cuda.h"
#include "paddle/fluid/operators/top_k_op.h"
#include "paddle/fluid/platform/float16.h"
// Set cub base traits in order to handle float16.

namespace paddle {
namespace operators {

using Tensor = phi::DenseTensor;

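// The FIXED_* macros below expand to switch cases that promote a runtime
// value (CUDA block dim, per-thread heap length) into a compile-time
// constant, so each supported configuration gets its own kernel instantiation.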
#define FIXED_BLOCK_DIM_BASE(dim, ...) \
  case (dim): {                        \
    constexpr auto kBlockDim = (dim);  \
    __VA_ARGS__;                       \
  } break

#define FIXED_MAXLENGTH_BASE(MaxLength, ...) \
  case (MaxLength): {                        \
    constexpr auto maxLength = (MaxLength);  \
    __VA_ARGS__;                             \
  } break

#define FIXED_BLOCK_DIM(...)                 \
  FIXED_BLOCK_DIM_BASE(1024, ##__VA_ARGS__); \
  FIXED_BLOCK_DIM_BASE(512, ##__VA_ARGS__);  \
  FIXED_BLOCK_DIM_BASE(256, ##__VA_ARGS__);  \
  FIXED_BLOCK_DIM_BASE(128, ##__VA_ARGS__);  \
  FIXED_BLOCK_DIM_BASE(64, ##__VA_ARGS__);   \
  FIXED_BLOCK_DIM_BASE(32, ##__VA_ARGS__)

#define FIXED_MAXLENGTH(...)              \
  FIXED_MAXLENGTH_BASE(1, ##__VA_ARGS__); \
  FIXED_MAXLENGTH_BASE(2, ##__VA_ARGS__); \
  FIXED_MAXLENGTH_BASE(3, ##__VA_ARGS__); \
  FIXED_MAXLENGTH_BASE(4, ##__VA_ARGS__); \
  FIXED_MAXLENGTH_BASE(5, ##__VA_ARGS__)

template <typename DeviceContext, typename T>
class TopkOpCUDAKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    PADDLE_ENFORCE_EQ(
        platform::is_gpu_place(ctx.GetPlace()),
        true,
        platform::errors::InvalidArgument("It must use CUDAPlace."));
    auto* input = ctx.Input<phi::DenseTensor>("X");
    auto* output = ctx.Output<phi::DenseTensor>("Out");
    auto* indices = ctx.Output<phi::DenseTensor>("Indices");
    int k = static_cast<int>(ctx.Attr<int>("k"));

    auto* k_t = ctx.Input<phi::DenseTensor>("K");
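    // A runtime "K" tensor, when present, overrides the "k" attribute; the
    // output and indices tensors are then resized so their last dim equals k.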
    if (k_t) {
      Tensor k_host;
      framework::TensorCopySync(*k_t, platform::CPUPlace(), &k_host);
      k = k_host.data<int>()[0];
      framework::DDim output_dims = output->dims();
      output_dims[output_dims.size() - 1] = k;
      output->Resize(output_dims);
      indices->Resize(output_dims);
    }

    const T* input_data = input->data<T>();
    T* output_data = output->mutable_data<T>(ctx.GetPlace());
    // FIXME(typhoonzero): data is always converted to type T?

    framework::DDim inputdims = input->dims();
    const int64_t input_height =
        phi::product(phi::slice_ddim(inputdims, 0, inputdims.size() - 1));
    const int64_t input_width = inputdims[inputdims.size() - 1];
    const auto& dev_ctx = ctx.cuda_device_context();
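    // Heuristic fast path: for wide rows, large k, or a full-row top-k, a
    // device-wide cub sort (SortTopk) tends to beat the block-level kernel.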
    if (input_width <= 1024 || k >= 128 || k == input_width) {
      if (SortTopk<T>(
              dev_ctx, input, input_width, input_height, k, output, indices)) {
        // Succeeded, return.
        return;
      } else {
        LOG(INFO) << "TopKOP: some errors happened when using cub sorting; "
                     "falling back to the default top-k kernel.";
      }
    }
    int64_t* indices_data = indices->mutable_data<int64_t>(ctx.GetPlace());
    if (k > input_width) k = input_width;

    // NOTE: lds and dim are both passed as input_width.
    // NOTE: the old matrix implementation's stride differs from eigen's.
    // TODO(typhoonzero): refine this kernel.
    const int kMaxHeight = 2048;
    int gridx = input_height < kMaxHeight ? input_height : kMaxHeight;
    paddle::platform::GpuLaunchConfig config =
        paddle::platform::GetGpuLaunchConfig1D(dev_ctx, input_width);
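    // Nested dispatch: the outer switch fixes the block dim, the inner one
    // the per-thread heap length. For example, with config.thread_per_block.x
    // == 256 and getMaxLength(k) == 2, the macros instantiate
    // KeMatrixTopK<T, 2, 256><<<gridx, 256, 0, dev_ctx.stream()>>>(...).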
    switch (config.thread_per_block.x) {
      FIXED_BLOCK_DIM(switch (getMaxLength(k)) {
        FIXED_MAXLENGTH(
            KeMatrixTopK<T, maxLength, kBlockDim>
            <<<gridx, kBlockDim, 0, dev_ctx.stream()>>>(output_data,
                                                        k,
                                                        indices_data,
                                                        input_data,
                                                        input_width,
                                                        input_width,
                                                        static_cast<int>(k),
                                                        gridx,
                                                        input_height));
        default:
          PADDLE_THROW(platform::errors::Fatal(
              "the input k is invalid when using the getMaxLength function "
              "to get the maxLength."));
      });
      default:
        PADDLE_THROW(platform::errors::Unavailable(
            "Calculation error occurred in TopK Operator's CUDA Kernel."));
    }
  }
};

template <typename DeviceContext, typename T>
class TopkOpGradCUDAKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    PADDLE_ENFORCE_EQ(
        platform::is_gpu_place(context.GetPlace()),
        true,
        platform::errors::InvalidArgument("It must use CUDAPlace."));
    auto* x = context.Input<phi::DenseTensor>("X");
    auto* out_grad =
        context.Input<phi::DenseTensor>(framework::GradVarName("Out"));
    auto* indices = context.Input<phi::DenseTensor>("Indices");
    auto* x_grad =
        context.Output<phi::DenseTensor>(framework::GradVarName("X"));

    T* x_grad_data = x_grad->mutable_data<T>(context.GetPlace());
    const T* out_grad_data = out_grad->data<T>();
    const int64_t* indices_data = indices->data<int64_t>();
    size_t k = indices->dims()[indices->dims().size() - 1];

    framework::DDim xdims = x->dims();
    const size_t row =
        phi::product(phi::slice_ddim(xdims, 0, xdims.size() - 1));
    const size_t col = xdims[xdims.size() - 1];
    const auto& dev_ctx = context.cuda_device_context();
    const int kMaxHeight = 2048;
    int gridx = row < kMaxHeight ? row : kMaxHeight;
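    // AssignGrad scatters each row of out_grad into x_grad at the positions
    // recorded in indices; entries outside the top-k get zero.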
    switch (GetDesiredBlockDim(col)) {
      FIXED_BLOCK_DIM(
          AssignGrad<T, 5, kBlockDim>
          <<<gridx, kBlockDim, 0, dev_ctx.stream()>>>(
              x_grad_data, indices_data, out_grad_data, row, col, k));
      default:
        PADDLE_THROW(
            platform::errors::Unavailable("An error occurred in AssignGrad."));
    }
  }
};
#undef FIXED_BLOCK_DIM_BASE
#undef FIXED_BLOCK_DIM
#undef FIXED_MAXLENGTH_BASE
#undef FIXED_MAXLENGTH

}  // namespace operators
}  // namespace paddle
REGISTER_OP_CUDA_KERNEL(
    top_k,
    paddle::operators::TopkOpCUDAKernel<phi::GPUContext, float>,
    paddle::operators::TopkOpCUDAKernel<phi::GPUContext, double>,
    paddle::operators::TopkOpCUDAKernel<phi::GPUContext, int>,
    paddle::operators::TopkOpCUDAKernel<phi::GPUContext, int64_t>,
    paddle::operators::TopkOpCUDAKernel<phi::GPUContext,
                                        paddle::platform::float16>);

REGISTER_OP_CUDA_KERNEL(
    top_k_grad,
    paddle::operators::TopkOpGradCUDAKernel<phi::GPUContext, float>,
    paddle::operators::TopkOpGradCUDAKernel<phi::GPUContext, double>,
    paddle::operators::TopkOpGradCUDAKernel<phi::GPUContext, int>,
    paddle::operators::TopkOpGradCUDAKernel<phi::GPUContext, int64_t>,
    paddle::operators::TopkOpGradCUDAKernel<phi::GPUContext,
                                            paddle::platform::float16>);
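
// Illustrative Python-side call (a sketch; assumes the legacy fluid API,
// which is not defined in this file):
//   import paddle.fluid as fluid
//   values, indices = fluid.layers.topk(input=x, k=5)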