// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <paddle/fluid/platform/device_context.h>

#include <algorithm>

#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/memory/malloc.h"
#include "paddle/fluid/operators/math/bert_encoder_functor.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"

namespace paddle {
namespace operators {

template <typename T>
__global__ void transpose(T *src,
                          T *dst,
                          const int batch_size,
                          const int seq_len,
                          const int head_num,
                          const int size_per_head) {
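  // Reorders src [B, N, S, H] into dst [B, S, N, H]; one block handles one
  // (batch, head, seq) slice and each thread copies one element along H.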
  int batch_id = blockIdx.x / (head_num * seq_len);
  int seq_id = blockIdx.x % seq_len;
  int head_id = (blockIdx.x % (head_num * seq_len)) / seq_len;
  dst[batch_id * (head_num * seq_len * size_per_head) +
      seq_id * head_num * size_per_head + head_id * size_per_head +
      threadIdx.x] = src[blockIdx.x * size_per_head + threadIdx.x];
}

template <typename T>
inline __device__ T add_func(T a, T b);

template <>
__device__ float add_func<float>(float a, float b) {
  return a + b;
}

template <>
__device__ float2 add_func<float2>(float2 a, float2 b) {
  float2 c;
  c.x = a.x + b.x;
  c.y = a.y + b.y;
  return c;
}

template <>
__device__ float4 add_func<float4>(float4 a, float4 b) {
  float4 c;
  c.x = a.x + b.x;
  c.y = a.y + b.y;
  c.z = a.z + b.z;
  c.w = a.w + b.w;
  return c;
}

template <typename T>
__global__ void TransposeQkvKernel(const int H,
                                   const T *input,
                                   const T *bias,
                                   T *output) {
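  // Launched with grid = (S, B, 3) and block = (H, N): each thread adds the
  // QKV bias to one element and scatters it to the transposed layout below.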
  // Input: BxSx3xNxH
  // Bias: 3xNxH
  // Output: 3xBxNxSxH
  int n = threadIdx.y;
  int s = blockIdx.x;
  int b = blockIdx.y;
  int m = blockIdx.z;

  const int N = blockDim.y;
  const int S = gridDim.x;
  const int B = gridDim.y;

  const int NH = N * H;
  const int NHS = NH * S;
  const int in_offset = n * H + m * NH + s * 3 * NH + b * NHS * 3;
  const int bias_offset = m * NH + n * H;
  const int out_offset = s * H + n * S * H + b * NHS + m * NHS * B;

  const int i = threadIdx.x;
  output[out_offset + i] =
      add_func(input[in_offset + i], bias[bias_offset + i]);
}

void TransQKVWithBias(const int batch,
                      const int seq_len,
                      const int head_size,
                      const int head_num,
                      const float *input,
                      const float *bias,
                      float *output,
                      gpuStream_t stream) {
  // BxSx3xNxH + 3xNxH -> 3xBxNxSxH
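  // Dispatch on head_size: reinterpret the buffers as float4 when
  // head_size % 4 == 0, as float2 when head_size % 2 == 0, otherwise fall
  // back to scalar float. Vectorizing shrinks blockDim.x, so a larger
  // head_size * head_num still fits under the 1024-thread block limit.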
  int scratch_size = batch * head_num * seq_len * seq_len;
  const dim3 grid(seq_len, batch, 3);
  // head_size % 4 == 0 and scratch_size % 4 == 0 guarantee the buffers can
  // be reinterpreted as float4 without breaking alignment.
  if (head_size % 4 == 0 && scratch_size % 4 == 0) {
    const int h = head_size / 4;
    const float4 *input4 = reinterpret_cast<const float4 *>(input);
    const float4 *bias4 = reinterpret_cast<const float4 *>(bias);
    float4 *output4 = reinterpret_cast<float4 *>(output);
    const dim3 block(h, head_num, 1);

    // limit h * head_num to max block size (1024).
    PADDLE_ENFORCE_LE(h * head_num,
                      1024,
                      platform::errors::InvalidArgument(
                          "head_num (%d) * head_size (%d) should <= %d",
                          head_num,
                          head_size,
                          1024 * 4));
    TransposeQkvKernel<float4>
        <<<grid, block, 0, stream>>>(h, input4, bias4, output4);
  } else if (head_size % 2 == 0 && scratch_size % 2 == 0) {
    const int h = head_size / 2;
    const float2 *input2 = reinterpret_cast<const float2 *>(input);
    const float2 *bias2 = reinterpret_cast<const float2 *>(bias);
    float2 *output2 = reinterpret_cast<float2 *>(output);
    const dim3 block(h, head_num, 1);
    // limit h * head_num to max block size (1024).
    PADDLE_ENFORCE_LE(h * head_num,
                      1024,
                      platform::errors::InvalidArgument(
                          "head_num (%d) * head_size (%d) should <= %d",
                          head_num,
                          head_size,
                          1024 * 2));
    TransposeQkvKernel<float2>
        <<<grid, block, 0, stream>>>(h, input2, bias2, output2);
  } else {
    const dim3 block(head_size, head_num, 1);
    // limit head_size * head_num to max block size (1024).
    PADDLE_ENFORCE_LE(head_size * head_num,
                      1024,
                      platform::errors::InvalidArgument(
                          "head_num (%d) * head_size (%d) should <= %d",
                          head_num,
                          head_size,
                          1024));
    TransposeQkvKernel<float>
        <<<grid, block, 0, stream>>>(head_size, input, bias, output);
  }
}

inline int round_up(int seq_len, int multiple = 32) {
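  // Round seq_len up to the nearest multiple (default 32, one warp) so the
  // broadcast kernel below can be launched with warp-aligned block sizes.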
  PADDLE_ENFORCE_GT(
      multiple,
      0,
      platform::errors::InvalidArgument(
          "multiple should be a positive number,but it's (%d)", multiple));
  return ((seq_len + multiple - 1) / multiple) * multiple;
}

template <typename T>
__global__ void broadcast(const T *src,
                          T *dst,
                          const int seq_len,
                          const int head_num) {
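  // Expands a [batch, 1, 1, seq_len] mask to [batch, head_num, seq_len,
  // seq_len]: every (head, query-position) row of a batch gets a copy of
  // that batch's seq_len mask values. One block writes one destination row.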
  int batch_id = blockIdx.x / (head_num * seq_len);
  int dst_offset = blockIdx.x * seq_len;
  if (threadIdx.x < seq_len) {
    dst[threadIdx.x + dst_offset] = src[threadIdx.x + batch_id * seq_len];
  }
}

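// Fused multi-head attention kernel: a single GEMM produces Q, K and V for
// all heads, TransQKVWithBias splits and transposes them, the attention core
// runs on the fused buffers, and a final transpose restores the
// [batch, seq_len, hidden] layout.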
template <typename DeviceContext, typename T>
class MultiHeadMatMulV2Kernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &context) const override {
    using Tensor = framework::Tensor;
    auto *input = context.Input<framework::Tensor>("Input");
    auto *w = context.Input<framework::Tensor>("W");
    auto *bias = context.Input<framework::Tensor>("Bias");
    auto &bias_qk = GET_DATA_SAFELY(context.Input<framework::Tensor>("BiasQK"),
                                    "Input",
                                    "BiasQK",
                                    "MultiHeadMatMulV2");

    auto *input_d = input->data<T>();
    auto *w_d = w->data<T>();
    auto *bias_d = bias->data<T>();
    auto *bias_qk_d = bias_qk.template data<T>();
    T scale = static_cast<T>(context.Attr<float>("alpha"));

    int head_number = context.Attr<int>("head_number");
    // compute q*k with eltadd
    auto &device_ctx = context.template device_context<DeviceContext>();
    auto stream = device_ctx.stream();
    // should be (B * S * hidden)
    auto input_dims = input->dims();
    // should be (hidden * 3 * all_head_size)
    auto w_dims = w->dims();
    int batch = input_dims[0];
    int seq_len = input_dims[1];
    int hidden = input_dims[2];
    Tensor temp_bias_tensor;
    // If bias_qk has shape [batch, 1, 1, seq_len] (numel == batch * seq_len),
    // broadcast it to [batch, head_number, seq_len, seq_len] before attention.
    if (bias_qk.numel() == (batch * seq_len)) {
      temp_bias_tensor.Resize({batch * head_number * seq_len * seq_len});
      auto *temp_qk_bias = temp_bias_tensor.mutable_data<T>(context.GetPlace());
      int grid = batch * head_number * seq_len;
      int block = round_up(seq_len);
      broadcast<<<grid, block, 0, stream>>>(
          bias_qk_d, temp_qk_bias, seq_len, head_number);
      bias_qk_d = static_cast<const T *>(temp_qk_bias);
    }
    int all_head_size = w_dims[2];
    int head_size = all_head_size / head_number;

    auto *out = context.Output<framework::Tensor>("Out");
    out->Resize({batch, seq_len, all_head_size});
    auto *output_d = out->mutable_data<T>(context.GetPlace());

    // (B*S, hidden)
    const Tensor input_matrix =
        framework::ReshapeToMatrix(*input, 2 /*x_num_col_dims */);
    // (hidden, 3 * all_head_size)
    const Tensor w_matrix =
        framework::ReshapeToMatrix(*w, 1 /*y_num_col_dims*/);

    Tensor temp_out_tensor;
    auto temp_out_dims =
        phi::make_ddim({batch, seq_len, 3, head_number, head_size});
    temp_out_tensor.Resize(
        {batch * seq_len, phi::product(temp_out_dims) / (batch * seq_len)});
    auto *temp_out_data = temp_out_tensor.mutable_data<T>(context.GetPlace());

    // (B * S, hidden) * (hidden, 3 * N * H) -> (B * S * 3 * N * H)
    auto blas = phi::funcs::GetBlas<phi::GPUContext, T>(device_ctx);
    blas.MatMul(input_matrix, w_matrix, &temp_out_tensor);

    // temp_out_tensor.Resize(temp_out_dims);

    Tensor multihead_temp_tensor;
    // B * head_number * S * S * 1 + B * S * 3 * N * H
    int scratch_size = batch * head_number * seq_len * seq_len * 1;
    multihead_temp_tensor.Resize({scratch_size + temp_out_tensor.numel()});
    auto *multihead_temp_data =
        multihead_temp_tensor.mutable_data<T>(context.GetPlace());
    auto *qkptr = multihead_temp_data;
    auto *tptr = multihead_temp_data + scratch_size;

    // Do the transpose with bias.
    // BxSx3xNxH => tptr: 3xBxNxSxH.
    TransQKVWithBias(batch,
                     seq_len,
                     head_size,
                     head_number,
                     temp_out_data,
                     bias_d,
                     tptr,
                     stream);

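    // Attention core (see bert_encoder_functor.h): softmax(scale * Q * K^T +
    // bias_qk) * V, using qkptr as scratch for the attention scores and
    // writing the context result back into tptr.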
    math::MultiHeadGPUComputeFunctor<T> multihead_compute_func;
    multihead_compute_func(device_ctx,
                           batch,
                           seq_len,
                           head_number,
                           head_size,
                           qkptr,
                           bias_qk_d,
                           tptr,
                           scale,
                           T(0.0));

    int grid = batch * head_number * seq_len;
    int block = head_size;
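    // Transpose the attention output from [B, N, S, H] back to
    // [B, S, N * H] so Out has shape (batch, seq_len, all_head_size).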
    transpose<T><<<grid, block, 0, stream>>>(
        tptr, output_d, batch, seq_len, head_number, head_size);
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(multihead_matmul,
                        ops::MultiHeadMatMulV2Kernel<phi::GPUContext, float>);