/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifdef __NVCC__
#include "cub/cub.cuh"
#endif
#ifdef __HIPCC__
#include <hipcub/hipcub.hpp>
namespace cub = hipcub;
#endif

#include "paddle/fluid/operators/group_norm_op.h"
#include "paddle/fluid/platform/device/gpu/gpu_device_function.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"

namespace paddle {
namespace operators {

using DataLayout = framework::DataLayout;
enum GroupNormKernelFlags { kHasScale = 1, kHasBias = 2 };
#define ALIGN_BYTES 16

#define CHECK_CASE(i, flags, kernel_name, ...)                              \
  if (i == flags) {                                                         \
    kernel_name<T, i><<<grid, threads, 0, dev_ctx.stream()>>>(__VA_ARGS__); \
  }

// 0 for no scale, no bias
// 1 for has scale, no bias
// 2 for no scale, has bias
// 3 for has scale, has bias
#define UNROLL_ALL_CASES(flags, kernel_name, ...) \
  CHECK_CASE(0, flags, kernel_name, __VA_ARGS__)  \
  CHECK_CASE(1, flags, kernel_name, __VA_ARGS__)  \
  CHECK_CASE(2, flags, kernel_name, __VA_ARGS__)  \
  CHECK_CASE(3, flags, kernel_name, __VA_ARGS__)

template <typename T>
__device__ __inline__ void CudaAtomicAddWithWarp(T* sum, T value) {
  typedef cub::WarpReduce<T> WarpReduce;
  typename WarpReduce::TempStorage temp_storage;
  value = WarpReduce(temp_storage).Sum(value);
  if (cub::LaneId() == 0) platform::CudaAtomicAdd(sum, value);
}

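// NHWC forward statistics: one block per (batch, group, channel-in-group).
// Each block accumulates its channel's share of the group's mean and mean of
// squares over the spatial positions, then adds the partial sums into
// mean[bid * groups + gid] and var[bid * groups + gid] with one atomic per
// warp. At this stage `var` holds E[x * x]; it is converted to the variance
// in GroupNormForward.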
template <typename T>
__global__ void GroupNormForwardGetMeanAndVar(const T* x,
                                              int N,
                                              int C,
                                              int W,
                                              int imsize,
                                              int groups,
                                              int group_size,
                                              T* mean,
                                              T* var) {
  int gid = blockIdx.y;
  int cid = blockIdx.x;
  int bid = blockIdx.z;
  int H = imsize / W;
  int number = min(group_size, static_cast<int>(C - gid * group_size));
  int ccid = gid * group_size + cid;
  if (ccid >= C) return;
  T x_mean = 0, x_var = 0;
  for (int imid = threadIdx.x; imid < imsize; imid += blockDim.x) {
    T val;
    int hid = imid / W;
    int wid = imid % W;
    val = x[(bid * H + hid) * W * C + wid * C + ccid];

    x_mean += val;
    x_var += val * val;
  }
  x_mean /= number * imsize;
  x_var /= number * imsize;
  CudaAtomicAddWithWarp(&mean[bid * groups + gid], x_mean);
  CudaAtomicAddWithWarp(&var[bid * groups + gid], x_var);
}

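// Block-strided accumulation of sum and sum-of-squares (Num == 1), or of
// sum(y) and sum(x * y) (Num == 2), using vectorized loads: a scalar prologue
// consumes the misaligned head indicated by `offset`, the main loop loads
// VecSize elements per thread through VecT, and a scalar tail handles the
// remainder.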
template <typename T, typename AccT, int VecSize, int Num>
__device__ __forceinline__ void ThreadReduce(phi::Array<const T*, Num> arrs,
                                             int size,
                                             const int offset,
                                             AccT* out_mean,
                                             AccT* out_var) {
  const T* x = arrs[0];
  const T* y;
  if (Num == 2) {
    y = arrs[1];
  }
  using VecT = kps::details::VectorType<T, VecSize>;
  int tid = threadIdx.x;
  if (offset > 0) {
    x -= offset;
    if (Num == 2) {
      y -= offset;
    }
    size += offset;
    if (tid >= offset) {
      if (Num == 1) {
        *out_mean += x[tid];
        *out_var += x[tid] * x[tid];
      } else if (Num == 2) {
        *out_mean += y[tid];
        *out_var += y[tid] * x[tid];
      }
    }
    size -= blockDim.x;
    x += blockDim.x;
    if (Num == 2) {
      y += blockDim.x;
    }
  }
  int remain = size % (VecSize * blockDim.x);

  T ins_x[VecSize];
  T ins_y[VecSize];
  VecT* ins_vec_x = reinterpret_cast<VecT*>(&ins_x);
  VecT* ins_vec_y = reinterpret_cast<VecT*>(&ins_y);

  // vector part
  for (; VecSize * tid < (size - remain); tid += blockDim.x) {
    *ins_vec_x = reinterpret_cast<const VecT*>(x)[tid];
    if (Num == 2) {
      *ins_vec_y = reinterpret_cast<const VecT*>(y)[tid];
    }

#pragma unroll
    for (int i = 0; i < VecSize; ++i) {
      if (Num == 1) {
        *out_mean += ins_x[i];
        *out_var += ins_x[i] * ins_x[i];
      } else if (Num == 2) {
        *out_mean += ins_y[i];
        *out_var += ins_y[i] * ins_x[i];
      }
    }
  }

  // scalar part
  tid = size - remain + threadIdx.x;
  for (; tid < size; tid += blockDim.x) {
    if (Num == 1) {
      *out_mean += x[tid];
      *out_var += x[tid] * x[tid];
    } else if (Num == 2) {
      *out_mean += y[tid];
      *out_var += y[tid] * x[tid];
    }
  }
}

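// Block-wide reduction of the per-thread partial sums; thread 0 writes
// mean[blockIdx.x] = sum / size and var[blockIdx.x] = sum_of_squares / size.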
template <typename T>
__device__ __forceinline__ void ReduceMeanAndVar(
    T* mean, T* var, T x_mean, T x_var, int size) {
  const int nc = blockIdx.x;
  x_mean = kps::details::BlockXReduce<T, kps::AddFunctor<T>>(
      x_mean, kps::AddFunctor<T>());
  x_var = kps::details::BlockXReduce<T, kps::AddFunctor<T>>(
      x_var, kps::AddFunctor<T>());
  __syncthreads();
  if (threadIdx.x == 0) {
    mean[nc] = static_cast<T>(x_mean / size);
    var[nc] = static_cast<T>(x_var / size);
  }
}

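// NCHW forward statistics: one block per (batch, group) reduces the group's
// contiguous `size` elements. This scalar kernel is the fallback; the
// vectorized variant below routes its loads through ThreadReduce.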
template <typename T>
__global__ void ScalarGetMeanAndVarNCHW(const T* x, T* mean, T* var, int size) {
  int i = blockIdx.x;
  T x_mean = 0, x_var = 0;
  for (int j = threadIdx.x; j < size; j += blockDim.x) {
    T val;
    val = x[i * size + j];
    x_mean += val;
    x_var += val * val;
  }
  ReduceMeanAndVar<T>(mean, var, x_mean, x_var, size);
}

template <typename T, typename AccT, int VecSize>
__global__ void VectorizedGetMeanAndVarNCHW(const T* x,
                                            T* mean,
                                            T* var,
                                            int size) {
  int i = blockIdx.x;
  AccT x_mean = static_cast<AccT>(0);
  AccT x_var = static_cast<AccT>(0);
  x += i * size;
  const int input_offset = ((uint64_t)x) % ALIGN_BYTES / sizeof(T);
  phi::Array<const T*, 1> ins;
  ins[0] = x;
  ThreadReduce<T, AccT, VecSize, 1>(ins, size, input_offset, &x_mean, &x_var);
  ReduceMeanAndVar<AccT>(mean, var, x_mean, x_var, size);
}

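// Forward normalization: converts the accumulated E[x * x] into the variance,
// stores it once per (batch, group) in real_var, and writes
// y = (x - mean) * rsqrt(var + epsilon), optionally applying scale and bias
// according to the compile-time `flags`. Handles both NCHW and NHWC layouts.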
template <typename T, int flags>
__global__ void GroupNormForward(const T* x,
                                 const T* mean,
                                 const T* var,
                                 const T* scale,
                                 const T* bias,
                                 int N,
                                 int C,
                                 int W,
                                 int imsize,
                                 int groups,
                                 int group_size,
                                 T epsilon,
                                 T* y,
                                 T* real_var,
                                 const DataLayout data_layout) {
  int gid = blockIdx.y;
  int cid = blockIdx.x;
  int bid = blockIdx.z;
  int H = imsize / W;
  int ccid = gid * group_size + cid;
  if (ccid >= C) return;
  auto ng = bid * groups + gid;
  T x_mean = mean[ng];
  T x_var = var[ng];
  x_var = x_var - x_mean * x_mean;
  T var_inv = rsqrt(x_var + epsilon);
  if (cid == 0 && threadIdx.x == 0) {
    real_var[ng] = x_var;
  }
  for (int imid = threadIdx.x; imid < imsize; imid += blockDim.x) {
    T val;
    int hid, wid;
    int index = (bid * C + ccid) * imsize + imid;
    if (data_layout == DataLayout::kNCHW) {
      val = x[index];
    } else {
      hid = imid / W;
      wid = imid % W;
      val = x[(bid * H + hid) * W * C + wid * C + ccid];
    }
    val = (val - x_mean) * var_inv;
    if (flags & kHasScale) {
      val *= scale[ccid];
    }
    if (flags & kHasBias) {
      val += bias[ccid];
    }
    if (data_layout == DataLayout::kNCHW) {
      y[index] = val;
    } else {
      y[(bid * H + hid) * W * C + wid * C + ccid] = val;
    }
  }
}

template <typename T>
class GroupNormKernel<phi::GPUContext, T> : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
    const DataLayout data_layout =
        framework::StringToDataLayout(data_layout_str);
    const float epsilon = ctx.Attr<float>("epsilon");
    auto* scale = ctx.Input<Tensor>("Scale");
    auto* bias = ctx.Input<Tensor>("Bias");
    auto* x = ctx.Input<Tensor>("X");

    auto* y = ctx.Output<Tensor>("Y");
    auto* mean = ctx.Output<Tensor>("Mean");
    auto* var = ctx.Output<Tensor>("Variance");
    const auto groups = ctx.Attr<int>("groups");

    const auto x_dims = x->dims();
    const int C =
        (data_layout == DataLayout::kNCHW ? x_dims[1]
                                          : x_dims[x_dims.size() - 1]);
    const int group_size = C / groups;

    const int W =
        (data_layout == DataLayout::kNCHW ? x_dims[x_dims.size() - 1]
                                          : x_dims[x_dims.size() - 2]);

    y->mutable_data<T>(ctx.GetPlace());
    mean->mutable_data<T>(ctx.GetPlace());
    var->mutable_data<T>(ctx.GetPlace());
    phi::funcs::SetConstant<phi::GPUContext, T> set_zero;
    auto& dev_ctx = ctx.template device_context<phi::GPUContext>();
    Tensor temp_var;
    temp_var.mutable_data<T>(var->dims(), ctx.GetPlace());
    auto* x_data = x->data<T>();
    auto* y_data = y->data<T>();
    auto* mean_data = mean->data<T>();
    auto* var_data = var->data<T>();
    auto* temp_var_data = temp_var.data<T>();

    const T* scale_data = nullptr;
    if (scale) scale_data = scale->data<T>();
    const T* bias_data = nullptr;
    if (bias) bias_data = bias->data<T>();

    int imsize = 1;
    if (data_layout == DataLayout::kNCHW) {
      for (int i = 2; i < x_dims.size(); ++i) {
        imsize *= x_dims[i];
      }
    } else {
      for (int i = 1; i < x_dims.size() - 1; ++i) {
        imsize *= x_dims[i];
      }
    }

#ifdef __HIPCC__
    int block_size = std::max(std::min(256, imsize), 64);
#else
    int block_size = std::min(1024, imsize);
#endif

    dim3 grid(group_size, groups, x_dims[0]);
    dim3 threads(block_size, 1, 1);
    if (data_layout == DataLayout::kNCHW) {
      using AccT = typename details::MPTypeTrait<T>::Type;
      constexpr int vec_size = sizeof(float4) / sizeof(T);
      int size = group_size * imsize;
      const int max_num_threads = 1024;
      int max_block_size = std::min(size / vec_size, max_num_threads);
      int block_size_nchw = 1;
      while (block_size_nchw < max_block_size) {
        block_size_nchw *= 2;
      }
      block_size_nchw = std::max(block_size_nchw, kps::details::kWarpSize);
      dim3 grids(x_dims[0] * groups);
      dim3 blocks(block_size_nchw);
      if (size < vec_size * block_size_nchw) {
        ScalarGetMeanAndVarNCHW<T><<<grids, blocks, 0, dev_ctx.stream()>>>(
            x_data, mean_data, temp_var_data, size);
      } else {
        VectorizedGetMeanAndVarNCHW<T, AccT, vec_size>
            <<<grids, blocks, 0, dev_ctx.stream()>>>(
                x_data, mean_data, temp_var_data, size);
      }
    } else {
      set_zero(dev_ctx, mean, static_cast<T>(0));
      set_zero(dev_ctx, &temp_var, static_cast<T>(0));
      GroupNormForwardGetMeanAndVar<T>
          <<<grid, threads, 0, dev_ctx.stream()>>>(x_data,
                                                   x_dims[0],
                                                   C,
                                                   W,
                                                   imsize,
                                                   groups,
                                                   group_size,
                                                   mean_data,
                                                   temp_var_data);
    }
    int flags =
        (scale_data != nullptr) * kHasScale + (bias_data != nullptr) * kHasBias;
    UNROLL_ALL_CASES(flags,
                     GroupNormForward,
                     x_data,
                     mean_data,
                     temp_var_data,
                     scale_data,
                     bias_data,
                     x_dims[0],
                     C,
                     W,
                     imsize,
                     groups,
                     group_size,
                     epsilon,
                     y_data,
                     var_data,
                     data_layout);
  }
};

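// NHWC backward, stage 1 (the `x` argument is the forward output y):
// accumulates per (batch, group) the sums of d_y * scale (into d_mean) and
// (y - bias) * d_y (into d_var), and per channel the sums of d_y (into
// d_bias) and x_hat * d_y (into d_scale), where x_hat = (y - bias) / scale.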
template <typename T, int flags>
__global__ void GroupNormBackwardGetMeanAndVar(const T* x,
                                               const T* scale,
                                               const T* bias,
                                               const T* d_y,
                                               int N,
                                               int C,
                                               int W,
                                               int imsize,
                                               int groups,
                                               int group_size,
                                               T epsilon,
                                               T* d_mean,
                                               T* d_var,
                                               T* d_scale,
                                               T* d_bias) {
  int gid = blockIdx.y;
  int cid = blockIdx.x;
  int bid = blockIdx.z;
  int H = imsize / W;
  int number = min(group_size, static_cast<int>(C - gid * group_size));
  int ccid = gid * group_size + cid;
  if (ccid >= C) return;
  T x_scale = (flags & kHasScale) ? scale[ccid] : 1;
  T x_bias = (flags & kHasBias) ? bias[ccid] : 0;
  T x_scale_inv = 0;
  if (x_scale != 0) x_scale_inv = 1.0 / x_scale;
  T d_mean_data = 0, d_var_data = 0, d_scale_data = 0, d_bias_data = 0;

  for (int imid = threadIdx.x; imid < imsize; imid += blockDim.x) {
    T val, dval;

    int hid = imid / W;
    int wid = imid % W;
    val = x[(bid * H + hid) * W * C + wid * C + ccid] - x_bias;
    dval = d_y[(bid * H + hid) * W * C + wid * C + ccid];

    d_var_data += val * dval;
    d_mean_data += dval * x_scale;

    val = val * x_scale_inv;
    d_bias_data += dval;
    d_scale_data += val * dval;
  }
  CudaAtomicAddWithWarp(&(d_mean[bid * groups + gid]), d_mean_data);
  CudaAtomicAddWithWarp(&(d_var[bid * groups + gid]), d_var_data);
  if (flags & kHasScale) CudaAtomicAddWithWarp(&(d_scale[ccid]), d_scale_data);
  if (flags & kHasBias) CudaAtomicAddWithWarp(&(d_bias[ccid]), d_bias_data);
}

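// NHWC backward, stage 2: recovers x_hat = (y - bias) / scale from the
// forward output and combines it with the stage-1 sums to produce
// d_x = var_inv * (d_y * scale - (d_var * x_hat + d_mean) / (number * imsize)).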
template <typename T, int flags>
__global__ void GroupNormBackward(const T* x,
                                  const T* d_y,
                                  const T* scale,
                                  const T* bias,
                                  const T* var,
                                  const T* d_mean,
                                  const T* d_var,
                                  int N,
                                  int C,
                                  int W,
                                  int imsize,
                                  int groups,
                                  int group_size,
                                  T epsilon,
                                  T* d_x) {
  int gid = blockIdx.y;
  int cid = blockIdx.x;
  int bid = blockIdx.z;
  int H = imsize / W;
  int number = min(group_size, static_cast<int>(C - gid * group_size));
  int ccid = gid * group_size + cid;
  if (ccid >= C) return;
  T x_var = var[bid * groups + gid];
  T d_x_mean = d_mean[bid * groups + gid];
  T d_x_var = d_var[bid * groups + gid];

  T x_var_inv = 1.0 / sqrt(x_var + epsilon);
  T number_inv = 1.0 / (number * imsize);

  T x_scale = (flags & kHasScale) ? scale[ccid] : 1;
  T x_bias = (flags & kHasBias) ? bias[ccid] : 0;
  T x_scale_inv = 0;
  if (x_scale != 0) x_scale_inv = 1.0 / x_scale;

  for (int imid = threadIdx.x; imid < imsize; imid += blockDim.x) {
    int hid = imid / W;
    int wid = imid % W;
    T tmp = x[(bid * H + hid) * W * C + wid * C + ccid];
    T v_y = (tmp - x_bias) * x_scale_inv;
    T dly = d_y[(bid * H + hid) * W * C + wid * C + ccid];
    d_x[(bid * H + hid) * W * C + wid * C + ccid] =
        x_var_inv *
        (dly * x_scale - number_inv * d_x_var * v_y - number_inv * d_x_mean);
  }
}

template <typename T>
__global__ void ScalarGetDsDbCUDAKernel(
    int imsize, const T* x, const T* dy, T* ds, T* db) {
  const int nc = blockIdx.x;
  T ds_sum = 0;
  T db_sum = 0;
  for (int i = threadIdx.x; i < imsize; i += blockDim.x) {
    const int index = nc * imsize + i;
    ds_sum += dy[index] * x[index];
    db_sum += dy[index];
  }
  ReduceMeanAndVar<T>(db, ds, db_sum, ds_sum, 1);
}

template <typename T>
__global__ void GetScaleBiasGradientCUDAKernel(int N,
                                               int C,
                                               int group,
                                               T epsilon,
                                               const T* mean,
                                               const T* var,
                                               const T* ds,
                                               const T* db,
                                               T* d_scale,
                                               T* d_bias) {
  const int c = blockIdx.x * blockDim.x + threadIdx.x;
  if (c < C) {
    const int G = group;
    const int D = C / G;
    T sum1 = 0;
    T sum2 = 0;
    for (int n = 0; n < N; ++n) {
      const int nc = n * C + c;
      const int ng = n * G + c / D;
      sum1 += (d_scale == nullptr)
                  ? T(0)
                  : ((ds[nc] - db[nc] * static_cast<T>(mean[ng])) *
                     static_cast<T>(rsqrt(var[ng] + epsilon)));
      sum2 += (d_bias == nullptr) ? T(0) : db[nc];
    }
    if (d_scale != nullptr) {
      d_scale[c] = sum1;
    }
    if (d_bias != nullptr) {
      d_bias[c] = sum2;
    }
  }
}

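// NCHW backward: one block per (batch, group). Computes the per-(batch,
// channel) coefficient p1 = scale * var_inv and, after a block-wide reduction
// of the scaled ds/db sums, the per-(batch, group) coefficients p2 and p3 of
// d_x = p1 * d_y + p2 * x + p3 (see the comment at the call site below).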
template <typename T, int BlockDim>
__global__ void GetBackwardParamsCUDAKernel(int imsize,
                                            int groups,
                                            int group_size,
                                            T epsilon,
                                            const T* mean,
                                            const T* var,
                                            const T* scale,
                                            const T* ds,
                                            const T* db,
                                            T* p1,
                                            T* p2,
                                            T* p3) {
  const int n = blockIdx.x;
  const int g = blockIdx.y;
  const int ng = n * groups + g;
  T sum1 = 0;
  T sum2 = 0;
  T var_inv = rsqrt(var[ng] + epsilon);
  for (int64_t i = threadIdx.x; i < group_size; i += blockDim.x) {
    const int64_t index = ng * group_size + i;
    const int64_t c = g * group_size + i;
    const T scale_v = scale == nullptr ? T(1) : static_cast<T>(scale[c]);
    sum1 += ds[index] * scale_v;
    sum2 += db[index] * scale_v;
    const T scale_c = scale == nullptr ? T(0) : static_cast<T>(scale[c]);
    p1[index] = scale_c * var_inv;
  }

  typedef cub::BlockReduce<T, BlockDim> BlockReduce;
  __shared__ typename BlockReduce::TempStorage ds_storage;
  __shared__ typename BlockReduce::TempStorage db_storage;
  sum1 = BlockReduce(ds_storage).Reduce(sum1, cub::Sum());
  sum2 = BlockReduce(db_storage).Reduce(sum2, cub::Sum());

  if (threadIdx.x == 0) {
    const T s = T(1) / static_cast<T>(group_size * imsize);
    const T x = (sum2 * static_cast<T>(mean[ng]) - sum1) *
                static_cast<T>(var_inv) * static_cast<T>(var_inv) *
                static_cast<T>(var_inv) * s;
    p2[ng] = x;
    p3[ng] = -x * static_cast<T>(mean[ng]) - sum2 * static_cast<T>(var_inv) * s;
  }
}

template <typename T>
__global__ void GetXGradientCUDAKernel(int imsize,
                                       int C,
                                       int group_size,
                                       int groups,
                                       T* p1,
                                       T* p2,
                                       T* p3,
                                       const T* x,
                                       const T* dy,
                                       T* dx) {
  int cid = blockIdx.x;
  int gid = blockIdx.y;
  int bid = blockIdx.z;
  int ccid = bid * C + gid * group_size + cid;
  int ng = bid * groups + gid;
  int nc = gid * group_size + cid;
  for (int imid = threadIdx.x; imid < imsize; imid += blockDim.x) {
    int index = (bid * C + nc) * imsize + imid;
    dx[index] = p1[ccid] * dy[index] + p2[ng] * x[index] + p3[ng];
  }
}

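// Gradient kernel: the NCHW layout uses the ds/db reductions plus the
// (p1, p2, p3) coefficient kernels above; NHWC uses the two-stage
// GroupNormBackwardGetMeanAndVar / GroupNormBackward path.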
template <typename T>
class GroupNormGradKernel<phi::GPUContext, T> : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
    const DataLayout data_layout =
        framework::StringToDataLayout(data_layout_str);
    const float epsilon = ctx.Attr<float>("epsilon");
    auto* x = ctx.Input<Tensor>("X");
    auto* y = ctx.Input<Tensor>("Y");
    auto* mean = ctx.Input<Tensor>("Mean");
    auto* var = ctx.Input<Tensor>("Variance");
    auto* scale = ctx.Input<Tensor>("Scale");
    auto* bias = ctx.Input<Tensor>("Bias");
    auto* d_y = ctx.Input<Tensor>(framework::GradVarName("Y"));
    const auto groups = ctx.Attr<int>("groups");

    // init output
    auto* d_x = ctx.Output<Tensor>(framework::GradVarName("X"));
    auto* d_scale = ctx.Output<Tensor>(framework::GradVarName("Scale"));
    auto* d_bias = ctx.Output<Tensor>(framework::GradVarName("Bias"));

    const auto& x_dims = x->dims();
    const int C =
        (data_layout == DataLayout::kNCHW ? x_dims[1]
                                          : x_dims[x_dims.size() - 1]);
    const int group_size = C / groups;
    const int W =
        (data_layout == DataLayout::kNCHW ? x_dims[x_dims.size() - 1]
                                          : x_dims[x_dims.size() - 2]);

    d_x->mutable_data<T>(ctx.GetPlace());
    phi::funcs::SetConstant<phi::GPUContext, T> set_zero;
    auto& dev_ctx = ctx.template device_context<phi::GPUContext>();

    Tensor ds, db;
    ds.mutable_data<T>({x_dims[0], C}, ctx.GetPlace());
    db.mutable_data<T>({x_dims[0], C}, ctx.GetPlace());
    T* ds_data = ds.data<T>();
    T* db_data = db.data<T>();

    auto* y_data = y->data<T>();
    auto* x_data = x->data<T>();
    T* d_x_data = nullptr;
    if (d_x) d_x_data = d_x->data<T>();
    auto* dy_data = d_y->data<T>();
    auto* var_data = var->data<T>();
    auto* mean_data = mean->data<T>();
    T* d_scale_data = nullptr;
    if (d_scale) {
      d_scale->mutable_data<T>(ctx.GetPlace());
      d_scale_data = d_scale->data<T>();
    }
    T* d_bias_data = nullptr;
    if (d_bias) {
      d_bias->mutable_data<T>(ctx.GetPlace());
      d_bias_data = d_bias->data<T>();
    }

    const T* scale_data = nullptr;
    if (scale) scale_data = scale->data<T>();
    const T* bias_data = nullptr;
    if (bias) bias_data = bias->data<T>();

    int imsize = 1;
    if (data_layout == DataLayout::kNCHW) {
      for (int i = 2; i < x_dims.size(); ++i) {
        imsize *= x_dims[i];
      }
    } else {
      for (int i = 1; i < x_dims.size() - 1; ++i) {
        imsize *= x_dims[i];
      }
    }

#ifdef __HIPCC__
    int block_size = std::max(std::min(256, imsize), 64);
    const int block_dims = 256;
#else
    int block_size = std::min(1024, imsize);
    const int block_dims = 1024;
#endif
    dim3 grid(group_size, groups, x_dims[0]);
    dim3 threads(block_size, 1, 1);
    int flags =
        (scale_data != nullptr) * kHasScale + (bias_data != nullptr) * kHasBias;
    if (data_layout == DataLayout::kNCHW) {
      const int max_num_threads = 1024;
      int max_block_size = std::min(imsize, max_num_threads);
      int block_size_nchw = 1;
      while (block_size_nchw < max_block_size) {
        block_size_nchw *= 2;
      }
      block_size_nchw = std::max(block_size_nchw, kps::details::kWarpSize);
      dim3 blocks(block_size_nchw);
      ScalarGetDsDbCUDAKernel<T>
          <<<x_dims[0] * C, blocks, 0, dev_ctx.stream()>>>(
              imsize, x_data, dy_data, ds_data, db_data);

      if (d_scale || d_bias) {
        const int block = 256;
        GetScaleBiasGradientCUDAKernel<T>
            <<<(C + block - 1) / block, block, 0, dev_ctx.stream()>>>(
                x_dims[0],
                C,
                groups,
                epsilon,
                mean_data,
                var_data,
                ds_data,
                db_data,
                d_scale_data,
                d_bias_data);
      }

      if (d_x_data != nullptr) {
        // d_x is computed as p1 * dy + p2 * x + p3, where p1 is a
        // per-(batch, channel) coefficient and p2, p3 are per-(batch, group)
        // coefficients:
        // p1 = scale * var_inv
        // p2 = (db * scale * mean - ds * scale) * pow(var_inv, 3) * (1/n)
        // p3 = -p2 * mean[ng] - db * scale * var_inv * (1/n)
        Tensor p1, p2, p3;
        p1.mutable_data<T>({x_dims[0] * C}, ctx.GetPlace());
        p2.mutable_data<T>({x_dims[0], groups}, ctx.GetPlace());
        p3.mutable_data<T>({x_dims[0], groups}, ctx.GetPlace());
        T* p1_data = p1.data<T>();
        T* p2_data = p2.data<T>();
        T* p3_data = p3.data<T>();

        GetBackwardParamsCUDAKernel<T, block_dims>
            <<<dim3(x_dims[0], groups), block_dims, 0, dev_ctx.stream()>>>(
                imsize,
                groups,
                group_size,
                epsilon,
                mean_data,
                var_data,
                scale_data,
                ds_data,
                db_data,
                p1_data,
                p2_data,
                p3_data);
        GetXGradientCUDAKernel<T>
            <<<grid, threads, 0, dev_ctx.stream()>>>(imsize,
                                                     C,
                                                     group_size,
                                                     groups,
                                                     p1_data,
                                                     p2_data,
                                                     p3_data,
                                                     x_data,
                                                     dy_data,
                                                     d_x_data);
      }
    } else {
      if (d_scale) {
        set_zero(dev_ctx, d_scale, static_cast<T>(0));
      }
      if (d_bias) {
        set_zero(dev_ctx, d_bias, static_cast<T>(0));
      }

      Tensor temp_var;
      temp_var.mutable_data<T>(var->dims(), ctx.GetPlace());
      set_zero(dev_ctx, &temp_var, static_cast<T>(0));
      T* temp_var_data = temp_var.data<T>();

      Tensor temp_mean;
      temp_mean.mutable_data<T>(var->dims(), ctx.GetPlace());
      set_zero(dev_ctx, &temp_mean, static_cast<T>(0));
      T* temp_mean_data = temp_mean.data<T>();

      int flags = (scale_data != nullptr) * kHasScale +
                  (bias_data != nullptr) * kHasBias;
      UNROLL_ALL_CASES(flags,
                       GroupNormBackwardGetMeanAndVar,
                       y_data,
                       scale_data,
                       bias_data,
                       dy_data,
                       x_dims[0],
                       C,
                       W,
                       imsize,
                       groups,
                       group_size,
                       epsilon,
                       temp_mean_data,
                       temp_var_data,
                       d_scale_data,
                       d_bias_data);
      if (d_x_data != nullptr) {
        UNROLL_ALL_CASES(flags,
                         GroupNormBackward,
                         y_data,
                         dy_data,
                         scale_data,
                         bias_data,
                         var_data,
                         temp_mean_data,
                         temp_var_data,
                         x_dims[0],
                         C,
                         W,
                         imsize,
                         groups,
                         group_size,
                         epsilon,
                         d_x_data);
      }
    }
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(group_norm,
                        ops::GroupNormKernel<phi::GPUContext, float>,
                        ops::GroupNormKernel<phi::GPUContext, double>);
REGISTER_OP_CUDA_KERNEL(group_norm_grad,
                        ops::GroupNormGradKernel<phi::GPUContext, float>,
                        ops::GroupNormGradKernel<phi::GPUContext, double>);