/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#ifdef PADDLE_WITH_CUDA
#include <cuda.h>
#endif
#ifdef PADDLE_WITH_HIP
#include <hip/hip_runtime.h>
#endif
#include <stdio.h>

#include <cstdint>  // uint16_t / uint32_t / uintptr_t used below

#include "paddle/phi/common/bfloat16.h"
#include "paddle/phi/common/complex.h"
#include "paddle/phi/common/float16.h"

template <typename T>
using complex = phi::dtype::complex<T>;

namespace phi {

#define CUDA_ATOMIC_WRAPPER(op, T) \
  __device__ __forceinline__ T CudaAtomic##op(T *address, const T val)

#define USE_CUDA_ATOMIC(op, T) \
  CUDA_ATOMIC_WRAPPER(op, T) { return atomic##op(address, val); }
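
// For example, USE_CUDA_ATOMIC(Add, float) expands to:
//   __device__ __forceinline__ float CudaAtomicAdd(float *address,
//                                                  const float val) {
//     return atomicAdd(address, val);
//   }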

// Default thread count per block (or block size).
// TODO(typhoonzero): need to benchmark against setting this value
//                    to 1024.
constexpr int PADDLE_CUDA_NUM_THREADS = 512;
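
// A minimal launch sketch (hypothetical kernel, for illustration only):
//   int64_t blocks =
//       (numel + PADDLE_CUDA_NUM_THREADS - 1) / PADDLE_CUDA_NUM_THREADS;
//   some_kernel<<<blocks, PADDLE_CUDA_NUM_THREADS>>>(args...);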

// For atomicAdd.
USE_CUDA_ATOMIC(Add, float);
USE_CUDA_ATOMIC(Add, int);
USE_CUDA_ATOMIC(Add, unsigned int);
// The CUDA API uses unsigned long long int; we cannot use uint64_t here
// because unsigned long long int is not necessarily uint64_t.
USE_CUDA_ATOMIC(Add, unsigned long long int);  // NOLINT

CUDA_ATOMIC_WRAPPER(Add, int64_t) {
  // Here, we check that long long int has the same size as int64_t.
  static_assert(sizeof(int64_t) == sizeof(long long int),  // NOLINT
                "long long should be int64");
  return CudaAtomicAdd(
      reinterpret_cast<unsigned long long int *>(address),  // NOLINT
      static_cast<unsigned long long int>(val));            // NOLINT
}

#if defined(__HIPCC__) || (defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 600)
USE_CUDA_ATOMIC(Add, double);
#else
CUDA_ATOMIC_WRAPPER(Add, double) {
  unsigned long long int *address_as_ull =                  // NOLINT
      reinterpret_cast<unsigned long long int *>(address);  // NOLINT
  unsigned long long int old = *address_as_ull, assumed;    // NOLINT

  do {
    assumed = old;
    old = atomicCAS(address_as_ull,
                    assumed,
                    __double_as_longlong(val + __longlong_as_double(assumed)));

    // Note: uses integer comparison to avoid hang in case of NaN
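    // (a floating-point comparison would report NaN != NaN even when the bit
    // patterns match, so the loop could spin forever)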
  } while (assumed != old);

  return __longlong_as_double(old);
}
#endif

#ifdef PADDLE_CUDA_FP16
// NOTE(dzhwinter): CUDA does not provide atomicCAS for half. Instead, treat
// the half's address as part of an aligned 32-bit word and run atomicCAS on
// that word. Depending on whether the value lives in the high or the low
// 16 bits, perform the corresponding sum and CAS.
// Since most warp threads will fail the atomicCAS, this implementation
// should be avoided under high concurrency; it is slower than converting
// the value to 32 bits and doing a full-width atomicCAS.

// Convert the value to float, do the add arithmetic, then store the result
// back into a uint32.
inline static __device__ uint32_t add_to_low_half(uint32_t val, float x) {
  phi::dtype::float16 low_half;
  // The float16 in the lower 16 bits.
  low_half.x = static_cast<uint16_t>(val & 0xFFFFu);
  low_half = static_cast<phi::dtype::float16>(static_cast<float>(low_half) + x);
  return (val & 0xFFFF0000u) | low_half.x;
}

inline static __device__ uint32_t add_to_high_half(uint32_t val, float x) {
  phi::dtype::float16 high_half;
  // The float16 in the higher 16 bits.
  high_half.x = static_cast<uint16_t>(val >> 16);
  high_half =
      static_cast<phi::dtype::float16>(static_cast<float>(high_half) + x);
  return (val & 0xFFFFu) | (static_cast<uint32_t>(high_half.x) << 16);
}
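
// For example, with val = 0xAAAABBBB: add_to_low_half updates only the 0xBBBB
// (low) half and leaves 0xAAAA intact; add_to_high_half does the opposite.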

#if CUDA_VERSION >= 10000 && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
static __device__ __forceinline__ phi::dtype::float16 CUDAFP16ToPDFP16(
    __half x) {
  return *reinterpret_cast<phi::dtype::float16 *>(&x);
}

static __device__ __forceinline__ __half
PDFP16ToCUDAFP16(phi::dtype::float16 x) {
  return *reinterpret_cast<__half *>(&x);
}

CUDA_ATOMIC_WRAPPER(Add, phi::dtype::float16) {
  return CUDAFP16ToPDFP16(
      atomicAdd(reinterpret_cast<__half *>(address), PDFP16ToCUDAFP16(val)));
}
#else
CUDA_ATOMIC_WRAPPER(Add, phi::dtype::float16) {
  // The packed float16 value may live in either the lower or the higher
  // 16 bits of the aligned 32-bit word.
  uint32_t *address_as_ui = reinterpret_cast<uint32_t *>(
      reinterpret_cast<char *>(address) -
      (reinterpret_cast<uintptr_t>(address) & 0x02));
  float val_f = static_cast<float>(val);
  uint32_t old = *address_as_ui;
  uint32_t assumed;
  if (((uintptr_t)address & 0x02) == 0) {
    // The float16 value stays at the lower 16 bits of the address.
    do {
      assumed = old;
      old = atomicCAS(address_as_ui, assumed, add_to_low_half(assumed, val_f));
    } while (old != assumed);
    phi::dtype::float16 ret;
    ret.x = old & 0xFFFFu;
    return ret;
  } else {
    // The float16 value stays at the higher 16 bits of the address.
    do {
      assumed = old;
      old = atomicCAS(address_as_ui, assumed, add_to_high_half(assumed, val_f));
    } while (old != assumed);
    phi::dtype::float16 ret;
    ret.x = old >> 16;
    return ret;
  }
}
#endif

template <typename T, bool IsAvailable, typename NVType, typename NVVec2Type>
struct VecAtomicAddHelperBase {
  static constexpr auto kIsAvailable = IsAvailable;
  using NVT = NVType;
  using NVVec2T = NVVec2Type;
};

template <typename T>
struct VecAtomicAddHelper : VecAtomicAddHelperBase<T, false, void, void> {};
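
// VecAtomicAddHelper<T>::kIsAvailable gates the two fastAtomicAdd overloads
// below via SFINAE: only the specializations that follow enable the
// vectorized half2 / bfloat162 path; every other T uses plain CudaAtomicAdd.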

#if CUDA_VERSION >= 10000 && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
template <>
struct VecAtomicAddHelper<phi::dtype::float16>
    : VecAtomicAddHelperBase<phi::dtype::float16, true, __half, __half2> {};
#endif

#if CUDA_VERSION >= 11000 && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800
template <>
struct VecAtomicAddHelper<phi::dtype::bfloat16>
    : VecAtomicAddHelperBase<phi::dtype::bfloat16,
                             true,
                             __nv_bfloat16,
                             __nv_bfloat162> {};
#endif

// The performance of "atomicAdd(half *)" is bad, but "atomicAdd(half2 *)"
// is good. So for the fp16 type, we can use "atomicAdd(half2 *)" to speed up.
template <typename T,
          typename std::enable_if<VecAtomicAddHelper<T>::kIsAvailable>::type * =
              nullptr>
__device__ __forceinline__ void fastAtomicAdd(T *tensor,
                                              size_t index,
                                              const size_t numel,
                                              T value) {
  using NVT = typename VecAtomicAddHelper<T>::NVT;
  using NVVec2T = typename VecAtomicAddHelper<T>::NVVec2T;
  NVT *target_addr = reinterpret_cast<NVT *>(tensor + index);
  // Whether the target address is aligned for a 32-bit (NVVec2T) access.
  bool aligned_half2 =
      (reinterpret_cast<std::uintptr_t>(target_addr) % sizeof(NVVec2T) == 0);

  if (aligned_half2 && index < (numel - 1)) {
    NVVec2T value2;
    value2.x = *reinterpret_cast<NVT *>(&value);
    value2.y = 0.0;
    atomicAdd(reinterpret_cast<NVVec2T *>(target_addr), value2);
  } else if (!aligned_half2 && index > 0) {
    NVVec2T value2;
    value2.x = 0.0;
    value2.y = *reinterpret_cast<NVT *>(&value);
    atomicAdd(reinterpret_cast<NVVec2T *>(target_addr - 1), value2);
  } else {
    atomicAdd(reinterpret_cast<NVT *>(tensor) + index,
              *reinterpret_cast<NVT *>(&value));
  }
}
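
// A minimal usage sketch (hypothetical scatter-add kernel, for illustration
// only):
//   __global__ void scatter_add(phi::dtype::float16 *out, size_t out_numel,
//                               const int64_t *index,
//                               const phi::dtype::float16 *src, size_t n) {
//     size_t i = blockIdx.x * blockDim.x + threadIdx.x;
//     if (i < n)
//       fastAtomicAdd(out, static_cast<size_t>(index[i]), out_numel, src[i]);
//   }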

template <typename T,
          typename std::enable_if<!VecAtomicAddHelper<T>::kIsAvailable>::type
              * = nullptr>
__device__ __forceinline__ void fastAtomicAdd(T *arr,
                                              size_t index,
                                              const size_t numel,
                                              T value) {
  CudaAtomicAdd(arr + index, value);
}
#endif

// NOTE(zhangbo): CUDA does not have atomicCAS for __nv_bfloat16.
inline static __device__ uint32_t bf16_add_to_low_half(uint32_t val, float x) {
  phi::dtype::bfloat16 low_half;
  // The bfloat16 in the lower 16 bits.
  low_half.x = static_cast<uint16_t>(val & 0xFFFFu);
  low_half =
      static_cast<phi::dtype::bfloat16>(static_cast<float>(low_half) + x);
  return (val & 0xFFFF0000u) | low_half.x;
}

inline static __device__ uint32_t bf16_add_to_high_half(uint32_t val, float x) {
  phi::dtype::bfloat16 high_half;
  // The bfloat16 in the higher 16 bits.
  high_half.x = static_cast<uint16_t>(val >> 16);
  high_half =
      static_cast<phi::dtype::bfloat16>(static_cast<float>(high_half) + x);
  return (val & 0xFFFFu) | (static_cast<uint32_t>(high_half.x) << 16);
}

#if CUDA_VERSION >= 11000 && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800
static __device__ __forceinline__ phi::dtype::bfloat16 CUDABF16ToPDBF16(
    __nv_bfloat16 x) {
  return *reinterpret_cast<phi::dtype::bfloat16 *>(&x);
}

static __device__ __forceinline__ __nv_bfloat16
PDBF16ToCUDABF16(phi::dtype::bfloat16 x) {
  return *reinterpret_cast<__nv_bfloat16 *>(&x);
}

CUDA_ATOMIC_WRAPPER(Add, phi::dtype::bfloat16) {
  return CUDABF16ToPDBF16(atomicAdd(reinterpret_cast<__nv_bfloat16 *>(address),
                                    PDBF16ToCUDABF16(val)));
}
#else
CUDA_ATOMIC_WRAPPER(Add, phi::dtype::bfloat16) {
  // The packed bfloat16 value may live in either the lower or the higher
  // 16 bits of the aligned 32-bit word.
  uint32_t *address_as_ui = reinterpret_cast<uint32_t *>(
      reinterpret_cast<char *>(address) -
      (reinterpret_cast<uintptr_t>(address) & 0x02));
  float val_f = static_cast<float>(val);
  uint32_t old = *address_as_ui;
  uint32_t assumed;
  if (((uintptr_t)address & 0x02) == 0) {
    // The bfloat16 value stays at the lower 16 bits of the address.
    do {
      assumed = old;
      old = atomicCAS(
          address_as_ui, assumed, bf16_add_to_low_half(assumed, val_f));
    } while (old != assumed);
    phi::dtype::bfloat16 ret;
    ret.x = old & 0xFFFFu;
    return ret;
  } else {
    // The bfloat16 value stays at the higher 16 bits of the address.
    do {
      assumed = old;
      old = atomicCAS(
          address_as_ui, assumed, bf16_add_to_high_half(assumed, val_f));
    } while (old != assumed);
    phi::dtype::bfloat16 ret;
    ret.x = old >> 16;
    return ret;
  }
}
#endif

CUDA_ATOMIC_WRAPPER(Add, complex<float>) {
  float *real = reinterpret_cast<float *>(address);
  float *imag = real + 1;
  return complex<float>(CudaAtomicAdd(real, val.real),
                        CudaAtomicAdd(imag, val.imag));
}

CUDA_ATOMIC_WRAPPER(Add, complex<double>) {
  double *real = reinterpret_cast<double *>(address);
  double *imag = real + 1;
  return complex<double>(CudaAtomicAdd(real, val.real),
                         CudaAtomicAdd(imag, val.imag));
}
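
// Note: the real and imaginary parts are updated by two independent atomic
// adds, so the complex value as a whole is not updated atomically. This is
// fine for reductions where only the final accumulated sum is read.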

// For atomicMax
USE_CUDA_ATOMIC(Max, int);
USE_CUDA_ATOMIC(Max, unsigned int);
// The CUDA API uses unsigned long long int; we cannot use uint64_t here
// because unsigned long long int is not necessarily uint64_t.
#if defined(__HIPCC__) || (defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 350)
USE_CUDA_ATOMIC(Max, unsigned long long int);  // NOLINT
#else
CUDA_ATOMIC_WRAPPER(Max, unsigned long long int) {  // NOLINT
  if (*address >= val) {
    return *address;
  }

  unsigned long long int old = *address, assumed;  // NOLINT

  do {
    assumed = old;
    if (assumed >= val) {
      break;
    }

    old = atomicCAS(address, assumed, val);
  } while (assumed != old);

  return old;
}
#endif

CUDA_ATOMIC_WRAPPER(Max, int64_t) {
  // Here, we check that long long int has the same size as int64_t.
  static_assert(sizeof(int64_t) == sizeof(long long int),  // NOLINT
                "long long should be int64");
  long long int res = *address;  // NOLINT
  while (val > res) {
    long long int old = res;                                           // NOLINT
    res = (long long int)atomicCAS((unsigned long long int *)address,  // NOLINT
                                   (unsigned long long int)old,        // NOLINT
                                   (unsigned long long int)val);       // NOLINT
    if (res == old) {
      break;
    }
  }
  return res;
}

CUDA_ATOMIC_WRAPPER(Max, float) {
  if (*address >= val) {
    return *address;
  }

  int *const address_as_i = reinterpret_cast<int *>(address);
  int old = *address_as_i, assumed;

  do {
    assumed = old;
    if (__int_as_float(assumed) >= val) {
      break;
    }

    old = atomicCAS(address_as_i, assumed, __float_as_int(val));
  } while (assumed != old);

  return __int_as_float(old);
}

CUDA_ATOMIC_WRAPPER(Max, double) {
  if (*address >= val) {
    return *address;
  }

  unsigned long long int *const address_as_ull =            // NOLINT
      reinterpret_cast<unsigned long long int *>(address);  // NOLINT
  unsigned long long int old = *address_as_ull, assumed;    // NOLINT

  do {
    assumed = old;
    if (__longlong_as_double(assumed) >= val) {
      break;
    }

    old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val));
  } while (assumed != old);

  return __longlong_as_double(old);
}

#ifdef PADDLE_CUDA_FP16
inline static __device__ uint32_t max_to_low_half(uint32_t val, float x) {
  phi::dtype::float16 low_half;
  // The float16 in the lower 16 bits.
  low_half.x = static_cast<uint16_t>(val & 0xFFFFu);
  low_half =
      static_cast<phi::dtype::float16>(max(static_cast<float>(low_half), x));
  return (val & 0xFFFF0000u) | low_half.x;
}

inline static __device__ uint32_t max_to_high_half(uint32_t val, float x) {
  phi::dtype::float16 high_half;
  // The float16 in the higher 16 bits.
  high_half.x = static_cast<uint16_t>(val >> 16);
  high_half =
      static_cast<phi::dtype::float16>(max(static_cast<float>(high_half), x));
  return (val & 0xFFFFu) | (static_cast<uint32_t>(high_half.x) << 16);
}

CUDA_ATOMIC_WRAPPER(Max, phi::dtype::float16) {
  if (*address >= val) {
    return *address;
  }
  uint32_t *address_as_ui = reinterpret_cast<uint32_t *>(
      reinterpret_cast<char *>(address) -
      (reinterpret_cast<uintptr_t>(address) & 0x02));
  float val_f = static_cast<float>(val);
  uint32_t old = *address_as_ui;
  uint32_t assumed;
  if (((uintptr_t)address & 0x02) == 0) {
    // The float16 value stays at the lower 16 bits of the address.
    do {
      assumed = old;
      old = atomicCAS(address_as_ui, assumed, max_to_low_half(assumed, val_f));
    } while (old != assumed);
    phi::dtype::float16 ret;
    ret.x = old & 0xFFFFu;
    return ret;
  } else {
    // The float16 value stays at the higher 16 bits of the address.
    do {
      assumed = old;
      old = atomicCAS(address_as_ui, assumed, max_to_high_half(assumed, val_f));
    } while (old != assumed);
    phi::dtype::float16 ret;
    ret.x = old >> 16;
    return ret;
  }
}
#endif

inline static __device__ uint32_t bf16_max_to_low_half(uint32_t val, float x) {
  phi::dtype::bfloat16 low_half;
  // The bfloat16 in the lower 16 bits.
  low_half.x = static_cast<uint16_t>(val & 0xFFFFu);
  low_half =
      static_cast<phi::dtype::bfloat16>(max(static_cast<float>(low_half), x));
  return (val & 0xFFFF0000u) | low_half.x;
}

inline static __device__ uint32_t bf16_max_to_high_half(uint32_t val, float x) {
  phi::dtype::bfloat16 high_half;
  // The bfloat16 in the higher 16 bits.
  high_half.x = static_cast<uint16_t>(val >> 16);
  high_half =
      static_cast<phi::dtype::bfloat16>(max(static_cast<float>(high_half), x));
  return (val & 0xFFFFu) | (static_cast<uint32_t>(high_half.x) << 16);
}

CUDA_ATOMIC_WRAPPER(Max, phi::dtype::bfloat16) {
  if (*address >= val) {
    return *address;
  }
  uint32_t *address_as_ui = reinterpret_cast<uint32_t *>(
      reinterpret_cast<char *>(address) -
      (reinterpret_cast<uintptr_t>(address) & 0x02));
  float val_f = static_cast<float>(val);
  uint32_t old = *address_as_ui;
  uint32_t assumed;
  if (((uintptr_t)address & 0x02) == 0) {
    // The bfloat16 value stays at the lower 16 bits of the address.
    do {
      assumed = old;
      old = atomicCAS(
          address_as_ui, assumed, bf16_max_to_low_half(assumed, val_f));
    } while (old != assumed);
    phi::dtype::bfloat16 ret;
    ret.x = old & 0xFFFFu;
    return ret;
  } else {
    // The bfloat16 value stays at the higher 16 bits of the address.
    do {
      assumed = old;
      old = atomicCAS(
          address_as_ui, assumed, bf16_max_to_high_half(assumed, val_f));
    } while (old != assumed);
    phi::dtype::bfloat16 ret;
    ret.x = old >> 16;
    return ret;
  }
}

// For atomicMin
USE_CUDA_ATOMIC(Min, int);
USE_CUDA_ATOMIC(Min, unsigned int);
// The CUDA API uses unsigned long long int; we cannot use uint64_t here
// because unsigned long long int is not necessarily uint64_t.
#if defined(__HIPCC__) || (defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 350)
USE_CUDA_ATOMIC(Min, unsigned long long int);  // NOLINT
#else
CUDA_ATOMIC_WRAPPER(Min, unsigned long long int) {  // NOLINT
  if (*address <= val) {
    return *address;
  }

  unsigned long long int old = *address, assumed;  // NOLINT

  do {
    assumed = old;
    if (assumed <= val) {
      break;
    }

    old = atomicCAS(address, assumed, val);
  } while (assumed != old);

  return old;
}
#endif

CUDA_ATOMIC_WRAPPER(Min, int64_t) {
  // Here, we check that long long int has the same size as int64_t.
  static_assert(sizeof(int64_t) == sizeof(long long int),  // NOLINT
                "long long should be int64");
  long long int res = *address;  // NOLINT
  while (val < res) {
    long long int old = res;                                           // NOLINT
    res = (long long int)atomicCAS((unsigned long long int *)address,  // NOLINT
                                   (unsigned long long int)old,        // NOLINT
                                   (unsigned long long int)val);       // NOLINT
    if (res == old) {
      break;
    }
  }
  return res;
}

CUDA_ATOMIC_WRAPPER(Min, float) {
  if (*address <= val) {
    return *address;
  }

  int *const address_as_i = reinterpret_cast<int *>(address);
  int old = *address_as_i, assumed;

  do {
    assumed = old;
    if (__int_as_float(assumed) <= val) {
      break;
    }

    old = atomicCAS(address_as_i, assumed, __float_as_int(val));
  } while (assumed != old);

  return __int_as_float(old);
}

CUDA_ATOMIC_WRAPPER(Min, double) {
  if (*address <= val) {
    return *address;
  }

  unsigned long long int *const address_as_ull =            // NOLINT
      reinterpret_cast<unsigned long long int *>(address);  // NOLINT
  unsigned long long int old = *address_as_ull, assumed;    // NOLINT

  do {
    assumed = old;
    if (__longlong_as_double(assumed) <= val) {
      break;
    }

    old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val));
  } while (assumed != old);

  return __longlong_as_double(old);
}

#ifdef PADDLE_CUDA_FP16
inline static __device__ uint32_t min_to_low_half(uint32_t val, float x) {
  phi::dtype::float16 low_half;
  // The float16 in the lower 16 bits.
  low_half.x = static_cast<uint16_t>(val & 0xFFFFu);
  low_half =
      static_cast<phi::dtype::float16>(min(static_cast<float>(low_half), x));
  return (val & 0xFFFF0000u) | low_half.x;
}

inline static __device__ uint32_t min_to_high_half(uint32_t val, float x) {
  phi::dtype::float16 high_half;
  // The float16 in the higher 16 bits.
  high_half.x = static_cast<uint16_t>(val >> 16);
  high_half =
      static_cast<phi::dtype::float16>(min(static_cast<float>(high_half), x));
  return (val & 0xFFFFu) | (static_cast<uint32_t>(high_half.x) << 16);
}

CUDA_ATOMIC_WRAPPER(Min, phi::dtype::float16) {
  if (*address <= val) {
    return *address;
  }
  uint32_t *address_as_ui = reinterpret_cast<uint32_t *>(
      reinterpret_cast<char *>(address) -
      (reinterpret_cast<uintptr_t>(address) & 0x02));
  float val_f = static_cast<float>(val);
  uint32_t old = *address_as_ui;
  uint32_t assumed;
  if (((uintptr_t)address & 0x02) == 0) {
    // The float16 value stays at the lower 16 bits of the address.
    do {
      assumed = old;
      old = atomicCAS(address_as_ui, assumed, min_to_low_half(assumed, val_f));
    } while (old != assumed);
    phi::dtype::float16 ret;
    ret.x = old & 0xFFFFu;
    return ret;
  } else {
    // The float16 value stays at the higher 16 bits of the address.
    do {
      assumed = old;
      old = atomicCAS(address_as_ui, assumed, min_to_high_half(assumed, val_f));
    } while (old != assumed);
    phi::dtype::float16 ret;
    ret.x = old >> 16;
    return ret;
  }
}
#endif

inline static __device__ uint32_t bf16_min_to_low_half(uint32_t val, float x) {
  phi::dtype::bfloat16 low_half;
  // The bfloat16 in the lower 16 bits.
  low_half.x = static_cast<uint16_t>(val & 0xFFFFu);
  low_half =
      static_cast<phi::dtype::bfloat16>(min(static_cast<float>(low_half), x));
  return (val & 0xFFFF0000u) | low_half.x;
}

inline static __device__ uint32_t bf16_min_to_high_half(uint32_t val, float x) {
  phi::dtype::bfloat16 high_half;
  // The bfloat16 in the higher 16 bits.
  high_half.x = static_cast<uint16_t>(val >> 16);
  high_half =
      static_cast<phi::dtype::bfloat16>(min(static_cast<float>(high_half), x));
  return (val & 0xFFFFu) | (static_cast<uint32_t>(high_half.x) << 16);
}

CUDA_ATOMIC_WRAPPER(Min, phi::dtype::bfloat16) {
  if (*address <= val) {
    return *address;
  }
  uint32_t *address_as_ui = reinterpret_cast<uint32_t *>(
      reinterpret_cast<char *>(address) -
      (reinterpret_cast<uintptr_t>(address) & 0x02));
  float val_f = static_cast<float>(val);
  uint32_t old = *address_as_ui;
  uint32_t assumed;
  if (((uintptr_t)address & 0x02) == 0) {
    // The bfloat16 value stays at the lower 16 bits of the address.
    do {
      assumed = old;
      old = atomicCAS(
          address_as_ui, assumed, bf16_min_to_low_half(assumed, val_f));
    } while (old != assumed);
    phi::dtype::bfloat16 ret;
    ret.x = old & 0xFFFFu;
    return ret;
  } else {
    // The bfloat16 value stays at the higher 16 bits of the address.
    do {
      assumed = old;
      old = atomicCAS(
          address_as_ui, assumed, bf16_min_to_high_half(assumed, val_f));
    } while (old != assumed);
    phi::dtype::bfloat16 ret;
    ret.x = old >> 16;
    return ret;
  }
}

#ifdef PADDLE_WITH_CUDA
/*
 * One thread block deals with elementwise atomicAdd for a vector of length
 * len.
 * @in:  [x1, x2, x3, ...]
 * @out: [y1+x1, y2+x2, y3+x3, ...]
 * */

template <typename T,
          typename std::enable_if<!VecAtomicAddHelper<T>::kIsAvailable>::type
              * = nullptr>
__device__ __forceinline__ void VectorizedAtomicAddPerBlock(
    const int64_t len, int tid, int threads_per_block, const T *in, T *out) {
  for (int i = tid; i < len; i += threads_per_block) {
    CudaAtomicAdd(&out[i], in[i]);
  }
}

// Note: assume that len is even. If len is odd, call fastAtomicAdd directly.
template <typename T,
          typename std::enable_if<VecAtomicAddHelper<T>::kIsAvailable>::type * =
              nullptr>
__device__ __forceinline__ void VectorizedAtomicAddPerBlock(
    const int64_t len, int tid, int threads_per_block, const T *in, T *out) {
  int i = 0;
  int loops = len / 2 * 2;

  using NVT = typename VecAtomicAddHelper<T>::NVT;
  using NVVec2T = typename VecAtomicAddHelper<T>::NVVec2T;
  // Whether out is aligned for a vectorized 32-bit (NVVec2T) access.
  bool aligned_half2 =
      (reinterpret_cast<std::uintptr_t>(out) % sizeof(NVVec2T) == 0);

  if (aligned_half2) {
    for (i = tid * 2; i < loops; i += threads_per_block * 2) {
      NVVec2T value2;
      T value_1 = in[i];
      T value_2 = in[i + 1];
      value2.x = *reinterpret_cast<NVT *>(&value_1);
      value2.y = *reinterpret_cast<NVT *>(&value_2);
      atomicAdd(reinterpret_cast<NVVec2T *>(&out[i]), value2);
    }
    for (; i < len; i += threads_per_block) {
      fastAtomicAdd(out, i, len, in[i]);
    }
  } else {
    for (int i = tid; i < len; i += threads_per_block) {
      fastAtomicAdd(out, i, len, in[i]);
    }
  }
}
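
// A minimal usage sketch (hypothetical kernel, for illustration only); each
// thread block accumulates one whole vector `in` into `out`:
//   __global__ void block_accumulate(const phi::dtype::float16 *in,
//                                    phi::dtype::float16 *out, int64_t len) {
//     VectorizedAtomicAddPerBlock(len, threadIdx.x, blockDim.x, in, out);
//   }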
#endif
}  // namespace phi