/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */


#ifndef HL_DEVICE_FUNCTIONS_CUH_
#define HL_DEVICE_FUNCTIONS_CUH_

namespace paddle {

// Atomic add for float and double; the double specialization below falls
// back to a CAS loop on devices older than compute capability 6.0.
template <class T>
inline __device__ T paddleAtomicAdd(T* address, T val);

template <>
inline __device__ float paddleAtomicAdd(float* address, float val) {
  return atomicAdd(address, val);
}

template <>
inline __device__ double paddleAtomicAdd(double* address, double val) {
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 600
  // Compute capability 6.0+ provides a native atomicAdd for double.
  return atomicAdd(address, val);
#else
  // Emulate a double atomic add with a compare-and-swap loop on the
  // 64-bit integer representation of the value (the pattern from the
  // CUDA C Programming Guide).
  // NOLINTNEXTLINE
  unsigned long long int* address_as_ull = (unsigned long long int*)address;
  unsigned long long int old = *address_as_ull, assumed;  // NOLINT

  do {
    assumed = old;
    old = atomicCAS(address_as_ull,
                    assumed,
                    __double_as_longlong(val +
                                         __longlong_as_double(assumed)));
    // Retry if another thread updated *address between the read and the CAS.
  } while (assumed != old);

  return __longlong_as_double(old);
#endif
}
}  // namespace paddle
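
/*
 * Illustrative usage sketch (not part of the original header): a hypothetical
 * kernel that accumulates per-thread values into a single double-precision
 * sum with paddleAtomicAdd. The kernel name and signature are assumptions.
 *
 *   __global__ void accumulate(const double* in, double* sum, int n) {
 *     int i = blockIdx.x * blockDim.x + threadIdx.x;
 *     if (i < n) {
 *       paddle::paddleAtomicAdd(sum, in[i]);
 *     }
 *   }
 */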

/**
 * @brief  sum reduction
 *
 * @param[in,out]  smem       input data; should reside in __shared__ memory.
 * @param[in]      tid        thread index.
 * @param[in]      threads    the total number of threads used to reduce,
 *                            e.g. blockDim.x; assumed to be a power of two.
 *
 * @return smem[0] holds the sum of all elements in smem.
 *
 * @note  callers must call __syncthreads() after populating smem and
 *        before invoking this function.
 */
__device__ __forceinline__
void simpleReduce(real* smem, int tid, int threads) {
  // Tree reduction: halve the number of active threads each iteration,
  // with a barrier so every partial sum is visible to the next round.
  for (unsigned int s = threads / 2; s > 0; s >>= 1) {
    if (tid < s) {
      smem[tid] += smem[tid + s];
    }
    __syncthreads();
  }
}
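
/*
 * Illustrative usage sketch (not part of the original header): a hypothetical
 * per-block sum kernel. It assumes blockDim.x == 256 (a power of two, as
 * simpleReduce requires) and syncs before reducing so smem is fully
 * populated; `real` is Paddle's float/double typedef.
 *
 *   __global__ void blockSum(const real* in, real* out) {
 *     __shared__ real smem[256];
 *     smem[threadIdx.x] = in[blockIdx.x * blockDim.x + threadIdx.x];
 *     __syncthreads();
 *     simpleReduce(smem, threadIdx.x, blockDim.x);
 *     if (threadIdx.x == 0) out[blockIdx.x] = smem[0];
 *   }
 */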

#endif /* HL_DEVICE_FUNCTIONS_CUH_ */