/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */


#ifndef HL_DEVICE_FUNCTIONS_CUH_
#define HL_DEVICE_FUNCTIONS_CUH_

namespace paddle {

/**
 * Atomically adds `val` to `*address` and returns the old value that was
 * stored there. Only the specializations declared in this header (float,
 * double) are defined; other types will fail to link if used.
 */
template <class T>
inline __device__ T paddleAtomicAdd(T* address, T val);

// float specialization: hardware atomicAdd(float*) exists on every
// architecture CUDA supports, so simply forward to the built-in.
template <>
inline __device__ float paddleAtomicAdd(float* address, float val) {
  const float previous = atomicAdd(address, val);
  return previous;
}
// double specialization: hardware atomicAdd(double*) requires SM60+
// (Pascal). On older architectures it is emulated with an atomicCAS loop
// over the 64-bit integer representation — the canonical pattern from the
// CUDA C Programming Guide. Returns the old value stored at `address`,
// matching the semantics of the built-in atomicAdd.
template <>
inline __device__ double paddleAtomicAdd(double* address, double val) {
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 600
  return atomicAdd(address, val);
#else
  // Reinterpret the double's storage as a 64-bit integer so atomicCAS can
  // operate on it.
  unsigned long long int* address_as_ull =
      reinterpret_cast<unsigned long long int*>(address);  // NOLINT
  unsigned long long int old = *address_as_ull;  // NOLINT
  unsigned long long int assumed;                // NOLINT

  do {
    assumed = old;
    old = atomicCAS(address_as_ull,
                    assumed,
                    __double_as_longlong(val +
                    __longlong_as_double(assumed)));
    // Note: the loop condition compares integer bit patterns, not doubles,
    // to avoid hanging when *address holds NaN (NaN != NaN in FP compare).
  } while (assumed != old);

  return __longlong_as_double(old);
#endif
}
}  // namespace paddle


#endif /* HL_DEVICE_FUNCTIONS_CUH_ */