Unverified commit e03e4673, authored by ronnywang, committed by GitHub

[ROCM] fix gather_op, sigmoid_cross_entropy_with_logits_op, test=develop (#31467)

Parent commit: b85c8e03
...@@ -29,7 +29,11 @@ namespace operators {
// Shorthand alias for the framework tensor type used throughout this file.
using Tensor = framework::Tensor;
// Threads launched per block. ROCm builds use 256 — presumably to stay
// within AMD per-block resource limits (see PaddlePaddle PR #31467) —
// while CUDA builds keep the original 512.
#ifdef __HIPCC__
static constexpr int kNumCUDAThreads = 256;
#else
static constexpr int kNumCUDAThreads = 512;
#endif
// Cap on the number of blocks launched per kernel (name keeps the
// historical "Maxinum" spelling for compatibility with existing code).
static constexpr int kNumMaxinumNumBlocks = 4096;
static inline int NumBlocks(const int N) { static inline int NumBlocks(const int N) {
......
...@@ -76,10 +76,17 @@ namespace platform {
 *
 */
// Grid-stride loop helper: `i` walks [0, num) with a stride equal to the
// total number of launched threads, so any grid size covers the whole range.
// The index accumulator is int64_t to avoid overflow when `num` exceeds
// INT_MAX; `i` itself takes the caller-chosen `index_type`.
// The HIP branch uses the hip* builtin spellings required by ROCm toolchains.
#ifdef __HIPCC__
#define CUDA_KERNEL_LOOP_TYPE(i, num, index_type)                     \
  int64_t __index__ = hipBlockIdx_x * hipBlockDim_x + hipThreadIdx_x; \
  for (index_type i = __index__; __index__ < (num);                   \
       __index__ += hipBlockDim_x * hipGridDim_x, i = __index__)
#else
#define CUDA_KERNEL_LOOP_TYPE(i, num, index_type)            \
  int64_t __index__ = blockIdx.x * blockDim.x + threadIdx.x; \
  for (index_type i = __index__; __index__ < (num);          \
       __index__ += blockDim.x * gridDim.x, i = __index__)
#endif

// Common case: loop index is a plain int.
#define CUDA_KERNEL_LOOP(i, num) CUDA_KERNEL_LOOP_TYPE(i, num, int)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
To comment, please register or sign in.