// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <algorithm>
#include <cmath>
#include <numeric>
#include <set>
#include <vector>

#ifdef __NVCC__
#include "cub/cub.cuh"
#endif

#ifdef __HIPCC__
#include <hipcub/hipcub.hpp>
namespace cub = hipcub;
#endif

#include "paddle/fluid/framework/array.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/operators/amp/fp16_type_traits.h"
#include "paddle/fluid/operators/kernel_primitives/kernel_primitives.h"
#include "paddle/fluid/platform/cuda_device_function.h"
#include "paddle/fluid/platform/fast_divmod.h"

// Threshold on reduce_num for splitting the reduction in ReduceHigherDim,
// i.e. whether a second reduce pass is needed
#define REDUCE_SPLIT_BOUNDARY 512
#define REDUCE_VEC_SIZE 4

namespace paddle {
namespace operators {

namespace kps = paddle::operators::kernel_primitives;

namespace details {

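// returns the largest power of two that is <= n (for n >= 1),
// e.g. GetLastPow2(20) == 16, GetLastPow2(16) == 16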
static inline int GetLastPow2(int n) {
  n |= (n >> 1);
  n |= (n >> 2);
  n |= (n >> 4);
  n |= (n >> 8);
  n |= (n >> 16);
  return std::max(1, n - (n >> 1));
}

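// ceiling division: the number of b-sized chunks needed to cover a,
// e.g. AlignUp(10, 4) == 3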
static inline int64_t AlignUp(int64_t a, int64_t b) { return (a + b - 1) / b; }

// get strides of x_dim, reduce_dim and left_dim for reduceLastDim and reduceAny
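// e.g. dims = [8, 6], idx = [0, 1]  -->  strides = [6, 1]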
static inline std::vector<int> GetDimStrides(const std::vector<int>& dims,
                                             const std::vector<int>& idx) {
  int n = static_cast<int>(idx.size());
  if (n == 0) return std::vector<int>();
  std::vector<int> strides(n);
  strides.back() = 1;
  for (int i = n - 2; i >= 0; --i) {
    strides[i] = strides[i + 1] * dims[idx[i + 1]];
  }
  return strides;
}

// get blockDim for reduceLastDim and reduceAny
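// e.g. GetBlockDim(100) == 64 (the largest power of two <= 100), assuming
// 100 < kps::details::kReduceMaxThread; larger requests are capped at
// kReduceMaxThread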
static inline int GetBlockDim(int block_dim) {
  return block_dim >= kps::details::kReduceMaxThread
             ? kps::details::kReduceMaxThread
             : GetLastPow2(block_dim);
}

// check whether the reduce rank is valid
static inline void CheckReduceRank(int reduce_rank, int rank) {
  if (rank % 2 == 0) {
    PADDLE_ENFORCE_EQ(reduce_rank, rank / 2,
                      platform::errors::InvalidArgument(
                          "ReduceOp: invalid reduce rank. When rank = %d, "
                          "reduce_rank must be %d, but got %d.",
                          rank, rank / 2, reduce_rank));
  } else {
    auto lower_rank = (rank - 1) / 2;
    auto upper_rank = (rank + 1) / 2;
    PADDLE_ENFORCE_EQ(
        reduce_rank == lower_rank || reduce_rank == upper_rank, true,
        platform::errors::InvalidArgument(
            "ReduceOp: invalid reduce rank. When rank = %d, reduce_rank "
            "must be %d or %d, but got %d.",
            rank, lower_rank, upper_rank, reduce_rank));
  }
}

// convert dims from vector to array
template <typename T, size_t ElementCount, typename VectorLikeType>
static inline paddle::framework::Array<T, ElementCount> VectorToArray(
    const VectorLikeType& vec) {
  PADDLE_ENFORCE_LE(vec.size(), ElementCount,
                    platform::errors::InvalidArgument(
                        "Cub reduce Array: size not match. Received "
                        "vec.size() %d > ElementCount %d.",
                        vec.size(), ElementCount));
  size_t n = static_cast<size_t>(vec.size());
  paddle::framework::Array<T, ElementCount> ret;
  for (size_t i = 0; i < n; ++i) {
    ret[i] = vec[i];
  }
  return ret;
}

}  // namespace details

using Tensor = framework::Tensor;
constexpr int kMaxRank = framework::DDim::kMaxRank;

enum ReduceType {
  kReduceLastDim = 0x01,    // when reduce_dim[0] == x_dim.size() - 1;
  kReduceHigherDim = 0x02,  // ReduceFirstDim or reduceSecondDim
  kReduceAny = 0x03,        // when reduce_dim.size() > 1
};

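// Maps a linear offset in the cal_dims space back to an offset in the full
// tensor, e.g. x_dim = [8, 6, 4], reduce_dim = [0, 2]:
//   reduce_strides = [4, 1], x_strides = [24, 4, 1],
//   so operator()(5) = (5 / 4) * 24 + (5 % 4) * 1 = 25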
struct IndexCalculator {
  IndexCalculator(int dim, const std::vector<int>& cal_dims,
                  const std::vector<int>& cal_strides,
                  const std::vector<int>& full_strides)
      : dim(dim) {
    dims = details::VectorToArray<int, kMaxRank>(cal_dims);
    strides = details::VectorToArray<int, kMaxRank>(full_strides);
    std::vector<platform::FastDivMod> cal_divmoders;
    // fast divmod
    for (auto i : cal_strides) {
      cal_divmoders.push_back(platform::FastDivMod(i));
    }
    divmoders =
        details::VectorToArray<platform::FastDivMod, kMaxRank>(cal_divmoders);
  }

  __device__ inline int operator()(int offset) const {
    int index = 0;
#pragma unroll
    for (int i = 0; i < kMaxRank; ++i) {
      if (i == dim) {
        break;
      }
      auto divmod = divmoders[i].Divmod(offset);
      index += (divmod.val[0] * strides[dims[i]]);
      offset = divmod.val[1];
    }
    return index;
  }

  int dim;
  framework::Array<int, kMaxRank> dims;
  framework::Array<int, kMaxRank> strides;
  framework::Array<platform::FastDivMod, kMaxRank> divmoders;
};

// when reduce_type == kReduceLastDim, this struct is used instead of
// IndexCalculator for higher performance
struct LastDimIndexCal {
  explicit LastDimIndexCal(int num) : stride(num) {}

  __device__ inline int operator()(int index) const { return index * stride; }
  int stride;
};

// reduce config
template <typename Ty>
struct ReduceConfig {
  ReduceConfig(const std::vector<int>& origin_reduce_dims,
               const std::vector<int>& origin_x_dim)
      : reduce_dims_origin(origin_reduce_dims), x_dim(origin_x_dim) {}

  // get the parameters of reduceKernel
  void Run() {
    // step1: update the reduce_dim left_dim and x_dim
    SetReduceDim();

    // step2: get the strides of dim for reduceAny and reduceLastDim
    SetStrides();

    // step3: get the type of reduce
    SetReduceType();

    // step4: set the block and grid for launch kernel
    SetBlockDim();
  }

  // when should_reduce_again is true, we need to allocate temp space for the
  // partial results
  void SetOutputData(Ty* y_data, const platform::Place& place,
                     framework::Tensor* tmp) {
    if (should_reduce_again) {
      output_data = tmp->mutable_data<Ty>(
          framework::make_ddim(
              {static_cast<int64_t>(left_num * grid.z * grid.y * sizeof(Ty))}),
          place);
    } else {
      output_data = y_data;
    }
  }

 private:
  // set reduce_dim, left_dim and update x_dim
  // eg: x_dim = [2, 4, 6] origin_reduce_dims = [0, 1]
  //     --SetReduceDim--> x_dim = [8,6], reduce_dim = [0], left_dim = [1]
  void SetReduceDim() {
    std::set<int> reduce_set;
    for (auto e : reduce_dims_origin) {
      auto pos = e >= 0 ? e : e + x_dim.size();
      reduce_set.insert(pos);
    }

    std::vector<int> reduce_dim_temp(reduce_set.begin(), reduce_set.end());
    std::sort(reduce_dim_temp.begin(), reduce_dim_temp.end());

    // update reduce_dim and x_dim
    std::vector<int> x_new_dim;

    reduce_dim.push_back(reduce_dim_temp[0]);
    x_new_dim.push_back(x_dim[0]);

    int idx_reduce = 1;
    int num = 0;

    if (reduce_dim_temp.size() > 1) {
      for (int i = 1; i < x_dim.size(); i++) {
        if ((idx_reduce < reduce_dim_temp.size()) &&
            (i == reduce_dim_temp[idx_reduce])) {
          int result =
              reduce_dim_temp[idx_reduce] - reduce_dim[reduce_dim.size() - 1];
          bool is_equal = ((result - num) == 1);
          if (is_equal) {
            x_new_dim[x_new_dim.size() - 1] *= x_dim[i];
            num++;
          } else {
            reduce_dim.push_back(reduce_dim_temp[idx_reduce] - num);
            x_new_dim.push_back(x_dim[i]);
          }
          idx_reduce++;
        } else {
          x_new_dim.push_back(x_dim[i]);
        }
      }
    } else {
      x_new_dim = x_dim;
    }

    // update x_dim
    x_dim = x_new_dim;
    std::vector<int>().swap(x_new_dim);

    std::vector<int> reduce_dim_new;
    int is_reduced = 0;
    for (auto e : reduce_dim) {
      is_reduced |= 1 << e;
    }

    std::vector<int>().swap(reduce_dim);

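    // merge adjacent dims that are either all reduced or all kept,
    // e.g. x_dim = [4, 5, 6, 7], reduce_dim = [1, 2]
    //      --> x_dim = [4, 30, 7], reduce_dim = [1]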
    for (int i = 0; i < x_dim.size(); i++) {
      if ((i == 0) || (((is_reduced >> i) ^ (is_reduced >> (i - 1))) & 1)) {
        x_new_dim.push_back(x_dim[i]);
        if ((is_reduced >> i) & 1)
          reduce_dim_new.push_back(x_new_dim.size() - 1);
      } else {
        x_new_dim[x_new_dim.size() - 1] *= x_dim[i];
      }
    }

    x_dim = x_new_dim;
    reduce_dim = reduce_dim_new;

    int x_rank = static_cast<int>(x_dim.size());
    std::set<int> left_set;

    for (int i = 0; i < x_rank; ++i) {
      left_set.insert(i);
    }

    for (auto e : reduce_dim) {
      left_set.erase(e);
    }

    left_dim.assign(left_set.begin(), left_set.end());

    // whether the last dim is involved in the reduction
    reduce_last_dim = (reduce_dim.back() == x_dim.size() - 1);
  }

  // set x_strides, reduce_strides, left_strides for reduceLastDim and reduceAny
  // eg: x_dim = [8, 6], reduce_dim = [0], left_dim = [1]
  //     --SetStrides--> x_strides= [6,1], reduce_strides = [1],
  //     left_strides = [1]
  void SetStrides() {
    std::vector<int> idx_dim;
    for (int i = 0; i < x_dim.size(); i++) {
      idx_dim.push_back(i);
    }

    x_strides = details::GetDimStrides(x_dim, idx_dim);
    reduce_strides = details::GetDimStrides(x_dim, reduce_dim);
    left_strides = details::GetDimStrides(x_dim, left_dim);
    reduce_num = reduce_strides[0] * x_dim[reduce_dim[0]];

    left_num = 1;
    if (left_dim.size()) {
      left_num = left_strides[0] * x_dim[left_dim[0]];
    }
  }

  // get the reduceType
  // eg: x_dim = [8, 6] reduce_dim = [0] --> ReduceHigherDim -->reduceFirstDim
  //     x_dim = [8, 6] reduce_dim = [1] --> reduceLastDim
  //     x_dim = [8] reduce_dim = [0] --> reduceAll
  //     x_dim = [8, 6, 4, 2] reduce_dim = [0, 2] --> reduceAny
  void SetReduceType() {
    int rank = x_dim.size();
    int reduce_rank = reduce_dim.size();
    bool is_last_dim =
        (rank == 2) && (reduce_rank == 1) && (reduce_dim[0] == 1);
    if (rank == reduce_rank || is_last_dim) {
      reduce_type = static_cast<int>(ReduceType::kReduceLastDim);
    } else if (reduce_rank == 1) {
      // ReduceFirstDim and reduceSecondDim
      reduce_type = static_cast<int>(ReduceType::kReduceHigherDim);
    } else {
      reduce_type = static_cast<int>(ReduceType::kReduceAny);
    }
  }

  void SetBlockDimForReduceAny(dim3* block_dim, dim3* grid_dim) {
    constexpr int min_reduce_num_per_thread = 16;
    constexpr int max_reduce_num_per_thread = 256;
    constexpr int max_num_threads = kps::details::kReduceMaxThread;

    // set block size.
    // 1. If reduce_last_dim == true, all threads with the same threadIdx.y
    //    cooperate on the reduction for one output;
    //    the number of outputs per block is blockDim.y.
    // 2. If reduce_last_dim == false, each threadIdx.x processes a different
    //    reduction and gets its output separately; if necessary, a further
    //    reduction is done along the block's y dimension.
    //    The number of outputs per block is blockDim.x.
    int block_x, block_y;
    int grid_num, reduce_num_per_thread;
    if (reduce_last_dim) {
      block_x = details::GetBlockDim(reduce_num);
      block_y = details::GetBlockDim(left_num);
      block_dim->x = block_x;
      block_dim->y =
          std::min(block_y, static_cast<int>(max_num_threads / block_dim->x));
      grid_num = details::AlignUp(left_num, block_dim->y);
      reduce_num_per_thread = details::AlignUp(reduce_num, block_dim->x);
    } else {
      block_x = details::GetBlockDim(left_num);
      block_y = details::GetBlockDim(reduce_num);
      block_dim->x = std::min(block_x, 32);
      block_dim->y =
          std::min(block_y, static_cast<int>(max_num_threads / block_dim->x));
      block_dim->x =
          std::min(block_x, static_cast<int>(max_num_threads / block_dim->y));
      grid_num = details::AlignUp(left_num, block_dim->x);
      reduce_num_per_thread = details::AlignUp(reduce_num, block_dim->y);
    }
    int device_id = platform::GetCurrentDeviceId();
    int max_mp = platform::GetCUDAMultiProcessors(device_id);
    int max_threads_per_mp =
        platform::GetCUDAMaxThreadsPerMultiProcessor(device_id);
    int max_threads = max_threads_per_mp * max_mp;
    int num_threads = block_dim->x * block_dim->y;
    int max_num_blocks = max_threads / num_threads;

    // set grid size.
    // Whether grid.y should be larger than 1 is decided by two rules:
    // 1. The number of elements each thread processes should be no less than
    //    min_reduce_num_per_thread but no more than max_reduce_num_per_thread;
    // 2. It should maximize the utilization of the SMs.
    // So we choose the minimum of input_split_num_1 and input_split_num_3
    // to make each thread process as much data as possible. Meanwhile, that
    // number must not exceed max_reduce_num_per_thread, so we take the
    // maximum of the result above and input_split_num_2.
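    // e.g. suppose reduce_num_per_thread = 2048 and
    // AlignUp(max_num_blocks, grid_num) = 4:
    //   input_split_num_1 = 128, input_split_num_2 = 8, input_split_num_3 = 4,
    //   so grid.y = max(min(128, 4), 8) = 8 and each thread handles about
    //   2048 / 8 = 256 elements (= max_reduce_num_per_thread).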
    int input_split_num_1 =
        details::AlignUp(reduce_num_per_thread, min_reduce_num_per_thread);
    int input_split_num_2 =
        details::AlignUp(reduce_num_per_thread, max_reduce_num_per_thread);
    int input_split_num_3 = details::AlignUp(max_num_blocks, grid_num);

    grid_dim->x = grid_num;
    grid_dim->y = std::max(std::min(input_split_num_1, input_split_num_3),
                           input_split_num_2);
    // if grid.y > 1, we need to launch the reduce kernel again.
    if (grid_dim->y > 1) {
      should_reduce_again = true;
    }
  }

  // set block and grid for launching the kernel
  // for ReduceHigherDim: if there are enough blocks -> split reduce_num,
  //                      else init block(32, 1) grid(block_num, 1)
  // for others: block(block_num, 1), grid(left_num, 1)
  void SetBlockDim() {
    // init
    int block_num = details::GetBlockDim(reduce_num);
    should_reduce_again = false;

    dim3 block_dim(block_num, 1);
    dim3 grid_dim(left_num, 1);
    blocking_size = reduce_num;

    if (reduce_type == ReduceType::kReduceHigherDim) {
      int last_dim_num = x_dim.back();
      // update left_num
      int grid_z = left_num / last_dim_num;
      left_num = last_dim_num;

      block_dim.z = 1;
      grid_dim.z = grid_z;

      int device_id = platform::GetCurrentDeviceId();
      int max_mp = platform::GetCUDAMultiProcessors(device_id);
      int max_threads_per_mp =
          platform::GetCUDAMaxThreadsPerMultiProcessor(device_id);
      int max_threads = max_threads_per_mp * max_mp;

      // init
      int num_block = (max_threads / left_num);

      if (num_block > 1 && reduce_num >= REDUCE_SPLIT_BOUNDARY) {
        blocking_size = details::GetLastPow2(reduce_num / num_block);

        if (blocking_size <= 1) {
          blocking_size = details::GetLastPow2(sqrt(reduce_num));
        } else if (blocking_size * 2 < reduce_num) {
          blocking_size *= 2;
        }

        should_reduce_again = true;

        block_dim.x = details::GetBlockDim(left_num);
        block_dim.y = 1;
        grid_dim.x = (left_num + block_dim.x - 1) / block_dim.x;
        grid_dim.y = (reduce_num + blocking_size - 1) / blocking_size;

      } else {
        block_dim.x = details::GetBlockDim(left_num);
        block_dim.y = 1;
        blocking_size = reduce_num;
        grid_dim.x = (left_num + block_dim.x - 1) / block_dim.x;
        grid_dim.y = 1;
      }
    } else {
      SetBlockDimForReduceAny(&block_dim, &grid_dim);
    }

    block = block_dim;
    grid = grid_dim;
  }

 public:
  std::vector<int> reduce_dims_origin;
  std::vector<int> reduce_dim;
  std::vector<int> x_dim;
  std::vector<int> left_dim;
  std::vector<int> x_strides;
  std::vector<int> left_strides;
  std::vector<int> reduce_strides;

  int reduce_type;
  int reduce_num;
  int left_num;
  int blocking_size;
  bool should_reduce_again;
  bool reduce_last_dim;

  Ty* output_data;

  dim3 block;
  dim3 grid;
};
/* size : how many columns remain to be reduced
 * loop : how many rows of data have to be reduced
 * block_size: max rows this block reduces
 */
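// e.g. left_num = 100, blockDim.x = 32, blockIdx.x = 3: this block covers
// the last 100 - 96 = 4 columns, so the IsBoundary = true instantiation is
// used to guard the tail (see ReduceHigherDimKernel below)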
template <typename Tx, typename Ty, typename MPType, typename ReduceOp,
          typename TransformOp, bool IsBoundary = false>
__device__ void HigherDimDealSegment(const Tx* x, Ty* y, ReduceOp reducer,
                                     TransformOp transformer, MPType init,
                                     int reduce_num, int left_num,
                                     int block_size) {
  const int NY = 1;
  int idx = blockIdx.x * blockDim.x;
  int idy = blockIdx.y * block_size;
  // block_offset of rows
  Tx reduce_input[NY];
  MPType reduce_compute[NY];
  MPType result = init;
  // the offset of this block
  int block_offset = idy * left_num + idx + blockIdx.z * reduce_num * left_num;
  const Tx* input = x + block_offset;
  int store_offset =
      blockIdx.y * left_num + blockIdx.z * gridDim.y * left_num + idx;
  // how many columns left
  int size = left_num - idx;
  // how many rows have to be reduced
  int loop = reduce_num - idy;
  loop = loop > block_size ? block_size : loop;

  for (int loop_index = 0; loop_index < loop; loop_index += NY) {
    kps::ReadData<Tx, Tx, 1, NY, 1, IsBoundary>(
        &reduce_input[0], input + loop_index * left_num, size, NY, 1, left_num);
    kps::ElementwiseUnary<Tx, MPType, NY, 1, 1, TransformOp>(
        &reduce_compute[0], &reduce_input[0], transformer);
    kps::Reduce<MPType, NY, 1, 1, ReduceOp,
                kps::details::ReduceMode::kLocalMode>(
        &result, &reduce_compute[0], reducer, false);
  }

  Ty temp_data = static_cast<Ty>(result);
  kps::WriteData<Ty, 1, 1, 1, IsBoundary>(y + store_offset, &temp_data, size);
}

// when reduce_dim.size() == 1 and reduce_dim[0] == x_dim.size() - 1, or
// when reduce_dim.size() != 1 and reduce_dim.size() != x_dim.size(), this
// function will be used
template <typename Tx, typename Ty, typename MPType, typename ReduceOp,
          typename TransformOp, typename Calculator>
__global__ void ReduceAnyKernel(const Tx* x, Ty* y, ReduceOp reducer,
                                TransformOp transformer, MPType init,
                                int reduce_num, int left_num,
                                bool reduce_last_dim,
                                const Calculator reduce_index_calculator,
                                const Calculator left_index_calculator) {
  int input_idx, left_idx, stride;
  int block_size = 0;
  bool need_store = true;
  int tid = 0;
  // the last dim is involved in the reduction
  if (reduce_last_dim) {
    input_idx = blockIdx.y * blockDim.x;
    left_idx = blockIdx.x * blockDim.y + threadIdx.y;
    stride = gridDim.y * blockDim.x;
    block_size = blockDim.x;
    need_store = (threadIdx.x == 0) && (left_idx < left_num);
    tid = threadIdx.x;
  } else {
    input_idx = blockIdx.y * blockDim.y;
    left_idx = blockIdx.x * blockDim.x + threadIdx.x;
    stride = gridDim.y * blockDim.y;
    block_size = blockDim.y;
    need_store = (threadIdx.y == 0) && (left_idx < left_num);
    tid = threadIdx.y;
  }
  int store_offset = blockIdx.y * left_num + left_idx;
  // calculate the offset, i.e. the address where each thread really starts
  int input_offset = left_index_calculator(left_idx);
  const Tx* input = x + input_offset;
  MPType reduce_var = init;
  Ty store_data;

  // 1. reduce for each thread
  if (left_idx < left_num) {
    // load REDUCE_VEC_SIZE data once, and then compute
    Tx input_reg[REDUCE_VEC_SIZE];
    MPType input_compute[REDUCE_VEC_SIZE];
    int bound = reduce_num - (REDUCE_VEC_SIZE - 1) * stride;
    for (; input_idx + block_size < bound;
         input_idx += REDUCE_VEC_SIZE * stride) {
      kps::ReadDataReduce<Tx, 1, REDUCE_VEC_SIZE, 1, 1, Calculator>(
          &input_reg[0], input, input_idx, reduce_index_calculator, 1,
          reduce_num, 1, stride, reduce_last_dim);
      kps::ElementwiseUnary<Tx, MPType, REDUCE_VEC_SIZE, 1, 1, TransformOp>(
          &input_compute[0], &input_reg[0], transformer);
      kps::Reduce<MPType, REDUCE_VEC_SIZE, 1, 1, ReduceOp,
                  kps::details::ReduceMode::kLocalMode>(
          &reduce_var, &input_compute[0], reducer, reduce_last_dim);
    }

    kps::Init<MPType, REDUCE_VEC_SIZE>(&input_compute[0], init);
    kps::ReadDataReduce<Tx, 1, REDUCE_VEC_SIZE, 1, 1, Calculator, true>(
        &input_reg[0], input, input_idx, reduce_index_calculator, 1, reduce_num,
        1, stride, reduce_last_dim);
    input_idx += tid;
#pragma unroll
    for (int i = 0; i < REDUCE_VEC_SIZE; ++i) {
      if (input_idx >= reduce_num) {
        break;
      }
      input_compute[i] = static_cast<MPType>(transformer(input_reg[i]));
      input_idx += stride;
    }
    kps::Reduce<MPType, REDUCE_VEC_SIZE, 1, 1, ReduceOp,
                kps::details::ReduceMode::kLocalMode>(
        &reduce_var, &input_compute[0], reducer, reduce_last_dim);
  }

  kps::Reduce<MPType, 1, 1, 1, ReduceOp, kps::details::kGlobalMode>(
      &reduce_var, &reduce_var, reducer, reduce_last_dim);
  if (need_store) {
    y[store_offset] = static_cast<Ty>(reduce_var);
  }
}

template <typename Tx, typename Ty, typename MPType, typename ReduceOp,
          typename TransformOp>
__global__ void ReduceHigherDimKernel(const Tx* x, Ty* y, ReduceOp reducer,
                                      TransformOp transformer, MPType init,
                                      int reduce_num, int left_num,
                                      int blocking_size) {
  // when reduce_dim.size() == 1 and reduce_dim[0] != x_dim.size() - 1, this
  // function will be used
  // eg: x_dim = {nz, ny, nx}, nx != 1, axis can be 0 or 1
  //     if axis = 1 then grid.z = nz, grid.y = ny / block_size, grid.x = nx /
  //     32
  //     else grid.z = 1, grid.y = ny / block_size, grid.x = nx /32
  int idx = blockIdx.x * blockDim.x;
  int size = left_num - idx;
  if (size >= blockDim.x) {  // complete segment
    HigherDimDealSegment<Tx, Ty, MPType, ReduceOp, TransformOp>(
        x, y, reducer, transformer, init, reduce_num, left_num, blocking_size);
  } else {
    HigherDimDealSegment<Tx, Ty, MPType, ReduceOp, TransformOp, true>(
        x, y, reducer, transformer, init, reduce_num, left_num, blocking_size);
  }
}

template <typename Tx, typename Ty, typename MPType, typename ReduceOp>
static void LaunchReduceKernel(const Tx* x_data, Ty* y_data,
                               const ReduceOp& reducer, MPType init,
                               gpuStream_t stream, ReduceConfig<Ty> config) {
  using TransformOp = typename ReduceOp::Transformer;

  if (config.reduce_type == kReduceLastDim) {
    int stride_reduce = 1;
    int stride_left = config.reduce_num;
    // for higher performance
    auto reduce_index_calculator = LastDimIndexCal(stride_reduce);
    auto left_index_calculator = LastDimIndexCal(stride_left);

    ReduceAnyKernel<Tx, Ty, MPType, ReduceOp, TransformOp,
                    LastDimIndexCal><<<config.grid, config.block, 0, stream>>>(
        x_data, config.output_data, reducer, TransformOp(config.reduce_num),
        init, config.reduce_num, config.left_num, config.reduce_last_dim,
        reduce_index_calculator, left_index_calculator);

  } else {
    int reduce_rank = config.reduce_strides.size();
    int left_rank = config.left_strides.size();
    auto reduce_index_calculator =
        IndexCalculator(reduce_rank, config.reduce_dim, config.reduce_strides,
                        config.x_strides);
    auto left_index_calculator = IndexCalculator(
        left_rank, config.left_dim, config.left_strides, config.x_strides);
    ReduceAnyKernel<Tx, Ty, MPType, ReduceOp, TransformOp,
                    IndexCalculator><<<config.grid, config.block, 0, stream>>>(
        x_data, config.output_data, reducer, TransformOp(config.reduce_num),
        init, config.reduce_num, config.left_num, config.reduce_last_dim,
        reduce_index_calculator, left_index_calculator);
  }

  if (config.should_reduce_again) {
    dim3 block;
    dim3 grid;
    if (config.reduce_last_dim) {
      block = dim3(32, 1, 1);
      grid = dim3(details::AlignUp(config.left_num, 32), 1, 1);
    } else {
      block = dim3(config.block.x, 1, 1);
      grid = dim3(config.grid.x, 1, config.grid.z);
    }

    ReduceHigherDimKernel<
        Ty, Ty, MPType, ReduceOp,
        kps::details::IdentityFunctor<Ty, MPType>><<<grid, block, 0, stream>>>(
        config.output_data, y_data, reducer,
        kps::details::IdentityFunctor<Ty, MPType>(config.grid.y), init,
        config.grid.y, config.left_num, config.grid.y);
  }
}

template <typename Tx, typename Ty,
          template <typename, typename> class ReduceOp>
void TensorReduceFunctorImpl(const framework::Tensor& x, framework::Tensor* y,
                             std::vector<int> origin_reduce_dims,
                             gpuStream_t stream) {
  auto x_dim = framework::vectorize<int>(x.dims());
  auto config = ReduceConfig<Ty>(origin_reduce_dims, x_dim);
  config.Run();  // get the parameters of LaunchReduceKernel
  int numel = x.numel();
  // After config.Run(), SetOutputData decides where the intermediate result
  // goes: when should_reduce_again is true the partial results are stored in
  // the temp space pointed to by output_data, otherwise directly in y_data.
  framework::Tensor tmp;
  auto x_data = x.data<Tx>();
  auto y_data = y->mutable_data<Ty>(x.place());

  if (config.reduce_num == 1) {
    auto out_dims = y->dims();
    framework::TensorCopy(x, y->place(), y);
    y->Resize(out_dims);
    return;
  }

  config.SetOutputData(y_data, x.place(), &tmp);
  bool use_cub_reduce = (config.reduce_num == numel) &&
                        (!std::is_same<Tx, paddle::platform::float16>::value);
  if (use_cub_reduce) {
    // launch CUB::Reduce
    using TransformOp = typename ReduceOp<Tx, Ty>::Transformer;
    auto reducer = ReduceOp<Tx, Ty>();
    cub::TransformInputIterator<Ty, TransformOp, const Tx*> trans_x(
        x_data, TransformOp(config.reduce_num));
    size_t temp_storage_bytes = 0;
    cub::DeviceReduce::Reduce(nullptr, temp_storage_bytes, trans_x, y_data,
                              config.reduce_num, reducer, reducer.initial(),
                              stream);
    framework::Tensor tmp;
    auto* temp_storage = tmp.mutable_data<uint8_t>(
        framework::make_ddim({static_cast<int64_t>(temp_storage_bytes)}),
        x.place());
    cub::DeviceReduce::Reduce(temp_storage, temp_storage_bytes, trans_x, y_data,
                              config.reduce_num, reducer, reducer.initial(),
                              stream);

    return;
  }

  using MPType = typename details::MPTypeTrait<Ty>::Type;
  auto reducer = ReduceOp<Tx, MPType>();
  // launch ReduceHigherDimKernel
  // when reduce_dim.size() == 1 and reduce_dim[0] != x_dim.size() - 1, this
  // function will be used
  // eg: x_dim = {nz, ny, nx}, nx != 1, axis can be 0 or 1
  //     if axis = 1 then grid.z = nz, grid.y = ny / block_size, grid.x = nx /
  //     32
  //     else grid.z = 1, grid.y = ny / block_size, grid.x = nx /32
  if (config.reduce_type == ReduceType::kReduceHigherDim) {
    using TransformOp = typename ReduceOp<Tx, MPType>::Transformer;

    ReduceHigherDimKernel<
        Tx, Ty, MPType, ReduceOp<Tx, MPType>,
        TransformOp><<<config.grid, config.block, 0, stream>>>(
        x_data, config.output_data, reducer, TransformOp(config.reduce_num),
        reducer.initial(), config.reduce_num, config.left_num,
        config.blocking_size);

    if (config.should_reduce_again) {
      dim3 block = dim3(config.block.x, 1, 1);
      dim3 grid = dim3(config.grid.x, 1, config.grid.z);
      ReduceHigherDimKernel<Ty, Ty, MPType, ReduceOp<Tx, MPType>,
                            kps::details::IdentityFunctor<
                                Ty, MPType>><<<grid, block, 0, stream>>>(
          config.output_data, y_data, reducer,
          kps::details::IdentityFunctor<Ty, MPType>(config.grid.y),
          reducer.initial(), config.grid.y, config.left_num, config.grid.y);
    }
    return;
  }

  // when reduce_dim.size() == 1 and reduce_dim[0] == x_dim.size() - 1, or
  // when reduce_dim.size() != 1 and reduce_dim.size() != x_dim.size(), this
  // function will be used
  LaunchReduceKernel<Tx, Ty, MPType, ReduceOp<Tx, MPType>>(
      x_data, y_data, reducer, reducer.initial(), stream, config);
}
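
// A minimal usage sketch (CustomSum is a hypothetical reduce functor here;
// any ReduceOp providing a Transformer typedef and initial() is called the
// same way):
//
//   std::vector<int> reduce_dims = {0, 1};
//   TensorReduceFunctorImpl<float, float, CustomSum>(
//       x, &y, reduce_dims, dev_ctx.stream());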

template <typename Tx, template <typename, typename> class ReduceOp>
struct TensorReduceFunc {
  const framework::Tensor& x;
  framework::Tensor* y;
  std::vector<int> origin_reduce_dims;
  gpuStream_t stream;
  TensorReduceFunc(const framework::Tensor& x, framework::Tensor* y,
                   std::vector<int> origin_reduce_dims, gpuStream_t stream)
      : x(x), y(y), origin_reduce_dims(origin_reduce_dims), stream(stream) {}

  template <typename Ty>
  void apply() const {
    TensorReduceFunctorImpl<Tx, Ty, ReduceOp>(x, y, origin_reduce_dims, stream);
  }
};
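
// Typically dispatched over the output dtype; a sketch assuming the usual
// framework::VisitDataType helper and the hypothetical CustomSum functor:
//
//   framework::VisitDataType(
//       static_cast<framework::proto::VarType::Type>(out_dtype),
//       TensorReduceFunc<T, CustomSum>(x, &y, reduce_dims, stream));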

}  // namespace operators
}  // namespace paddle