// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Used to compute GPU launch parameter configurations.

#pragma once

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)

#ifdef PADDLE_WITH_CUDA
#include <cuda_runtime.h>
#else
#include <hip/hip_runtime.h>
#endif

#include <stddef.h>

#include <algorithm>
#include <array>
#include <string>
#include <vector>

#include "glog/logging.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/enforce.h"

#ifdef __HIPCC__
// HIP results in errors or NaNs if thread_per_block > 256.
#define PREDEFINED_BLOCK_SIZE 256
#else
// CUDA performs better when thread_per_block is in [64, 512].
#define PREDEFINED_BLOCK_SIZE 512
#endif

namespace phi {
namespace backends {
namespace gpu {

// Upper limit for a single dimension of a CUDA grid.
constexpr int kMultiDimslimit = 65536;

template <typename T = int64_t>
inline T DivUp(T a, T b) {
  return (a + b - 1) / b;
}
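
// For illustration, DivUp is a ceiling division, e.g.:
//   DivUp<int64_t>(10, 4) == 3  and  DivUp<int64_t>(8, 4) == 2.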
// https://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2
//   for rounding an integer value up to the next highest power of 2.
inline int64_t RoundToNextHighPowOfTwo(int64_t n, int64_t min_val = 1) {
  n--;
  n |= (n >> 1);
  n |= (n >> 2);
  n |= (n >> 4);
  n |= (n >> 8);
  n |= (n >> 16);
  return std::max(min_val, (n + 1));
}
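
// For illustration, the shifts above smear the highest set bit into every
// lower bit, so (n + 1) is the next power of 2, e.g.:
//   RoundToNextHighPowOfTwo(33) == 64, RoundToNextHighPowOfTwo(32) == 32,
//   RoundToNextHighPowOfTwo(3, 16) == 16 (clamped by min_val).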

inline int64_t RoundToPowerOfTwo(int64_t n) {
  constexpr int64_t min_val = 32;
  int64_t num = RoundToNextHighPowOfTwo(n, min_val);
#ifdef __HIPCC__
  int64_t max_val = 256;
#else
  int64_t max_val = 1024;
#endif
  return std::min(max_val, num);
}
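
// For illustration, the result is clamped into [32, 1024] on CUDA and
// [32, 256] on HIP, e.g. on CUDA:
//   RoundToPowerOfTwo(5) == 32, RoundToPowerOfTwo(100) == 128,
//   RoundToPowerOfTwo(5000) == 1024.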

#ifdef WITH_NV_JETSON
// The number of threads cannot be set to 1024 in some cases when the device
// is Jetson Nano or TX2.
inline void ChangeThreadNum(const phi::GPUContext& context,
                            int* num_thread,
                            int alternative_num_thread = 512) {
  if (context.GetComputeCapability() == 53 ||
      context.GetComputeCapability() == 62) {
    *num_thread = alternative_num_thread;
  }
}
#endif
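
// A minimal usage sketch (assuming a live phi::GPUContext named dev_ctx):
//   int threads = 1024;
//   ChangeThreadNum(dev_ctx, &threads);  // falls back to 512 on Nano/TX2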

struct GpuLaunchConfig {
 public:
  GpuLaunchConfig() {}

  size_t GetThreadNum() const { return GetBlockSize() * GetGridSize(); }

  size_t GetGridSize() const {
    return block_per_grid.x * block_per_grid.y * block_per_grid.z;
  }

  size_t GetBlockSize() const {
    return thread_per_block.x * thread_per_block.y * thread_per_block.z;
  }

  int compute_capability = 0;
  dim3 thread_per_block = dim3(1, 1, 1);
  dim3 block_per_grid = dim3(1, 1, 1);
};
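
// For illustration, with thread_per_block = (256, 1, 1) and
// block_per_grid = (128, 1, 1):
//   GetBlockSize() == 256, GetGridSize() == 128,
//   GetThreadNum() == 256 * 128 == 32768.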

/* According to NVIDIA, CUDA performs better when the number of threads per
 * block is 64/128/256/512, and the number of blocks should be greater (at
 * least 2x~4x) than the number of SMs. Hence, the SM count is taken into
 * account within this function to determine the right number of threads per
 * block. */
inline GpuLaunchConfig GetGpuLaunchConfig1D(const phi::GPUContext& context,
                                            int64_t numel,
                                            int vec_size = 1) {
  PADDLE_ENFORCE_GE(numel,
                    0,
                    phi::errors::InvalidArgument(
                        "numel is expected to be greater than or equal to 0,"
                        " but received %d.",
                        numel));
  PADDLE_ENFORCE_GE(
      vec_size,
      1,
      phi::errors::InvalidArgument(
          "vec_size is expected to be greater than 0, but received %d.",
          vec_size));
  // Get compute_capability.
  const int capability = context.GetComputeCapability();
  // If the thread number per block is 64/128/256/512, CUDA performs better.
  int limit_threads =
      std::min(PREDEFINED_BLOCK_SIZE, context.GetMaxThreadsPerBlock());
#ifdef WITH_NV_JETSON
  if (capability == 53 || capability == 62) {
    limit_threads = 512;
  }
#endif
  int threads = limit_threads;
  int sm_count = context.GetSMCount();
  int64_t active_threads_num = numel / vec_size;
  if (active_threads_num / (sm_count << 1) < limit_threads) {
    // Round up the thread number to a power of 2, so that the number of
    // active blocks is about twice the SM count, to acquire better
    // performance.
    threads = RoundToPowerOfTwo(active_threads_num / (sm_count << 1));
  } else if (active_threads_num / (sm_count << 2) < limit_threads) {
    // Round up the thread number to a power of 2, so that the number of
    // active blocks is about 4 times the SM count, to acquire better
    // performance.
    threads = RoundToPowerOfTwo(active_threads_num / (sm_count << 2));
  }
  // The number of threads per block shall be at least 64.
  threads = std::max(64, threads);
  int blocks = DivUp<int64_t>(DivUp<int64_t>(numel, vec_size), threads);
  int limit_blocks = context.GetCUDAMaxGridDimSize()[0];
  if (blocks > limit_blocks) {
    blocks = limit_blocks;
  }

  GpuLaunchConfig config;
  config.thread_per_block.x = threads;
  config.block_per_grid.x = blocks;
  config.compute_capability = capability;

  VLOG(3) << "Get 1-D launch config: numel=" << numel
          << ", vec_size=" << vec_size << ", block_size=" << threads
          << ", grid_size=" << blocks << ", limit_blocks=" << limit_blocks
          << ", limit_threads=" << limit_threads;
  return config;
}
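
// A minimal launch sketch (assuming a live phi::GPUContext named dev_ctx and
// a hypothetical element-wise kernel ElementwiseKernel):
//   auto config = GetGpuLaunchConfig1D(dev_ctx, numel, /*vec_size=*/1);
//   ElementwiseKernel<<<config.block_per_grid,
//                       config.thread_per_block,
//                       0,
//                       dev_ctx.stream()>>>(in, out, numel);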

inline GpuLaunchConfig GetGpuLaunchConfig2D(const phi::GPUContext& context,
                                            int64_t x_dim,
                                            int64_t y_dim) {
  PADDLE_ENFORCE_GT(
      x_dim,
      0,
      phi::errors::InvalidArgument("x dim number should be greater than 0,"
                                   " but received value is: %d",
                                   x_dim));
  PADDLE_ENFORCE_GT(
      y_dim,
      0,
      phi::errors::InvalidArgument("y dim number should be greater than 0,"
                                   " but received value is: %d",
                                   y_dim));

  const int kThreadsPerBlock = 256;
  int block_cols = std::min<int64_t>(x_dim, kThreadsPerBlock);
  int block_rows = std::max(kThreadsPerBlock / block_cols, 1);

  int max_physical_threads = context.GetMaxPhysicalThreadCount();
  const int max_blocks = std::max(max_physical_threads / kThreadsPerBlock, 1);

  GpuLaunchConfig config;
  // Notice: the block size is not aligned to 32; if needed, do it yourself.
  config.thread_per_block = dim3(block_cols, block_rows, 1);

  int grid_x = std::min<int64_t>(DivUp<int64_t>(x_dim, block_cols), max_blocks);
  int grid_y = std::min<int64_t>(max_blocks / grid_x,
                                 std::max<int64_t>(y_dim / block_rows, 1));

  config.block_per_grid = dim3(grid_x, grid_y, 1);
  return config;
}
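
// A minimal usage sketch (assuming a live phi::GPUContext named dev_ctx and a
// hypothetical 2-D kernel MatrixKernel):
//   auto config = GetGpuLaunchConfig2D(dev_ctx, width, height);
//   MatrixKernel<<<config.block_per_grid,
//                  config.thread_per_block,
//                  0,
//                  dev_ctx.stream()>>>(data, width, height);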

static inline int GetLastPow2(int n) {
  n |= (n >> 1);
  n |= (n >> 2);
  n |= (n >> 4);
  n |= (n >> 8);
  n |= (n >> 16);
  return std::max(1, n - (n >> 1));
}
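
// For illustration, GetLastPow2 rounds *down* to the previous power of 2
// (unlike RoundToNextHighPowOfTwo), e.g.:
//   GetLastPow2(33) == 32, GetLastPow2(32) == 32, GetLastPow2(1) == 1.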

inline GpuLaunchConfig GetGpuLaunchConfig3D(const phi::GPUContext& context,
                                            int num_img,
                                            int height,
                                            int width) {
  const int kThreadsPerBlock = 256;
  int max_threads_per_block = context.GetMaxThreadsPerBlock();  // 1024
  int max_threads = std::min(kThreadsPerBlock, max_threads_per_block);

  int block_x = std::min(GetLastPow2(width), max_threads);
  int block_y = std::min(GetLastPow2(height), max_threads / block_x);
  int block_z = std::min(num_img, max_threads / block_x / block_y);

  std::array<int, 3> max_grid_dim = context.GetCUDAMaxGridDimSize();
  int grid_x = std::min(max_grid_dim[0], DivUp<int>(width, block_x));
  int grid_y = std::min(max_grid_dim[1], DivUp<int>(height, block_y));
  int grid_z = std::min(max_grid_dim[2], DivUp<int>(num_img, block_z * 4));

  const int capability = context.GetComputeCapability();
  GpuLaunchConfig config;
  config.compute_capability = capability;
  config.thread_per_block = dim3(block_x, block_y, block_z);
  config.block_per_grid = dim3(grid_x, grid_y, grid_z);
  return config;
}
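
// A minimal usage sketch (assuming a hypothetical image kernel ResizeKernel
// over a batch of num_img images of size height x width):
//   auto config = GetGpuLaunchConfig3D(dev_ctx, num_img, height, width);
//   ResizeKernel<<<config.block_per_grid,
//                  config.thread_per_block,
//                  0,
//                  dev_ctx.stream()>>>(...);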

template <typename Context>
void LimitGridDim(const Context& ctx, dim3* grid_dim) {
  auto max_grid_dim =
      reinterpret_cast<const phi::GPUContext&>(ctx).GetCUDAMaxGridDimSize();
  grid_dim->x = grid_dim->x < max_grid_dim[0] ? grid_dim->x : max_grid_dim[0];
  grid_dim->y = grid_dim->y < max_grid_dim[1] ? grid_dim->y : max_grid_dim[1];
  grid_dim->z = grid_dim->z < max_grid_dim[2] ? grid_dim->z : max_grid_dim[2];
}
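
// A minimal usage sketch: clamp a caller-computed grid before launching.
//   dim3 grid(DivUp<int64_t>(numel, 256), 1, 1);
//   LimitGridDim(dev_ctx, &grid);  // grid.x is now within the device limit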

}  // namespace gpu
}  // namespace backends
}  // namespace phi

#endif