/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/platform/gpu_info.h"
#include <algorithm>
#include <cstdlib>
#include <sstream>
#include <string>

#include "gflags/gflags.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/string/split.h"

#ifndef _WIN32
constexpr static float fraction_of_gpu_memory_to_use = 0.92f;
#else
// fraction_of_gpu_memory_to_use cannot be too high on Windows,
// since the Win32 graphics subsystem can occupy some GPU memory,
// which may leave insufficient memory for Paddle.
constexpr static float fraction_of_gpu_memory_to_use = 0.5f;
#endif

constexpr static float fraction_reserve_gpu_memory = 0.05f;

DEFINE_double(fraction_of_gpu_memory_to_use, fraction_of_gpu_memory_to_use,
              "Allocate a trunk of gpu memory that is this fraction of the "
              "total gpu memory size. Future memory usage will be allocated "
              "from the trunk. If the trunk doesn't have enough gpu memory, "
              "additional trunks of the same size will be requested from gpu "
              "until the gpu has no memory left for another trunk.");

DEFINE_uint64(
    initial_gpu_memory_in_mb, 0ul,
    "Allocate a chunk of gpu memory whose byte size is specified by "
    "the flag. Future memory usage will be allocated from the "
    "chunk. If the chunk doesn't have enough gpu memory, additional "
    "chunks of the gpu memory will be requested from gpu with size "
    "specified by FLAGS_reallocate_gpu_memory_in_mb until the gpu has "
    "no memory left for the additional chunk. Note: if you set this "
    "flag, the memory size set by "
    "FLAGS_fraction_of_gpu_memory_to_use will be overridden by this "
    "flag. If you don't set this flag, PaddlePaddle will use "
    "FLAGS_fraction_of_gpu_memory_to_use to allocate gpu memory.");

DEFINE_uint64(reallocate_gpu_memory_in_mb, 0ul,
              "If this flag is set, Paddle will reallocate gpu memory in "
              "chunks whose size is specified by this flag. Otherwise, "
              "Paddle will reallocate according to "
              "FLAGS_fraction_of_gpu_memory_to_use.");

DEFINE_bool(
    enable_cublas_tensor_op_math, false,
    "Indicates whether to use Tensor Cores, which may lose precision. "
    "Currently, two CUDA libraries use Tensor Cores: cuBLAS and cuDNN. "
    "cuBLAS uses Tensor Cores to speed up GEMM computations (the matrices "
    "must be either half precision or single precision); cuDNN uses Tensor "
    "Cores to speed up both convolutions (the input and output must be half "
    "precision) and recurrent neural networks (RNNs).");

DEFINE_string(selected_gpus, "",
              "A list of device ids separated by commas, like: 0,1,2,3. "
              "This option is useful for multi-process training when each "
              "process has only one device (GPU). If you want to use all "
              "visible devices, set this to an empty string. NOTE: we do "
              "this because we want P2P communication between GPU devices; "
              "with CUDA_VISIBLE_DEVICES we could only use shared memory.");

namespace paddle {
namespace platform {

// Builds a readable message for a failed CUDA runtime call, pointing at the
// official documentation of the cudaError codes.
static inline std::string ErrorLog(cudaError_t e, const std::string &op) {
  std::ostringstream ostr;
  ostr << op << " failed. Error Type ID = " << e
       << ". Please see details in: "
          "https://docs.nvidia.com/cuda/cuda-runtime-api/"
          "group__CUDART__TYPES.html#group__CUDART__TYPES_"
          "1g3f51e3575c2178246db0a94a430e0038";
  return ostr.str();
}

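// Counts visible CUDA devices. A CUDA_VISIBLE_DEVICES value that is empty or
// contains only spaces is treated as "no devices", so a process can be
// pinned to CPU without tripping cudaGetDeviceCount.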
static int GetCUDADeviceCountImpl() {
  const auto *cuda_visible_devices = std::getenv("CUDA_VISIBLE_DEVICES");
  if (cuda_visible_devices != nullptr) {
    std::string cuda_visible_devices_str(cuda_visible_devices);
    if (std::all_of(cuda_visible_devices_str.begin(),
                    cuda_visible_devices_str.end(),
                    [](char ch) { return ch == ' '; })) {
      VLOG(2) << "CUDA_VISIBLE_DEVICES is set to be empty. No GPU detected.";
      return 0;
    }
  }

  int count;
  PADDLE_ENFORCE(
      cudaGetDeviceCount(&count),
      "cudaGetDeviceCount failed in paddle::platform::GetCUDADeviceCount");
  return count;
}

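// The device count is queried once and cached in a function-local static, so
// devices that appear or disappear after the first call are not observed.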
int GetCUDADeviceCount() {
  static auto dev_cnt = GetCUDADeviceCountImpl();
  return dev_cnt;
}

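// Returns the device's compute capability encoded as major * 10 + minor,
// e.g. 70 for a compute-capability-7.0 (Volta) GPU.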
int GetCUDAComputeCapability(int id) {
  PADDLE_ENFORCE_LT(id, GetCUDADeviceCount(),
                    "id must be less than GPU count");
  cudaDeviceProp device_prop;
  auto e = cudaGetDeviceProperties(&device_prop, id);
  PADDLE_ENFORCE(e, ErrorLog(e,
                             "cudaGetDeviceProperties in "
                             "paddle::platform::GetCUDAComputeCapability"));
  return device_prop.major * 10 + device_prop.minor;
}

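// CUDA reports versions as 1000 * major + 10 * minor, e.g. 10010 for CUDA
// 10.1; both getters below return that encoding.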
int GetCUDARuntimeVersion(int id) {
  PADDLE_ENFORCE_LT(id, GetCUDADeviceCount(),
                    "id must be less than GPU count");
  int runtime_version = 0;
  PADDLE_ENFORCE(cudaRuntimeGetVersion(&runtime_version),
                 "cudaRuntimeGetVersion failed in "
                 "paddle::platform::GetCUDARuntimeVersion");
  return runtime_version;
}

int GetCUDADriverVersion(int id) {
  PADDLE_ENFORCE_LT(id, GetCUDADeviceCount(),
                    "id must be less than GPU count");
  int driver_version = 0;
  PADDLE_ENFORCE(cudaDriverGetVersion(&driver_version),
                 "cudaDriverGetVersion failed in "
                 "paddle::platform::GetCUDADriverVersion");
  return driver_version;
}

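// Tensor Cores first appeared with compute capability 7.0 (Volta), and the
// cuBLAS/cuDNN Tensor Core paths require the CUDA 9 toolkit or newer.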
bool TensorCoreAvailable() {
#if CUDA_VERSION >= 9000
  int device = GetCurrentDeviceId();
  int compute_capability = GetCUDAComputeCapability(device);
  return compute_capability >= 70;
#else
  return false;
#endif
}

int GetCUDAMultiProcessors(int id) {
  PADDLE_ENFORCE_LT(id, GetCUDADeviceCount(),
                    "id must be less than GPU count");
  int count;
  PADDLE_ENFORCE(
      cudaDeviceGetAttribute(&count, cudaDevAttrMultiProcessorCount, id),
      "cudaDeviceGetAttribute failed in "
      "paddle::platform::GetCUDAMultiProcessors");
  return count;
}

int GetCUDAMaxThreadsPerMultiProcessor(int id) {
  PADDLE_ENFORCE_LT(id, GetCUDADeviceCount(),
                    "id must be less than GPU count");
  int count;
  PADDLE_ENFORCE(cudaDeviceGetAttribute(
                     &count, cudaDevAttrMaxThreadsPerMultiProcessor, id),
                 "cudaDeviceGetAttribute failed in "
                 "paddle::platform::GetCUDAMaxThreadsPerMultiProcessor");
  return count;
}

int GetCurrentDeviceId() {
  int device_id;
  PADDLE_ENFORCE(
      cudaGetDevice(&device_id),
      "cudaGetDevice failed in paddle::platform::GetCurrentDeviceId");
  return device_id;
}

//! Get the list of device ids from FLAGS_selected_gpus, or use all devices.
std::vector<int> GetSelectedDevices() {
  // use user specified GPUs in single-node multi-process mode.
  std::vector<int> devices;
  if (!FLAGS_selected_gpus.empty()) {
    auto devices_str = paddle::string::Split(FLAGS_selected_gpus, ',');
    for (auto id : devices_str) {
      devices.push_back(atoi(id.c_str()));
    }
  } else {
    int count = GetCUDADeviceCount();
    for (int i = 0; i < count; ++i) {
      devices.push_back(i);
    }
  }
  return devices;
}

void SetDeviceId(int id) {
  // TODO(qijun): find a better way to cache the cuda device count
  PADDLE_ENFORCE_LT(id, GetCUDADeviceCount(),
                    "id must be less than GPU count");
  PADDLE_ENFORCE(cudaSetDevice(id),
                 "cudaSetDevice failed in paddle::platform::SetDeviceId");
}

void GpuMemoryUsage(size_t *available, size_t *total) {
  PADDLE_ENFORCE(cudaMemGetInfo(available, total),
                 "cudaMemGetInfo failed in paddle::platform::GetMemoryUsage");
}

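// The three size getters below implement the policy described by the flags
// above: an explicit *_in_mb flag wins; otherwise the size is a fraction of
// (total - reserved) GPU memory.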
size_t GpuMaxAllocSize() {
  return std::max(GpuInitAllocSize(), GpuReallocSize());
}

size_t GpuInitAllocSize() {
  if (FLAGS_initial_gpu_memory_in_mb > 0ul) {
    // Initial memory will be allocated by FLAGS_initial_gpu_memory_in_mb
    return static_cast<size_t>(FLAGS_initial_gpu_memory_in_mb << 20);
  }

  // FLAGS_initial_gpu_memory_in_mb is 0; initial memory will be allocated
  // by fraction.
  size_t total = 0;
  size_t available = 0;

  GpuMemoryUsage(&available, &total);
  size_t reserving = static_cast<size_t>(fraction_reserve_gpu_memory * total);

  return static_cast<size_t>((total - reserving) *
                             FLAGS_fraction_of_gpu_memory_to_use);
}

size_t GpuReallocSize() {
  if (FLAGS_reallocate_gpu_memory_in_mb > 0ul) {
    // Additional memory will be allocated by
    // FLAGS_reallocate_gpu_memory_in_mb
    return static_cast<size_t>(FLAGS_reallocate_gpu_memory_in_mb << 20);
  }

  // FLAGS_reallocate_gpu_memory_in_mb is 0; additional memory will be
  // allocated by fraction.
  size_t total = 0;
  size_t available = 0;

  GpuMemoryUsage(&available, &total);
  size_t reserving = static_cast<size_t>(fraction_reserve_gpu_memory * total);

  return static_cast<size_t>((total - reserving) *
                             FLAGS_fraction_of_gpu_memory_to_use);
}

size_t GpuMinChunkSize() {
  // The minimum chunk size that may be allocated is 256 bytes.
  return 1 << 8;
}

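// Largest single chunk the allocator may request. Worked example with
// illustrative numbers: on an idle 16 GB card, reserving = 0.05 * 16 GB =
// 0.8 GB, and with the default (non-Windows) fraction 0.92 the chunk is
// (16 - 0.8) * 0.92 ~= 14 GB, provided that much is actually available.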
size_t GpuMaxChunkSize() {
  size_t total = 0;
  size_t available = 0;

  GpuMemoryUsage(&available, &total);
  VLOG(10) << "GPU Usage " << available / 1024 / 1024 << "M/"
           << total / 1024 / 1024 << "M";
  size_t reserving = static_cast<size_t>(fraction_reserve_gpu_memory * total);
  // If available is less than the minimum chunk size, no usable memory exists.
  available =
      std::min(std::max(available, GpuMinChunkSize()) - GpuMinChunkSize(),
               total - reserving);

  size_t allocating = GpuMaxAllocSize();

  PADDLE_ENFORCE_LE(allocating, available,
                    "Insufficient GPU memory to allocation.");

  return allocating;
}

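// Thin wrappers over the CUDA memcpy/memset APIs that turn error codes into
// PADDLE_ENFORCE failures. The *Async variants only enqueue work on `stream`;
// callers must synchronize before reading the destination.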
void GpuMemcpyAsync(void *dst, const void *src, size_t count,
                    enum cudaMemcpyKind kind, cudaStream_t stream) {
  PADDLE_ENFORCE(cudaMemcpyAsync(dst, src, count, kind, stream),
                 "cudaMemcpyAsync failed in paddle::platform::GpuMemcpyAsync "
                 "(%p -> %p, length: %d)",
                 src, dst, static_cast<int>(count));
}

void GpuMemcpySync(void *dst, const void *src, size_t count,
                   enum cudaMemcpyKind kind) {
  PADDLE_ENFORCE(cudaMemcpy(dst, src, count, kind),
                 "cudaMemcpy failed in paddle::platform::GpuMemcpySync (%p -> "
                 "%p, length: %d)",
                 src, dst, static_cast<int>(count));
}

void GpuMemcpyPeerAsync(void *dst, int dst_device, const void *src,
                        int src_device, size_t count, cudaStream_t stream) {
  PADDLE_ENFORCE(
      cudaMemcpyPeerAsync(dst, dst_device, src, src_device, count, stream),
      "cudaMemcpyPeerAsync failed in paddle::platform::GpuMemcpyPeerAsync");
}

void GpuMemcpyPeerSync(void *dst, int dst_device, const void *src,
                       int src_device, size_t count) {
  PADDLE_ENFORCE(
      cudaMemcpyPeer(dst, dst_device, src, src_device, count),
      "cudaMemcpyPeer failed in paddle::platform::GpuMemcpyPeerSync");
}

void GpuMemsetAsync(void *dst, int value, size_t count, cudaStream_t stream) {
  PADDLE_ENFORCE(cudaMemsetAsync(dst, value, count, stream),
                 "cudaMemsetAsync failed in paddle::platform::GpuMemsetAsync");
}
}  // namespace platform
}  // namespace paddle