/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/platform/gpu_info.h"

#include <algorithm>
#include <cstdlib>
#include <string>

#include "gflags/gflags.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/string/split.h"

#ifndef _WIN32
constexpr static float fraction_of_gpu_memory_to_use = 0.92f;
#else
// fraction_of_gpu_memory_to_use cannot be too high on Windows,
// since the Win32 graphics subsystem can occupy some GPU memory,
// which may leave insufficient memory for paddle
constexpr static float fraction_of_gpu_memory_to_use = 0.5f;
#endif

DEFINE_double(fraction_of_gpu_memory_to_use, fraction_of_gpu_memory_to_use,
              "Allocate a trunk of gpu memory that is this fraction of the "
              "total gpu memory size. Future memory usage will be allocated "
              "from the trunk. If the trunk doesn't have enough gpu memory, "
              "additional trunks of the same size will be requested from gpu "
              "until the gpu has no memory left for another trunk.");

DEFINE_bool(
    enable_cublas_tensor_op_math, false,
    "The enable_cublas_tensor_op_math indicate whether to use Tensor Core, "
    "but it may loss precision. Currently, There are two CUDA libraries that"
    " use Tensor Cores, cuBLAS and cuDNN. cuBLAS uses Tensor Cores to speed up"
    " GEMM computations(the matrices must be either half precision or single "
    "precision); cuDNN uses Tensor Cores to speed up both convolutions(the "
    "input and output must be half precision) and recurrent neural networks "
    "(RNNs).");

DEFINE_string(selected_gpus, "",
              "A list of device ids separated by comma, like: 0,1,2,3. "
              "This option is useful when doing multi process training and "
              "each process have only one device (GPU). If you want to use "
              "all visible devices, set this to empty string. NOTE: the "
              "reason of doing this is that we want to use P2P communication"
              "between GPU devices, use CUDA_VISIBLE_DEVICES can only use"
              "share-memory only.");

namespace paddle {
namespace platform {

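// Counts the CUDA devices visible to this process. When CUDA_VISIBLE_DEVICES
// is set but contains only blanks, every device is hidden, so report zero
// GPUs without touching the CUDA runtime.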
static int GetCUDADeviceCountImpl() {
  const auto *cuda_visible_devices = std::getenv("CUDA_VISIBLE_DEVICES");
  if (cuda_visible_devices != nullptr) {
    std::string cuda_visible_devices_str(cuda_visible_devices);
    if (std::all_of(cuda_visible_devices_str.begin(),
                    cuda_visible_devices_str.end(),
                    [](char ch) { return ch == ' '; })) {
      VLOG(2) << "CUDA_VISIBLE_DEVICES is set to be empty. No GPU detected.";
      return 0;
    }
  }

  int count;
  PADDLE_ENFORCE(
      cudaGetDeviceCount(&count),
      "cudaGetDeviceCount failed in paddle::platform::GetCUDADeviceCount");
  return count;
}

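// Query the device count once and cache it in a function-local static; the
// set of visible devices is assumed not to change while the process runs.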
int GetCUDADeviceCount() {
  static auto dev_cnt = GetCUDADeviceCountImpl();
  return dev_cnt;
}

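// Returns the compute capability encoded as major * 10 + minor,
// e.g. 70 for a compute capability 7.0 (Volta) device.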
int GetCUDAComputeCapability(int id) {
  PADDLE_ENFORCE_LT(id, GetCUDADeviceCount(),
                    "id must be less than GPU count");
  cudaDeviceProp device_prop;
  PADDLE_ENFORCE(cudaGetDeviceProperties(&device_prop, id),
                 "cudaGetDeviceProperties failed in "
                 "paddle::platform::GetCUDAComputeCapability");
  return device_prop.major * 10 + device_prop.minor;
}

int GetCUDARuntimeVersion(int id) {
  PADDLE_ENFORCE_LT(id, GetCUDADeviceCount(),
                    "id must be less than GPU count");
  int runtime_version = 0;
  PADDLE_ENFORCE(cudaRuntimeGetVersion(&runtime_version),
                 "cudaRuntimeGetVersion failed in "
                 "paddle::platform::cudaRuntimeGetVersion");
  return runtime_version;
}

int GetCUDADriverVersion(int id) {
  PADDLE_ENFORCE_LT(id, GetCUDADeviceCount(),
                    "id must be less than GPU count");
  int driver_version = 0;
  PADDLE_ENFORCE(cudaDriverGetVersion(&driver_version),
                 "cudaDriverGetVersion failed in "
                 "paddle::platform::GetCUDADriverVersion");
  return driver_version;
}

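// Tensor Cores require a compute capability of at least 7.0 (Volta) and a
// CUDA toolkit of at least 9.0.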
bool TensorCoreAvailable() {
#if CUDA_VERSION >= 9000
  int device = GetCurrentDeviceId();
  int compute_capability = GetCUDAComputeCapability(device);
  return compute_capability >= 70;
#else
  return false;
#endif
}

int GetCUDAMultiProcessors(int id) {
  PADDLE_ENFORCE_LT(id, GetCUDADeviceCount(),
                    "id must be less than GPU count");
  int count;
  PADDLE_ENFORCE(
      cudaDeviceGetAttribute(&count, cudaDevAttrMultiProcessorCount, id),
      "cudaDeviceGetAttribute failed in "
      "paddle::platform::GetCUDAMultiProcessors");
  return count;
}

int GetCUDAMaxThreadsPerMultiProcessor(int id) {
  PADDLE_ENFORCE_LT(id, GetCUDADeviceCount(),
                    "id must be less than GPU count");
  int count;
  PADDLE_ENFORCE(cudaDeviceGetAttribute(
                     &count, cudaDevAttrMaxThreadsPerMultiProcessor, id),
                 "cudaDeviceGetAttribute failed in "
                 "paddle::platform::GetCUDAMaxThreadsPerMultiProcessor");
  return count;
}

int GetCurrentDeviceId() {
  int device_id;
  PADDLE_ENFORCE(
      cudaGetDevice(&device_id),
      "cudaGetDevice failed in paddle::platform::GetCurrentDeviceId");
  return device_id;
}

//! Get a list of device ids from environment variable or use all.
std::vector<int> GetSelectedDevices() {
  // Use user-specified GPUs in single-node multi-process mode.
  std::vector<int> devices;
  if (!FLAGS_selected_gpus.empty()) {
    auto devices_str = paddle::string::Split(FLAGS_selected_gpus, ',');
    for (auto id : devices_str) {
      devices.push_back(atoi(id.c_str()));
    }
  } else {
    int count = GetCUDADeviceCount();
    for (int i = 0; i < count; ++i) {
      devices.push_back(i);
    }
  }
  return devices;
}

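// Makes `id` the current CUDA device for subsequent runtime calls issued by
// the calling host thread.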
void SetDeviceId(int id) {
  // TODO(qijun): find a better way to cache the cuda device count
  PADDLE_ENFORCE_LT(id, GetCUDADeviceCount(),
                    "id must be less than GPU count");
  PADDLE_ENFORCE(cudaSetDevice(id),
                 "cudaSetDevice failed in paddle::platform::SetDeviceId");
}

void GpuMemoryUsage(size_t *available, size_t *total) {
  PADDLE_ENFORCE(cudaMemGetInfo(available, total),
                 "cudaMemGetInfo failed in paddle::platform::GetMemoryUsage");
}

size_t GpuMaxAllocSize() {
  size_t total = 0;
  size_t available = 0;

  GpuMemoryUsage(&available, &total);

  // Reserve the rest for page tables, etc.
  return static_cast<size_t>(total * FLAGS_fraction_of_gpu_memory_to_use);
}

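// The two functions below bound the chunk sizes used when allocating GPU
// memory: GpuMinChunkSize() is the smallest chunk handed out, and
// GpuMaxChunkSize() is the largest single chunk worth requesting.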
size_t GpuMinChunkSize() {
  // The minimum chunk size that can be allocated is 256 bytes.
  return 1 << 8;
}

size_t GpuMaxChunkSize() {
  size_t total = 0;
  size_t available = 0;

  GpuMemoryUsage(&available, &total);
  VLOG(10) << "GPU Usage " << available / 1024 / 1024 << "M/"
           << total / 1024 / 1024 << "M";
  size_t reserving = static_cast<size_t>(0.05 * total);
  // If available memory is less than the minimum chunk size, no usable
  // memory exists.
  available =
      std::min(std::max(available, GpuMinChunkSize()) - GpuMinChunkSize(),
               total - reserving);

  // Reserve the rest of the memory for page tables, etc.

  size_t allocating = static_cast<size_t>(FLAGS_fraction_of_gpu_memory_to_use *
                                          (total - reserving));

  PADDLE_ENFORCE_LE(allocating, available,
                    "Insufficient GPU memory to allocation.");

  return allocating;
}

void GpuMemcpyAsync(void *dst, const void *src, size_t count,
                    enum cudaMemcpyKind kind, cudaStream_t stream) {
  PADDLE_ENFORCE(cudaMemcpyAsync(dst, src, count, kind, stream),
                 "cudaMemcpyAsync failed in paddle::platform::GpuMemcpyAsync");
}

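// Synchronous counterpart of GpuMemcpyAsync: cudaMemcpy blocks the calling
// thread until the copy has completed.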
void GpuMemcpySync(void *dst, const void *src, size_t count,
                   enum cudaMemcpyKind kind) {
  PADDLE_ENFORCE(cudaMemcpy(dst, src, count, kind),
                 "cudaMemcpy failed in paddle::platform::GpuMemcpySync");
}

void GpuMemcpyPeerAsync(void *dst, int dst_device, const void *src,
                        int src_device, size_t count, cudaStream_t stream) {
  PADDLE_ENFORCE(
      cudaMemcpyPeerAsync(dst, dst_device, src, src_device, count, stream),
      "cudaMemcpyPeerAsync failed in paddle::platform::GpuMemcpyPeerAsync");
}

void GpuMemcpyPeerSync(void *dst, int dst_device, const void *src,
                       int src_device, size_t count) {
  PADDLE_ENFORCE(
      cudaMemcpyPeer(dst, dst_device, src, src_device, count),
      "cudaMemcpyPeer failed in paddle::platform::GpuMemcpyPeerSync");
}

void GpuMemsetAsync(void *dst, int value, size_t count, cudaStream_t stream) {
  PADDLE_ENFORCE(cudaMemsetAsync(dst, value, count, stream),
                 "cudaMemsetAsync failed in paddle::platform::GpuMemsetAsync");
}
}  // namespace platform
}  // namespace paddle