Unverified commit 0c43a376, authored by Yi Wang, committed by GitHub

Fix cpplint errors with paddle/fluid/platform/gpu_info.* (#9710)

* Fix cpplint errors with paddle/fluid/platform/gpu_info.*

* Update
Parent: 55ffceaa
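For context, the cpplint complaint behind this change is the runtime/references check: the Google C++ style guide asks that output parameters be passed as pointers rather than non-const references, so the mutation is visible at each call site. Since cudaMemGetInfo already takes size_t* arguments, the pointer-based GpuMemoryUsage can simply forward them, as the gpu_info.cc hunk below shows. The following is a minimal, self-contained sketch of the pattern with hypothetical names, not the Paddle sources:

#include <cstddef>
#include <iostream>

// Hypothetical stand-in for platform::GpuMemoryUsage after this commit:
// results come back through pointer out-parameters.
void QueryMemoryUsage(std::size_t *available, std::size_t *total) {
  *available = 6UL << 30;  // placeholder values in place of cudaMemGetInfo
  *total = 8UL << 30;
}

int main() {
  std::size_t avail = 0, total = 0;
  QueryMemoryUsage(&avail, &total);  // the explicit & at the call site is what the style rule wants
  std::cout << avail << " bytes available of " << total << " bytes total\n";
  return 0;
}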
@@ -95,7 +95,7 @@ void* Alloc<platform::CUDAPlace>(platform::CUDAPlace place, size_t size) {
     int cur_dev = platform::GetCurrentDeviceId();
     platform::SetDeviceId(place.device);
     size_t avail, total;
-    platform::GpuMemoryUsage(avail, total);
+    platform::GpuMemoryUsage(&avail, &total);
     LOG(WARNING) << "Cannot allocate " << size << " bytes in GPU "
                  << place.device << ", available " << avail << " bytes";
     LOG(WARNING) << "total " << total;
...
paddle/fluid/platform/gpu_info.cc:

@@ -14,8 +14,9 @@ limitations under the License. */

 #include "paddle/fluid/platform/gpu_info.h"

+#include <algorithm>
 #include "gflags/gflags.h"
 #include "paddle/fluid/platform/enforce.h"

 DEFINE_double(fraction_of_gpu_memory_to_use, 0.92,
@@ -77,8 +78,8 @@ void SetDeviceId(int id) {
                  "cudaSetDevice failed in paddle::platform::SetDeviceId");
 }

-void GpuMemoryUsage(size_t &available, size_t &total) {
-  PADDLE_ENFORCE(cudaMemGetInfo(&available, &total),
+void GpuMemoryUsage(size_t *available, size_t *total) {
+  PADDLE_ENFORCE(cudaMemGetInfo(available, total),
                  "cudaMemGetInfo failed in paddle::platform::GetMemoryUsage");
 }
@@ -86,7 +87,7 @@ size_t GpuMaxAllocSize() {
   size_t total = 0;
   size_t available = 0;

-  GpuMemoryUsage(available, total);
+  GpuMemoryUsage(&available, &total);

   // Reserve the rest for page tables, etc.
   return static_cast<size_t>(total * FLAGS_fraction_of_gpu_memory_to_use);
@@ -101,7 +102,7 @@ size_t GpuMaxChunkSize() {
   size_t total = 0;
   size_t available = 0;

-  GpuMemoryUsage(available, total);
+  GpuMemoryUsage(&available, &total);

   VLOG(10) << "GPU Usage " << available / 1024 / 1024 << "M/"
            << total / 1024 / 1024 << "M";
   size_t reserving = static_cast<size_t>(0.05 * total);
...
paddle/fluid/platform/gpu_info.h:

@@ -24,8 +24,7 @@ namespace paddle {
 namespace platform {

 //! Environment variable: fraction of GPU memory to use on each device.
-const std::string kEnvFractionGpuMemoryToUse =
-    "PADDLE_FRACTION_GPU_MEMORY_TO_USE";
+const char kEnvFractionGpuMemoryToUse[] = "PADDLE_FRACTION_GPU_MEMORY_TO_USE";

 //! Get the total number of GPU devices in system.
 int GetCUDADeviceCount();
@@ -46,7 +45,7 @@ int GetCurrentDeviceId();
 void SetDeviceId(int device_id);

 //! Get the memory usage of current GPU device.
-void GpuMemoryUsage(size_t &available, size_t &total);
+void GpuMemoryUsage(size_t *available, size_t *total);

 //! Get the maximum allocation size of current GPU device.
 size_t GpuMaxAllocSize();
...
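For context on the header hunk above: replacing the const std::string constant with a const char array follows a related style recommendation that namespace-scope variables of class type are discouraged, because they require dynamic initialization and run a destructor at program exit, while a char array is a plain compile-time constant. A tiny illustrative sketch of that rule, reusing the identifier from the header but otherwise not the Paddle code:

#include <cstdio>

// A namespace-scope std::string would be dynamically initialized and has a
// non-trivial destructor; a const char array avoids both.
const char kEnvFractionGpuMemoryToUse[] = "PADDLE_FRACTION_GPU_MEMORY_TO_USE";

int main() {
  std::printf("%s\n", kEnvFractionGpuMemoryToUse);
  return 0;
}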