BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle, in sync with upstream)
Commit 0c43a376 (unverified)
Authored Apr 07, 2018 by Yi Wang; committed via GitHub on Apr 07, 2018

Fix cpplint errors with paddle/fluid/platform/gpu_info.* (#9710)

* Fix cpplint errors with paddle/fluid/platform/gpu_info.*
* Update

Parent: 55ffceaa
Showing 3 changed files with 9 additions and 9 deletions (+9 −9):

paddle/fluid/memory/memory.cc       +1 −1
paddle/fluid/platform/gpu_info.cc   +6 −5
paddle/fluid/platform/gpu_info.h    +2 −3
paddle/fluid/memory/memory.cc

@@ -95,7 +95,7 @@ void* Alloc<platform::CUDAPlace>(platform::CUDAPlace place, size_t size) {
     int cur_dev = platform::GetCurrentDeviceId();
     platform::SetDeviceId(place.device);
     size_t avail, total;
-    platform::GpuMemoryUsage(avail, total);
+    platform::GpuMemoryUsage(&avail, &total);
     LOG(WARNING) << "Cannot allocate " << size << " bytes in GPU "
                  << place.device << ", available " << avail << " bytes";
     LOG(WARNING) << "total " << total;
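For context (a reviewer's gloss, not part of the commit): the change above is purely at the call site. GpuMemoryUsage now takes its outputs by pointer (see the gpu_info.cc hunks below), which is the form cpplint's runtime/references check asks for, so the caller passes &avail and &total. A minimal self-contained sketch of that calling convention, using an illustrative stand-in function rather than the real Paddle API:

#include <cstddef>
#include <iostream>

// Illustrative stand-in for a pointer-out-parameter query such as the new
// platform::GpuMemoryUsage; the numbers below are placeholders, not a real
// device query.
void QueryMemoryUsage(std::size_t* available, std::size_t* total) {
  *available = 512;
  *total = 1024;
}

int main() {
  std::size_t avail = 0, total = 0;
  // The explicit &avail and &total make it visible at the call site that both
  // variables are written, which is the readability point behind the rule.
  QueryMemoryUsage(&avail, &total);
  std::cout << avail << " of " << total << " free\n";
  return 0;
}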
paddle/fluid/platform/gpu_info.cc

@@ -14,8 +14,9 @@ limitations under the License. */
 #include "paddle/fluid/platform/gpu_info.h"
 
-#include "gflags/gflags.h"
+#include <algorithm>
 
+#include "gflags/gflags.h"
 #include "paddle/fluid/platform/enforce.h"
 
 DEFINE_double(fraction_of_gpu_memory_to_use, 0.92,

@@ -77,8 +78,8 @@ void SetDeviceId(int id) {
                  "cudaSetDevice failed in paddle::platform::SetDeviceId");
 }
 
-void GpuMemoryUsage(size_t &available, size_t &total) {
-  PADDLE_ENFORCE(cudaMemGetInfo(&available, &total),
+void GpuMemoryUsage(size_t *available, size_t *total) {
+  PADDLE_ENFORCE(cudaMemGetInfo(available, total),
                  "cudaMemGetInfo failed in paddle::platform::GetMemoryUsage");
 }

@@ -86,7 +87,7 @@ size_t GpuMaxAllocSize() {
   size_t total = 0;
   size_t available = 0;
 
-  GpuMemoryUsage(available, total);
+  GpuMemoryUsage(&available, &total);
 
   // Reserve the rest for page tables, etc.
   return static_cast<size_t>(total * FLAGS_fraction_of_gpu_memory_to_use);

@@ -101,7 +102,7 @@ size_t GpuMaxChunkSize() {
   size_t total = 0;
   size_t available = 0;
 
-  GpuMemoryUsage(available, total);
+  GpuMemoryUsage(&available, &total);
   VLOG(10) << "GPU Usage " << available / 1024 / 1024 << "M/"
            << total / 1024 / 1024 << "M";
   size_t reserving = static_cast<size_t>(0.05 * total);
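A side note on the new signature (my sketch, not the Paddle source): the CUDA runtime declares cudaError_t cudaMemGetInfo(size_t* free, size_t* total), so once GpuMemoryUsage itself takes pointers, its arguments can be forwarded to the runtime directly, with no address-of needed inside the function. Roughly:

#include <cstdio>

#include <cuda_runtime.h>

// Sketch of a pointer-based wrapper around cudaMemGetInfo; error handling here
// uses fprintf instead of Paddle's PADDLE_ENFORCE macro.
void GpuMemoryUsageSketch(size_t* available, size_t* total) {
  cudaError_t err = cudaMemGetInfo(available, total);  // forward the pointers as-is
  if (err != cudaSuccess) {
    std::fprintf(stderr, "cudaMemGetInfo failed: %s\n", cudaGetErrorString(err));
  }
}

int main() {
  size_t avail = 0, total = 0;
  GpuMemoryUsageSketch(&avail, &total);
  std::printf("GPU memory: %zu of %zu bytes free\n", avail, total);
  return 0;
}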
paddle/fluid/platform/gpu_info.h

@@ -24,8 +24,7 @@ namespace paddle {
 namespace platform {
 
 //! Environment variable: fraction of GPU memory to use on each device.
-const std::string kEnvFractionGpuMemoryToUse =
-    "PADDLE_FRACTION_GPU_MEMORY_TO_USE";
+const char kEnvFractionGpuMemoryToUse[] = "PADDLE_FRACTION_GPU_MEMORY_TO_USE";
 
 //! Get the total number of GPU devices in system.
 int GetCUDADeviceCount();

@@ -46,7 +45,7 @@ int GetCurrentDeviceId();
 void SetDeviceId(int device_id);
 
 //! Get the memory usage of current GPU device.
-void GpuMemoryUsage(size_t &available, size_t &total);
+void GpuMemoryUsage(size_t *available, size_t *total);
 
 //! Get the maximum allocation size of current GPU device.
 size_t GpuMaxAllocSize();
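The kEnvFractionGpuMemoryToUse change addresses a different cpplint category (runtime/string): a const std::string at namespace scope has a non-trivial constructor and destructor that run during static initialization and shutdown, so the linter suggests a C-style string constant instead. A small sketch of the rule, with an illustrative name rather than the Paddle constant:

#include <cstdio>
#include <cstdlib>

// Flagged by cpplint (runtime/string): a global std::string needs static
// construction and destruction.
//   const std::string kEnvNameOld = "PADDLE_FRACTION_GPU_MEMORY_TO_USE";

// Preferred form, as in the header above: a trivially initialized array that
// still converts to const char* wherever one is needed.
const char kEnvName[] = "PADDLE_FRACTION_GPU_MEMORY_TO_USE";

int main() {
  // std::getenv takes a const char*, so the C-style constant is used directly.
  const char* value = std::getenv(kEnvName);
  std::printf("%s=%s\n", kEnvName, value ? value : "(unset)");
  return 0;
}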