BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Unverified commit 81644145
Authored May 10, 2022 by wanghuancoder; committed by GitHub, May 10, 2022
[Eager] print gpu mem info (#42616)
* print mem
* refine
* refine
* refine
* refine
Parent: 8a100774
Showing 3 changed files with 19 additions and 10 deletions (+19, -10)
paddle/fluid/platform/device/gpu/gpu_info.cc: +16, -8
tools/get_ut_mem_map.py: +2, -2
tools/test_runner.py: +1, -0
paddle/fluid/platform/device/gpu/gpu_info.cc
@@ -50,11 +50,12 @@ DECLARE_uint64(reallocate_gpu_memory_in_mb);
DECLARE_bool(enable_cublas_tensor_op_math);
DECLARE_uint64(gpu_memory_limit_mb);

#ifdef PADDLE_WITH_TESTING
PADDLE_DEFINE_EXPORTED_bool(enable_gpu_memory_usage_log, false,
                            "Whether to print the message of gpu memory usage "
                            "at exit, mainly used for UT and CI.");
#endif

PADDLE_DEFINE_EXPORTED_bool(enable_gpu_memory_usage_log_mb, true,
                            "Whether to print the message of gpu memory usage "
                            "MB as a unit of measurement.");

constexpr static float fraction_reserve_gpu_memory = 0.05f;
@@ -145,25 +146,32 @@ class RecordedGpuMallocHelper {
       mtx_.reset(new std::mutex());
     }
#ifdef PADDLE_WITH_TESTING
     if (FLAGS_enable_gpu_memory_usage_log) {
       // A fake UPDATE to trigger the construction of memory stat instances,
       // make sure that they are destructed after RecordedGpuMallocHelper.
       MEMORY_STAT_UPDATE(Reserved, dev_id, 0);
       MEMORY_STAT_UPDATE(Allocated, dev_id, 0);
     }
#endif
   }

   DISABLE_COPY_AND_ASSIGN(RecordedGpuMallocHelper);

  public:
   ~RecordedGpuMallocHelper() {
#ifdef PADDLE_WITH_TESTING
     if (FLAGS_enable_gpu_memory_usage_log) {
-      std::cout << "[Memory Usage (Byte)] gpu " << dev_id_ << " : "
-                << MEMORY_STAT_PEAK_VALUE(Reserved, dev_id_) << std::endl;
+      if (FLAGS_enable_gpu_memory_usage_log_mb) {
+        std::cout << "[Memory Usage (MB)] gpu " << dev_id_ << " : Reserved = "
+                  << MEMORY_STAT_PEAK_VALUE(Reserved, dev_id_) / 1048576.0
+                  << ", Allocated = "
+                  << MEMORY_STAT_PEAK_VALUE(Allocated, dev_id_) / 1048576.0
+                  << std::endl;
+      } else {
+        std::cout << "[Memory Usage (Byte)] gpu " << dev_id_
+                  << " : Reserved = "
+                  << MEMORY_STAT_PEAK_VALUE(Reserved, dev_id_)
+                  << ", Allocated = "
+                  << MEMORY_STAT_PEAK_VALUE(Allocated, dev_id_) << std::endl;
+      }
     }
#endif
   }

   static RecordedGpuMallocHelper *Instance(int dev_id) {
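With FLAGS_enable_gpu_memory_usage_log set, the destructor above prints the peak reserved and allocated device memory at process exit, either in MB (dividing the byte counters by 1048576) or in raw bytes depending on FLAGS_enable_gpu_memory_usage_log_mb. The following is a minimal Python sketch, not Paddle code, that reproduces the two line formats from assumed peak byte counts; the exact numeric formatting of the real C++ output may differ.

# Illustrative sketch only (not Paddle source): mirrors the output of
# ~RecordedGpuMallocHelper() for assumed peak byte counts on one device.
def format_gpu_mem_log(dev_id, reserved_bytes, allocated_bytes, use_mb=True):
    if use_mb:  # corresponds to FLAGS_enable_gpu_memory_usage_log_mb = true
        return ("[Memory Usage (MB)] gpu %d : Reserved = %s, Allocated = %s"
                % (dev_id, reserved_bytes / 1048576.0,
                   allocated_bytes / 1048576.0))
    # corresponds to FLAGS_enable_gpu_memory_usage_log_mb = false
    return ("[Memory Usage (Byte)] gpu %d : Reserved = %s, Allocated = %s"
            % (dev_id, reserved_bytes, allocated_bytes))

print(format_gpu_mem_log(0, 536870912, 268435456))         # MB variant
print(format_gpu_mem_log(0, 536870912, 268435456, False))  # byte variant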
tools/get_ut_mem_map.py
@@ -34,8 +34,8 @@ def get_ut_mem(rootPath):
            if '[Memory Usage (Byte)] gpu' in line:
                mem_reserved = round(
                    float(
-                       line.split('[max memory reserved] gpu')[1].split(
-                           ':')[1].split('\\n')[0].strip()), 2)
+                       line.split(' : Reserved = ')[1].split(
+                           ', Allocated = ')[0]), 2)
                if mem_reserved > mem_reserved1:
                    mem_reserved1 = mem_reserved
            if 'MAX_GPU_MEMORY_USE=' in line:
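The updated parser extracts the peak reserved byte count from the new "Reserved = ..., Allocated = ..." line format emitted by gpu_info.cc. A minimal standalone sketch of the same split chain, run on a made-up sample line:

# Standalone sketch of the new parsing path above; the numbers in the
# sample line are invented for illustration.
line = "[Memory Usage (Byte)] gpu 0 : Reserved = 536870912, Allocated = 268435456"
if '[Memory Usage (Byte)] gpu' in line:
    mem_reserved = round(
        float(line.split(' : Reserved = ')[1].split(', Allocated = ')[0]), 2)
    print(mem_reserved)  # 536870912.0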
tools/test_runner.py
@@ -32,6 +32,7 @@ def main():
    if core.is_compiled_with_cuda() or core.is_compiled_with_rocm():
        if (os.getenv('FLAGS_enable_gpu_memory_usage_log') == None):
            os.environ['FLAGS_enable_gpu_memory_usage_log'] = 'true'
+           os.environ['FLAGS_enable_gpu_memory_usage_log_mb'] = 'false'
    some_test_failed = False
    for module_name in sys.argv[1:]:
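test_runner.py now defaults both flags for CUDA/ROCm builds, so each unit test prints a byte-unit peak-memory line that tools/get_ut_mem_map.py can later collect. As a hedged illustration (the test module name is a placeholder, not part of this commit), the same effect can be had by exporting the two environment variables before launching a test process:

# Hedged example, not part of the commit: run one test with the exit-time
# GPU memory report enabled. 'some_unit_test' is a placeholder module name.
import os
import subprocess

env = dict(os.environ)
env['FLAGS_enable_gpu_memory_usage_log'] = 'true'      # print peak usage at exit
env['FLAGS_enable_gpu_memory_usage_log_mb'] = 'false'  # report bytes for the CI parser

subprocess.run(['python', '-m', 'some_unit_test'], env=env, check=False)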