机器未来 / Paddle (forked from PaddlePaddle / Paddle)
Commit 58a9f9f7
Authored on Mar 28, 2018 by chengduoZH
Parent: ab601c19

set the max size of cudapinned memory

Showing 5 changed files with 35 additions and 5 deletions (+35 -5)
paddle/fluid/memory/detail/system_allocator.cc  +2  -2
paddle/fluid/memory/memory.cc                   +2  -2
paddle/fluid/memory/memory_test.cc              +1  -1
paddle/fluid/platform/cpu_info.cc               +21 -0
paddle/fluid/platform/cpu_info.h                +9  -0
paddle/fluid/memory/detail/system_allocator.cc

@@ -125,11 +125,11 @@ bool GPUAllocator::UseGpu() const { return true; }
 void* CUDAPinnedAllocator::Alloc(size_t& index, size_t size) {
   if (size <= 0) return nullptr;
-  // NOTE: here, we use CpuMaxAllocSize()/2 as the maximum memory size
+  // NOTE: here, we use CUDAPinnedMaxAllocSize as the maximum memory size
   // of host pinned allocation. Allocates too much would reduce
   // the amount of memory available to the underlying system for paging.
   size_t usable =
-      paddle::platform::CpuMaxAllocSize() / 2 - cuda_pinnd_alloc_size_;
+      paddle::platform::CUDAPinnedMaxAllocSize() - cuda_pinnd_alloc_size_;
   if (size > usable) return nullptr;
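The pattern this change introduces is a running-total cap: a pinned allocation is admitted only while the bytes already handed out stay under CUDAPinnedMaxAllocSize(). A minimal standalone C++ sketch of that bookkeeping, where kMaxPinnedBytes, g_pinned_alloc_size, and AllocPinnedCapped are illustrative names rather than Paddle's, and plain malloc stands in for cudaMallocHost:

#include <cstddef>
#include <cstdlib>

// Illustrative cap; in the commit this value comes from
// paddle::platform::CUDAPinnedMaxAllocSize().
static const size_t kMaxPinnedBytes = 1ull << 30;  // assume a 1 GiB budget
static size_t g_pinned_alloc_size = 0;             // bytes already handed out

// Admit a request only if it fits under the remaining budget, mirroring the
// "usable" check in CUDAPinnedAllocator::Alloc.
void* AllocPinnedCapped(size_t size) {
  if (size == 0) return nullptr;
  size_t usable = kMaxPinnedBytes - g_pinned_alloc_size;
  if (size > usable) return nullptr;  // refuse rather than over-pin host memory
  void* p = std::malloc(size);        // stand-in for cudaMallocHost in this sketch
  if (p != nullptr) g_pinned_alloc_size += size;
  return p;
}

Refusing the request outright, instead of falling back silently, keeps pinned pages from starving the OS of pageable memory, which is the concern the NOTE comment describes.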
paddle/fluid/memory/memory.cc

@@ -116,8 +116,8 @@ BuddyAllocator* GetCUDAPinnedBuddyAllocator() {
   static BuddyAllocator* ba = NULL;
   if (ba == NULL) {
     ba = new BuddyAllocator(new detail::CUDAPinnedAllocator,
-                            platform::CpuMinChunkSize(),
-                            platform::CpuMaxChunkSize());
+                            platform::CUDAPinnedMinChunkSize(),
+                            platform::CUDAPinnedMaxChunkSize());
   }
   return ba;
 }
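For context, GetCUDAPinnedBuddyAllocator builds one allocator lazily on first use; the commit only changes which chunk limits seed it. A rough sketch of that construct-on-first-use pattern with stand-in types (BuddyAllocatorStub, the stub allocator, and the hard-coded chunk values are assumptions for illustration, not Paddle's real types):

#include <cstddef>

// Stand-in types; the real BuddyAllocator and CUDAPinnedAllocator live in
// paddle::memory and paddle::memory::detail.
struct CUDAPinnedAllocatorStub {};
struct BuddyAllocatorStub {
  BuddyAllocatorStub(CUDAPinnedAllocatorStub* sys, size_t min_chunk, size_t max_chunk)
      : system_(sys), min_chunk_(min_chunk), max_chunk_(max_chunk) {}
  CUDAPinnedAllocatorStub* system_;
  size_t min_chunk_;
  size_t max_chunk_;
};

// Construct the allocator once, on first use, seeded with the pinned-memory
// chunk limits instead of the CPU ones, which is the substance of this commit.
BuddyAllocatorStub* GetPinnedBuddyAllocatorSketch() {
  static BuddyAllocatorStub* ba = nullptr;
  if (ba == nullptr) {
    ba = new BuddyAllocatorStub(new CUDAPinnedAllocatorStub,
                                /*min_chunk=*/1 << 16,              // 64 KB
                                /*max_chunk=*/(1ull << 33) / 256);  // cap / 256
  }
  return ba;
}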
paddle/fluid/memory/memory_test.cc

@@ -143,7 +143,7 @@ TEST(BuddyAllocator, GPUMultAlloc) {
 size_t align(size_t size, paddle::platform::CUDAPinnedPlace place) {
   size += sizeof(paddle::memory::detail::Metadata);
-  size_t alignment = paddle::platform::CpuMinChunkSize();
+  size_t alignment = paddle::platform::CUDAPinnedMinChunkSize();
   size_t remaining = size % alignment;
   return remaining == 0 ? size : size + (alignment - remaining);
 }
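The test's align helper is plain round-up-to-a-multiple arithmetic: the request plus metadata overhead is padded to the allocator's minimum chunk size. A self-contained check of that rounding, assuming the 64 KB value that CUDAPinnedMinChunkSize() returns:

#include <cassert>
#include <cstddef>

// Round size up to the next multiple of alignment, as the test's align()
// helper does after adding the Metadata overhead.
size_t AlignUp(size_t size, size_t alignment) {
  size_t remaining = size % alignment;
  return remaining == 0 ? size : size + (alignment - remaining);
}

int main() {
  const size_t kMinChunk = 1 << 16;                            // 64 KB
  assert(AlignUp(1, kMinChunk) == kMinChunk);                  // tiny request rounds up to one chunk
  assert(AlignUp(kMinChunk, kMinChunk) == kMinChunk);          // exact multiple is unchanged
  assert(AlignUp(kMinChunk + 1, kMinChunk) == 2 * kMinChunk);  // one byte over costs a whole chunk
  return 0;
}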
paddle/fluid/platform/cpu_info.cc

@@ -27,6 +27,10 @@ DEFINE_double(fraction_of_cpu_memory_to_use, 1,
               "Default use 100% of CPU memory for PaddlePaddle,"
               "reserve the rest for page tables, etc");
 
+DEFINE_double(fraction_of_cuda_pinned_memory_to_use, 0.5,
+              "Default use 100% of CPU memory for PaddlePaddle,"
+              "reserve the rest for page tables, etc");
+
 namespace paddle {
 namespace platform {

@@ -62,5 +66,22 @@ size_t CpuMaxChunkSize() {
   return CpuMaxAllocSize() / 32;
 }
 
+size_t CUDAPinnedMaxAllocSize() {
+  // For distributed systems, it requires configuring and limiting
+  // the fraction of memory to use.
+  return FLAGS_fraction_of_cuda_pinned_memory_to_use * CpuTotalPhysicalMemory();
+}
+
+size_t CUDAPinnedMinChunkSize() {
+  // Allow to allocate the minimum chunk size is 64 KB.
+  return 1 << 16;
+}
+
+size_t CUDAPinnedMaxChunkSize() {
+  // Allow to allocate the maximum chunk size is roughly 0.39% of CUDA_PINNED
+  // memory.
+  return CUDAPinnedMaxAllocSize() / 256;
+}
+
 }  // namespace platform
 }  // namespace paddle
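To make the three new limits concrete, here is a small sketch of their arithmetic under assumed inputs: 16 GiB of host RAM and the 0.5 default fraction. The real values come from CpuTotalPhysicalMemory() and the fraction_of_cuda_pinned_memory_to_use flag at runtime.

#include <cstddef>
#include <cstdio>

int main() {
  const double total_physical = 16.0 * (1ull << 30);  // assumed 16 GiB of host RAM
  const double fraction = 0.5;                        // flag default in this commit

  size_t max_alloc = static_cast<size_t>(fraction * total_physical);  // pinned-memory cap
  size_t max_chunk = max_alloc / 256;  // largest buddy chunk, roughly 0.39% of the cap
  size_t min_chunk = 1 << 16;          // 64 KB minimum chunk

  std::printf("max alloc: %zu bytes\n", max_alloc);  // 8 GiB under these assumptions
  std::printf("max chunk: %zu bytes\n", max_chunk);  // 32 MiB
  std::printf("min chunk: %zu bytes\n", min_chunk);  // 64 KiB
  return 0;
}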
paddle/fluid/platform/cpu_info.h

@@ -22,11 +22,20 @@ namespace platform {
 //! Get the maximum allocation size for a machine.
 size_t CpuMaxAllocSize();
 
+//! Get the maximum allocation size for a machine.
+size_t CUDAPinnedMaxAllocSize();
+
 //! Get the minimum chunk size for buddy allocator.
 size_t CpuMinChunkSize();
 
 //! Get the maximum chunk size for buddy allocator.
 size_t CpuMaxChunkSize();
 
+//! Get the minimum chunk size for buddy allocator.
+size_t CUDAPinnedMinChunkSize();
+
+//! Get the maximum chunk size for buddy allocator.
+size_t CUDAPinnedMaxChunkSize();
+
 }  // namespace platform
 }  // namespace paddle