Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
Crayon鑫
Paddle
提交
b8f5922d
P
Paddle
项目概览
Crayon鑫
/
Paddle
与 Fork 源项目一致
Fork自
PaddlePaddle / Paddle
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
1
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
1
Issue
1
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
b8f5922d
编写于
6月 27, 2017
作者:
Y
Yi Wang
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
Make CPUAllocator and GPUAllocator subclasses of SystemAllocator
上级
79373dab
变更
3
隐藏空白更改
内联
并排
Showing
3 changed files
with
59 additions
and
84 deletions
+59
-84
paddle/memory/detail/CMakeLists.txt
paddle/memory/detail/CMakeLists.txt
+4
-2
paddle/memory/detail/system_allocator.h
paddle/memory/detail/system_allocator.h
+21
-59
paddle/memory/detail/system_allocator_test.cc
paddle/memory/detail/system_allocator_test.cc
+34
-23
未找到文件。
paddle/memory/detail/CMakeLists.txt
浏览文件 @
b8f5922d
# NOTE(review): GitLab diff view of paddle/memory/detail/CMakeLists.txt,
# scraped one token per line. Each branch interleaves the removed target
# (a test depending directly on gflags/glog) with the added ones (a
# system_allocator library plus a test linking it). The old/new labels
# below are inferred from the +4/-2 summary -- verify against the commit.
if
(
${
WITH_GPU
}
)
# Old (removed): test built straight from the sources.
nv_test
(
system_allocator_test SRCS system_allocator_test.cc DEPS gflags glog
)
# New (added): build system_allocator as a library, link the test to it.
nv_library
(
system_allocator SRCS system_allocator.cc DEPS gflags
)
nv_test
(
system_allocator_test SRCS system_allocator_test.cc DEPS system_allocator gflags
)
else
(
${
WITH_GPU
}
)
# Old (removed):
cc_test
(
system_allocator_test SRCS system_allocator_test.cc DEPS gflags glog
)
# New (added): CPU-only build of the same library + test pair.
cc_library
(
system_allocator SRCS system_allocator.cc DEPS gflags
)
cc_test
(
system_allocator_test SRCS system_allocator_test.cc DEPS system_allocator gflags
)
endif
(
${
WITH_GPU
}
)
paddle/memory/detail/system_allocator.h
浏览文件 @
b8f5922d
...
...
@@ -14,76 +14,38 @@ limitations under the License. */
// NOTE(review): diff view of the header preamble of
// paddle/memory/detail/system_allocator.h, one token per line. Deleted and
// added lines are interleaved; labels below are inferred -- confirm against
// the actual commit.
#pragma once
// Old includes (removed): the header no longer needs mman/cstdlib/gflags
// once the method bodies move out of it.
#include <stddef.h> // for size_t
#include <sys/mman.h> // for mlock and munlock
#include <cstdlib> // for malloc and free
#include <gflags/gflags.h>
#include "paddle/platform/assert.h"
#include "paddle/platform/cuda.h"
// Old flag definition (removed). NOTE(review): DEFINE_bool in a header
// creates one flag definition per including TU (duplicate-symbol risk), so
// removing it is correct. Also note the test file DECLAREs
// `use_pinned_memory` (no trailing `s`) while this defines
// `uses_pinned_memory` -- the two spellings must be reconciled somewhere.
DEFINE_bool
(
uses_pinned_memory
,
false
,
"If set, allocate cpu/gpu pinned memory."
);
// New version (added) keeps only this include:
#include <stddef.h> // for size_t
namespace
paddle
{
namespace
memory
{
namespace
detail
{
// Old comment + class header (removed): the concrete, all-static CPUAllocator.
// If uses_pinned_memory is true, CPUAllocator calls mlock, which
// returns pinned and locked memory as staging areas for data exchange
// between host and device. Allocating too much would reduce the amount
// of memory available to the system for paging. So, by default, we
// should set false to uses_pinned_memory.
class
CPUAllocator
{
// New comment + class header (added): SystemAllocator, the abstract base.
// SystemAllocator is the parent class of CPUAllocator and
// GPUAllocator. A BuddyAllocator object uses a SystemAllocator*
// pointing to the underlying system allocator. An alternative to
// this class hierarchy is to pass a system allocator class to
// BuddyAllocator as a template parameter. This approach makes
// BuddyAllocator a class template, and its very complicated
// algorithm would make the buddy_allocator.h messy.
class
SystemAllocator
{
public:
// Old static CPUAllocator::Alloc (removed): std::malloc, plus mlock when
// FLAGS_uses_pinned_memory is set; mlock's return value is ignored.
static
void
*
Alloc
(
size_t
size
)
{
void
*
p
=
std
::
malloc
(
size
);
if
(
p
!=
nullptr
&&
FLAGS_uses_pinned_memory
)
{
mlock
(
p
,
size
);
}
return
p
;
}
// Old static CPUAllocator::Free (removed): munlock (if pinned) then free.
static
void
Free
(
void
*
p
,
size_t
size
)
{
if
(
p
!=
nullptr
&&
FLAGS_uses_pinned_memory
)
{
munlock
(
p
,
size
);
}
std
::
free
(
p
);
}
// New (added): virtual destructor so deleting a subclass through a
// SystemAllocator* is well defined, plus the pure-virtual interface.
virtual
~
SystemAllocator
()
{}
virtual
void
*
Alloc
(
size_t
size
)
=
0
;
virtual
void
Free
(
void
*
p
,
size_t
size
)
=
0
;
};
// Old guard + comment + class header (removed): the concrete GPUAllocator.
#ifndef PADDLE_ONLY_CPU // The following code is for CUDA.
// GPUAllocator<staging=true> calls cudaMallocHost, which returns
// pinned and locked memory as staging areas for data exchange
// between host and device. Allocating too much would reduce the
// amount of memory available to the system for paging. So, by
// default, we should use GPUAllocator<staging=false>.
class
GPUAllocator
{
// New (added): CPUAllocator now implements the SystemAllocator interface;
// the method bodies move out of the header (presumably to
// system_allocator.cc, per the CMake diff -- TODO confirm).
class
CPUAllocator
:
public
SystemAllocator
{
public:
// Old static GPUAllocator::Alloc (removed): pinned host memory via
// cudaMallocHost when FLAGS_uses_pinned_memory is set, otherwise device
// memory via cudaMalloc; on failure it clears the sticky CUDA error and
// returns nullptr.
static
void
*
Alloc
(
size_t
size
)
{
void
*
p
=
0
;
cudaError_t
result
=
FLAGS_uses_pinned_memory
?
cudaMallocHost
(
&
p
,
size
)
:
cudaMalloc
(
&
p
,
size
);
if
(
result
!=
cudaSuccess
)
{
cudaGetLastError
();
// clear error if there is any.
}
return
result
==
cudaSuccess
?
p
:
nullptr
;
}
// Old static GPUAllocator::Free (removed): cudaFreeHost/cudaFree, raising
// via throw_on_error except for the benign shutdown-time error below.
static
void
Free
(
void
*
p
,
size_t
size
)
{
// Purposefully allow cudaErrorCudartUnloading, because
// that is returned if you ever call cudaFree after the
// driver has already shut down. This happens only if the
// process is terminating, in which case we don't care if
// cudaFree succeeds.
cudaError_t
err
=
FLAGS_uses_pinned_memory
?
cudaFreeHost
(
p
)
:
cudaFree
(
p
);
if
(
err
!=
cudaErrorCudartUnloading
)
{
platform
::
throw_on_error
(
err
,
"cudaFree{Host} failed"
);
}
}
// New (added): CPUAllocator's virtual overrides, declaration only.
virtual
void
*
Alloc
(
size_t
size
);
virtual
void
Free
(
void
*
p
,
size_t
size
);
};
#ifndef PADDLE_ONLY_CPU
// GPU-backed implementation of the SystemAllocator interface, compiled only
// in CUDA builds. Declarations only here; the method bodies live elsewhere
// (presumably system_allocator.cc, per the CMake diff -- TODO confirm).
class GPUAllocator : public SystemAllocator {
 public:
  virtual void* Alloc(size_t size);
  virtual void Free(void* p, size_t size);
};
#endif  // PADDLE_ONLY_CPU
}
// namespace detail
...
...
paddle/memory/detail/system_allocator_test.cc
浏览文件 @
b8f5922d
...
...
@@ -17,44 +17,55 @@ limitations under the License. */
#include <memory>
#include <vector>
#include "g
log/logging
.h"
#include "g
flags/gflags
.h"
#include "gtest/gtest.h"
// Diff of the test helper. The old version (removed) was a function template
// over a static-allocator class; the new version (added) takes a
// SystemAllocator* and a size, and verifies the shared_ptr deleter actually
// ran. Old and new lines are interleaved by the diff view; labels below are
// inferred -- verify against the commit.
template
<
typename
Allocator
>
void
TestAllocator
(
void
*
p
)
{
// Old: allocate 1024 bytes through the static interface. NOTE(review):
// `p` is passed by value, so the caller's pointer is never updated -- the
// old tests' EXPECT_EQ(p, nullptr) passed only because of that.
p
=
Allocator
::
Alloc
(
1024
);
// New (added): declare the gflag the new tests set. Note the name has no
// trailing `s`, unlike the old FLAGS_uses_pinned_memory -- confirm which
// spelling the build actually defines.
DECLARE_bool
(
use_pinned_memory
);
// Old: release via a shared_ptr deleter bound to the static Free.
int
*
i
=
static_cast
<
int
*>
(
p
);
std
::
shared_ptr
<
int
>
ptr
(
i
,
[](
int
*
p
)
{
Allocator
::
Free
(
p
,
1024
);
});
// New helper (added): exercise Alloc/Free through the SystemAllocator
// interface and assert the deleter fired before the scope exits.
void
TestAllocator
(
paddle
::
memory
::
detail
::
SystemAllocator
*
a
,
size_t
size
)
{
bool
freed
=
false
;
{
void
*
p
=
a
->
Alloc
(
size
);
// New: a zero-byte request is expected to yield nullptr; any positive
// request must succeed.
if
(
size
>
0
)
{
EXPECT_NE
(
p
,
nullptr
);
}
else
{
EXPECT_EQ
(
p
,
nullptr
);
}
// Old line (removed): unconditional non-null check, superseded by the
// size-dependent branch above.
EXPECT_NE
(
p
,
nullptr
);
int
*
i
=
static_cast
<
int
*>
(
p
);
// New: the deleter captures the allocator and size so Free gets the
// matching arguments, and records that it ran.
std
::
shared_ptr
<
int
>
ptr
(
i
,
[
&
freed
,
a
,
size
](
void
*
p
)
{
freed
=
true
;
a
->
Free
(
p
,
size
);
});
}
EXPECT_TRUE
(
freed
);
}
// Diff of TEST(CPUAllocator, NoLockMem): old body (template helper on a null
// pointer) interleaved with the new body (instance + sizes 2048 and 0).
TEST
(
CPUAllocator
,
NoLockMem
)
{
// Old body (removed):
void
*
p
=
nullptr
;
FLAGS_uses_pinned_memory
=
false
;
TestAllocator
<
paddle
::
memory
::
detail
::
CPUAllocator
>
(
p
);
EXPECT_EQ
(
p
,
nullptr
);
// New body (added). NOTE(review): flag spelling differs from the old
// FLAGS_uses_pinned_memory -- confirm which one the build defines.
FLAGS_use_pinned_memory
=
false
;
paddle
::
memory
::
detail
::
CPUAllocator
a
;
TestAllocator
(
&
a
,
2048
);
TestAllocator
(
&
a
,
0
);
}
// Diff of TEST(CPUAllocator, LockMem): same shape as NoLockMem but with the
// pinned-memory flag enabled.
TEST
(
CPUAllocator
,
LockMem
)
{
// Old body (removed):
void
*
p
=
nullptr
;
FLAGS_uses_pinned_memory
=
true
;
TestAllocator
<
paddle
::
memory
::
detail
::
CPUAllocator
>
(
p
);
EXPECT_EQ
(
p
,
nullptr
);
// New body (added); see the flag-spelling note in the header diff.
FLAGS_use_pinned_memory
=
true
;
paddle
::
memory
::
detail
::
CPUAllocator
a
;
TestAllocator
(
&
a
,
2048
);
TestAllocator
(
&
a
,
0
);
}
// GPU tests are compiled only when CUDA support is on.
#ifndef PADDLE_ONLY_CPU
// Diff of TEST(GPUAllocator, NoStaging): cudaMalloc path (no pinned staging).
TEST
(
GPUAllocator
,
NoStaging
)
{
// Old body (removed):
void
*
p
=
nullptr
;
FLAGS_uses_pinned_memory
=
false
;
TestAllocator
<
paddle
::
memory
::
detail
::
GPUAllocator
>
(
p
);
EXPECT_EQ
(
p
,
nullptr
);
// New body (added):
FLAGS_use_pinned_memory
=
false
;
paddle
::
memory
::
detail
::
GPUAllocator
a
;
TestAllocator
(
&
a
,
2048
);
TestAllocator
(
&
a
,
0
);
}
// Diff of TEST(GPUAllocator, Staging): cudaMallocHost path (pinned staging).
TEST
(
GPUAllocator
,
Staging
)
{
// Old body (removed):
void
*
p
=
nullptr
;
FLAGS_uses_pinned_memory
=
true
;
TestAllocator
<
paddle
::
memory
::
detail
::
GPUAllocator
>
(
p
);
EXPECT_EQ
(
p
,
nullptr
);
// New body (added):
FLAGS_use_pinned_memory
=
true
;
paddle
::
memory
::
detail
::
GPUAllocator
a
;
TestAllocator
(
&
a
,
2048
);
TestAllocator
(
&
a
,
0
);
}
#endif // PADDLE_ONLY_CPU
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录