Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
Crayon鑫
Paddle
提交
f149d183
P
Paddle
项目概览
Crayon鑫
/
Paddle
与 Fork 源项目一致
Fork自
PaddlePaddle / Paddle
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
1
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
1
Issue
1
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
f149d183
编写于
6月 26, 2017
作者:
Y
Yi Wang
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
Add system_allocator
上级
f7530e89
变更
3
显示空白变更内容
内联
并排
Showing
3 changed files
with
81 additions
and
53 deletions
+81
-53
paddle/memory/detail/CMakeLists.txt
paddle/memory/detail/CMakeLists.txt
+5
-1
paddle/memory/detail/system_allocator.h
paddle/memory/detail/system_allocator.h
+53
-31
paddle/memory/detail/system_allocator_test.cc
paddle/memory/detail/system_allocator_test.cc
+23
-21
未找到文件。
paddle/memory/detail/CMakeLists.txt
浏览文件 @
f149d183
# Register the system_allocator unit test.  When building with GPU
# support the test must be compiled by the CUDA toolchain (nv_test) so
# the GPUAllocator code paths in system_allocator_test.cc compile;
# otherwise a plain C++ test target (cc_test) suffices.
if(${WITH_GPU})
  nv_test(system_allocator_test SRCS system_allocator_test.cc)
else()
  # Modern CMake: bare else()/endif() — repeating the condition, as in
  # else(${WITH_GPU}), is a deprecated legacy form.
  cc_test(system_allocator_test SRCS system_allocator_test.cc)
endif()
paddle/memory/detail/system_allocator.h
浏览文件 @
f149d183
...
...
@@ -23,14 +23,31 @@ limitations under the License. */
#include <thrust/system_error.h>
#endif // PADDLE_ONLY_CPU
#include "paddle/platform/assert.h"
namespace
paddle
{
namespace
memory
{
namespace
detail
{
class
SystemAllocato
r
{
class
CPUDelete
r
{
public:
virtual
void
*
Alloc
(
size_t
size
)
=
0
;
virtual
void
*
Free
(
void
*
p
)
=
0
;
CPUDeleter
(
void
*
ptr
,
size_t
size
,
bool
locked
)
:
ptr_
(
ptr
),
size_
(
size
),
locked_
(
locked
)
{}
void
*
Ptr
()
{
return
ptr_
;
}
void
operator
()(
void
*
ptr
)
{
PADDLE_ASSERT
(
ptr
==
ptr_
);
if
(
ptr_
!=
nullptr
&&
locked_
)
{
munlock
(
ptr_
,
size_
);
}
std
::
free
(
ptr_
);
}
private:
void
*
ptr_
;
size_t
size_
;
bool
locked_
;
};
// CPUAllocator<lock_memory=true> calls mlock, which returns pinned
...
...
@@ -39,21 +56,14 @@ class SystemAllocator {
// available to the system for paging. So, by default, we should use
// CPUAllocator<staging=false>.
template
<
bool
lock_memory
>
class
CPUAllocator
:
public
SystemAllocator
{
class
CPUAllocator
{
public:
virtual
void
*
Alloc
(
size_t
size
)
{
static
CPUDeleter
Alloc
(
size_t
size
)
{
void
*
p
=
std
::
malloc
(
size
);
if
(
p
!=
nullptr
&&
lock_memory
)
{
mlock
(
p
,
size
);
}
return
p
;
}
virtual
void
Free
(
void
*
p
,
size_t
size
)
{
if
(
p
!=
nullptr
&&
lock_memory
)
{
munlock
(
p
,
size
);
}
std
::
free
(
p
);
return
CPUDeleter
(
p
,
size
,
lock_memory
);
}
};
...
...
@@ -67,6 +77,32 @@ inline void throw_on_error(cudaError_t e, const char* message) {
}
}
// namespace
// GPUDeleter releases a block obtained from GPUAllocator::Alloc.  Like
// CPUDeleter, it is intended to serve as a smart-pointer deleter.
class GPUDeleter {
 public:
  // staging indicates the block came from cudaMallocHost (pinned host
  // memory) rather than cudaMalloc (device memory), which determines
  // the matching free call.
  GPUDeleter(void* ptr, size_t size, bool staging)
      : ptr_(ptr), size_(size), staging_(staging) {}

  // The raw pointer being managed (nullptr if the allocation failed).
  void* Ptr() { return ptr_; }

  void operator()(void* ptr) {
    PADDLE_ASSERT(ptr == ptr_);
    // Purposefully allow cudaErrorCudartUnloading, because
    // that is returned if you ever call cudaFree after the
    // driver has already shutdown. This happens only if the
    // process is terminating, in which case we don't care if
    // cudaFree succeeds.
    cudaError_t err = staging_ ? cudaFreeHost(ptr) : cudaFree(ptr);
    if (err != cudaErrorCudartUnloading) {
      throw_on_error(err, "cudaFree{Host} failed");
    }
  }

 private:
  void* ptr_;
  size_t size_;
  bool staging_;
};
// GPUAllocator<staging=true> calls cudaHostMalloc, which returns
// pinned and locked memory as staging areas for data exchange
// between host and device. Allocates too much would reduce the
...
...
@@ -75,28 +111,14 @@ inline void throw_on_error(cudaError_t e, const char* message) {
template
<
bool
staging
>
class
GPUAllocator
{
public:
void
*
Alloc
(
size_t
size
)
{
static
GPUDeleter
Alloc
(
size_t
size
)
{
void
*
p
=
0
;
cudaError_t
result
=
staging
?
cudaMallocHost
(
&
p
,
size
)
:
cudaMalloc
(
&
p
,
size
);
if
(
result
==
cudaSuccess
)
{
return
p
;
}
// clear last error
cudaGetLastError
();
return
nullptr
;
}
void
Free
(
void
*
p
,
size_t
size
)
{
// Purposefully allow cudaErrorCudartUnloading, because
// that is returned if you ever call cudaFree after the
// driver has already shutdown. This happens only if the
// process is terminating, in which case we don't care if
// cudaFree succeeds.
auto
err
=
staging
?
cudaFreeHost
(
p
)
:
cudaFree
(
p
);
if
(
err
!=
cudaErrorCudartUnloading
)
{
throw_on_error
(
err
,
"cudaFree failed"
);
if
(
result
!=
cudaSuccess
)
{
cudaGetLastError
();
// clear error if there is any.
}
return
GPUDeleter
(
result
==
cudaSuccess
?
p
:
nullptr
,
size
,
staging
);
}
};
...
...
paddle/memory/detail/system_allocator_test.cc
浏览文件 @
f149d183
...
...
@@ -13,36 +13,38 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/memory/detail/system_allocator.h"
#include <memory>
#include <vector>
#include "gtest/gtest.h"
// Exercises an allocator type (CPUAllocator<...> or GPUAllocator<...>)
// through its static Alloc() interface, letting the returned deleter
// release the memory via unique_ptr.
template <typename Allocator>
void TestAllocator() {
  {
    auto d = Allocator::Alloc(sizeof(int));
    EXPECT_NE(d.Ptr(), nullptr);
    // The custom deleter type must be part of the unique_ptr type:
    // std::unique_ptr<int> (default deleter) cannot be constructed
    // from a (pointer, CPUDeleter/GPUDeleter) pair and would not
    // compile.
    std::unique_ptr<int, decltype(d)> p(static_cast<int*>(d.Ptr()), d);
  }
  {
    auto d = Allocator::Alloc(0);
    // NOTE(review): this expects a zero-byte allocation to yield a
    // null Ptr(), but std::malloc(0) may legally return a non-null
    // pointer (implementation-defined) — confirm on the target libc.
    EXPECT_EQ(d.Ptr(), nullptr);
    std::unique_ptr<int, decltype(d)> p(static_cast<int*>(d.Ptr()), d);
  }
}

TEST(CPUAllocator, NoLockMem) {
  TestAllocator<paddle::memory::detail::CPUAllocator<false>>();
}

TEST(CPUAllocator, LockMem) {
  TestAllocator<paddle::memory::detail::CPUAllocator<true>>();
}

#ifndef PADDLE_ONLY_CPU
TEST(GPUAllocator, NoStaging) {
  TestAllocator<paddle::memory::detail::GPUAllocator<false>>();
}

TEST(GPUAllocator, Staging) {
  TestAllocator<paddle::memory::detail::GPUAllocator<true>>();
}
#endif  // PADDLE_ONLY_CPU
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录