Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
Crayon鑫
Paddle
提交
79373dab
P
Paddle
项目概览
Crayon鑫
/
Paddle
与 Fork 源项目一致
Fork自
PaddlePaddle / Paddle
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
1
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
1
Issue
1
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
79373dab
编写于
6月 28, 2017
作者:
L
liaogang
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
TEST: Add test for system allocator and deleter
上级
b22dd128
变更
2
显示空白变更内容
内联
并排
Showing
2 changed files
with
60 additions
and
88 deletions
+60
-88
paddle/memory/detail/system_allocator.h
paddle/memory/detail/system_allocator.h
+35
-73
paddle/memory/detail/system_allocator_test.cc
paddle/memory/detail/system_allocator_test.cc
+25
-15
未找到文件。
paddle/memory/detail/system_allocator.h
浏览文件 @
79373dab
...
@@ -18,107 +18,69 @@ limitations under the License. */
...
@@ -18,107 +18,69 @@ limitations under the License. */
#include <sys/mman.h> // for mlock and munlock
#include <sys/mman.h> // for mlock and munlock
#include <cstdlib> // for malloc and free
#include <cstdlib> // for malloc and free
#ifndef PADDLE_ONLY_CPU
#include <gflags/gflags.h>
#include <thrust/system/cuda/error.h>
#include <thrust/system_error.h>
#endif // PADDLE_ONLY_CPU
#include "paddle/platform/assert.h"
#include "paddle/platform/assert.h"
#include "paddle/platform/cuda.h"
// If set, CPUAllocator::Alloc mlock()s its allocations and
// GPUAllocator::Alloc uses cudaMallocHost(), so memory handed out is
// pinned (page-locked).  Pinned memory speeds up host<->device
// transfer but cannot be paged out, so it defaults to false.
DEFINE_bool(uses_pinned_memory, false,
            "If set, allocate cpu/gpu pinned memory.");

namespace paddle {
namespace memory {
namespace detail {
// If uses_pinned_memory is true, CPUAllocator calls mlock, which
// returns pinned and locked memory as staging areas for data exchange
// between host and device.  Allocating too much pinned memory would
// reduce the amount of memory available to the system for paging, so
// by default uses_pinned_memory is false.
class CPUAllocator {
 public:
  // Returns `size` bytes obtained with std::malloc, additionally
  // mlock'ed when FLAGS_uses_pinned_memory is set.  May return
  // nullptr on allocation failure; callers must check.
  // NOTE(review): malloc(0) may legally return a non-null pointer --
  // confirm callers do not rely on Alloc(0) == nullptr.
  static void* Alloc(size_t size) {
    void* p = std::malloc(size);
    if (p != nullptr && FLAGS_uses_pinned_memory) {
      mlock(p, size);  // best-effort: mlock's return value is ignored
    }
    return p;
  }

  // Releases memory returned by Alloc.  `size` must match the size
  // passed to Alloc so the munlock range covers the mlock'ed range.
  static void Free(void* p, size_t size) {
    if (p != nullptr && FLAGS_uses_pinned_memory) {
      munlock(p, size);
    }
    std::free(p);
  }
};
#ifndef PADDLE_ONLY_CPU // The following code are for CUDA.
// GPUAllocator<staging=true> calls cudaHostMalloc, which returns
// GPUAllocator<staging=true> calls cudaHostMalloc, which returns
// pinned and locked memory as staging areas for data exchange
// pinned and locked memory as staging areas for data exchange
// between host and device. Allocates too much would reduce the
// between host and device. Allocates too much would reduce the
// amount of memory available to the system for paging. So, by
// amount of memory available to the system for paging. So, by
// default, we should use GPUAllocator<staging=false>.
// default, we should use GPUAllocator<staging=false>.
template
<
bool
staging
>
class
GPUAllocator
{
class
GPUAllocator
{
public:
public:
static
GPUDeleter
Alloc
(
size_t
size
)
{
static
void
*
Alloc
(
size_t
size
)
{
void
*
p
=
0
;
void
*
p
=
0
;
cudaError_t
result
=
cudaError_t
result
=
FLAGS_uses_pinned_memory
?
cudaMallocHost
(
&
p
,
size
)
staging
?
cudaMallocHost
(
&
p
,
size
)
:
cudaMalloc
(
&
p
,
size
);
:
cudaMalloc
(
&
p
,
size
);
if
(
result
!=
cudaSuccess
)
{
if
(
result
!=
cudaSuccess
)
{
cudaGetLastError
();
// clear error if there is any.
cudaGetLastError
();
// clear error if there is any.
}
}
return
GPUDeleter
(
result
==
cudaSuccess
?
p
:
nullptr
,
size
,
staging
);
return
result
==
cudaSuccess
?
p
:
nullptr
;
}
static
void
Free
(
void
*
p
,
size_t
size
)
{
// Purposefully allow cudaErrorCudartUnloading, because
// that is returned if you ever call cudaFree after the
// driver has already shutdown. This happens only if the
// process is terminating, in which case we don't care if
// cudaFree succeeds.
cudaError_t
err
=
FLAGS_uses_pinned_memory
?
cudaFreeHost
(
p
)
:
cudaFree
(
p
);
if
(
err
!=
cudaErrorCudartUnloading
)
{
platform
::
throw_on_error
(
err
,
"cudaFree{Host} failed"
);
}
}
}
};
};
...
...
paddle/memory/detail/system_allocator_test.cc
浏览文件 @
79373dab
...
@@ -17,34 +17,44 @@ limitations under the License. */
...
@@ -17,34 +17,44 @@ limitations under the License. */
#include <memory>
#include <memory>
#include <vector>
#include <vector>
#include "glog/logging.h"
#include "gtest/gtest.h"
#include "gtest/gtest.h"
template
<
typename
Allocator
>
template
<
typename
Allocator
>
void
TestAllocator
()
{
void
TestAllocator
(
void
*
p
)
{
{
p
=
Allocator
::
Alloc
(
1024
);
auto
d
=
Allocator
::
Alloc
(
sizeof
(
int
));
EXPECT_NE
(
d
.
Ptr
(),
nullptr
);
int
*
i
=
static_cast
<
int
*>
(
p
);
std
::
unique_ptr
<
int
>
p
(
static_cast
<
int
*>
(
d
.
Ptr
()),
d
);
std
::
shared_ptr
<
int
>
ptr
(
i
,
[](
int
*
p
)
{
Allocator
::
Free
(
p
,
1024
);
});
}
{
EXPECT_NE
(
p
,
nullptr
);
auto
d
=
Allocator
::
Alloc
(
0
);
EXPECT_EQ
(
d
.
Ptr
(),
nullptr
);
std
::
unique_ptr
<
int
>
p
(
static_cast
<
int
*>
(
d
.
Ptr
()),
d
);
}
}
}
// CPU allocation without pinning: plain malloc/free path.
TEST(CPUAllocator, NoLockMem) {
  void* p = nullptr;
  FLAGS_uses_pinned_memory = false;
  TestAllocator<paddle::memory::detail::CPUAllocator>(p);
  // NOTE(review): TestAllocator takes p by value, so this expectation
  // is vacuous -- p is never modified here.  Confirm intent.
  EXPECT_EQ(p, nullptr);
}
// CPU allocation with pinning: malloc + mlock / munlock + free path.
TEST(CPUAllocator, LockMem) {
  void* p = nullptr;
  FLAGS_uses_pinned_memory = true;
  TestAllocator<paddle::memory::detail::CPUAllocator>(p);
  // NOTE(review): TestAllocator takes p by value, so this expectation
  // is vacuous -- p is never modified here.  Confirm intent.
  EXPECT_EQ(p, nullptr);
}
#ifndef PADDLE_ONLY_CPU

// GPU allocation without staging: cudaMalloc / cudaFree path.
TEST(GPUAllocator, NoStaging) {
  void* p = nullptr;
  FLAGS_uses_pinned_memory = false;
  TestAllocator<paddle::memory::detail::GPUAllocator>(p);
  // NOTE(review): TestAllocator takes p by value, so this expectation
  // is vacuous -- p is never modified here.  Confirm intent.
  EXPECT_EQ(p, nullptr);
}
// GPU allocation with staging: cudaMallocHost / cudaFreeHost path.
TEST(GPUAllocator, Staging) {
  void* p = nullptr;
  FLAGS_uses_pinned_memory = true;
  TestAllocator<paddle::memory::detail::GPUAllocator>(p);
  // NOTE(review): TestAllocator takes p by value, so this expectation
  // is vacuous -- p is never modified here.  Confirm intent.
  EXPECT_EQ(p, nullptr);
}

#endif  // PADDLE_ONLY_CPU
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录