Commit ea81f8ee
Authored Nov 14, 2018 by Yu Yang
Parent: 02631965

Clean interface of allocator

Clean managed/unmanaged allocator
Showing 25 changed files with 347 additions and 503 deletions (+347 −503)
paddle/fluid/memory/allocation/CMakeLists.txt                    +1   -5
paddle/fluid/memory/allocation/aligned_allocator.cc              +1   -6
paddle/fluid/memory/allocation/aligned_allocator.h               +3   -5
paddle/fluid/memory/allocation/allocator.cc                      +5   -0
paddle/fluid/memory/allocation/allocator.h                       +20  -9
paddle/fluid/memory/allocation/allocator_facade.cc               +15  -24
paddle/fluid/memory/allocation/auto_increment_allocator.cc       +50  -9
paddle/fluid/memory/allocation/auto_increment_allocator.h        +5   -61
paddle/fluid/memory/allocation/best_fit_allocator.cc             +43  -44
paddle/fluid/memory/allocation/best_fit_allocator.h              +11  -6
paddle/fluid/memory/allocation/buffered_allocator.cc             +31  -28
paddle/fluid/memory/allocation/buffered_allocator.h              +14  -7
paddle/fluid/memory/allocation/conditional_allocator.cc          +14  -10
paddle/fluid/memory/allocation/conditional_allocator.h           +8   -19
paddle/fluid/memory/allocation/cpu_allocator.cc                  +15  -9
paddle/fluid/memory/allocation/cpu_allocator.h                   +9   -7
paddle/fluid/memory/allocation/locked_allocator.cc               +19  -23
paddle/fluid/memory/allocation/locked_allocator.h                +9   -7
paddle/fluid/memory/allocation/naive_managed_allocator.cc        +0   -69
paddle/fluid/memory/allocation/naive_managed_allocator.h         +0   -76
paddle/fluid/memory/allocation/retry_allocator.cc                +13  -26
paddle/fluid/memory/allocation/retry_allocator.h                 +16  -35
paddle/fluid/memory/allocation/underlying_manual_allocation.h    +35  -0
paddle/fluid/memory/allocation/zero_size_allocator.cc            +3   -8
paddle/fluid/memory/allocation/zero_size_allocator.h             +7   -10
paddle/fluid/memory/allocation/CMakeLists.txt

```diff
@@ -29,9 +29,6 @@ else()
     cpu_allocator)
 endif()
-cc_library(naive_managed_allocator SRCS naive_managed_allocator.cc DEPS allocator)
-cc_test(naive_managed_allocator_test SRCS naive_managed_allocator_test.cc DEPS naive_managed_allocator)
 nv_library(pinned_allocator SRCS pinned_allocator.cc DEPS allocator)
 if (WITH_GPU)
   set(AllocatorFacadeDeps gpu_info cuda_allocator pinned_allocator cuda_device_guard)
@@ -49,7 +46,6 @@ cc_library(allocator_facade SRCS allocator_facade.cc DEPS
     cpu_allocator
     locked_allocator
     best_fit_allocator
-    naive_managed_allocator
     aligned_allocator
     auto_increment_allocator
     zero_size_allocator
@@ -61,6 +57,6 @@ cc_library(allocator_facade SRCS allocator_facade.cc DEPS
 nv_test(allocation_and_eigen_test SRCS allocation_and_eigen_test.cu DEPS allocator_facade)
-cc_test(retry_allocator_test SRCS retry_allocator_test.cc DEPS retry_allocator
-        naive_managed_allocator best_fit_allocator locked_allocator cpu_allocator)
+cc_test(retry_allocator_test SRCS retry_allocator_test.cc DEPS retry_allocator best_fit_allocator locked_allocator cpu_allocator)
 cc_test(allocator_facade_test SRCS allocator_facade_test.cc DEPS allocator_facade)
```
paddle/fluid/memory/allocation/aligned_allocator.cc

```diff
@@ -19,14 +19,9 @@ namespace memory {
 namespace allocation {
 
 ThinAlignedAllocator::ThinAlignedAllocator(
-    std::shared_ptr<ManagedAllocator> underlyning_allocator)
+    std::shared_ptr<Allocator> underlyning_allocator)
     : underlying_allocator_(std::move(underlyning_allocator)) {}
 
-std::shared_ptr<Allocation> ThinAlignedAllocator::AllocateShared(
-    size_t size, Allocator::Attr attr) {
-  return std::shared_ptr<Allocation>(Allocate(size, attr).release());
-}
-
 bool ThinAlignedAllocator::IsAllocThreadSafe() const {
   return underlying_allocator_->IsAllocThreadSafe();
 }
```
paddle/fluid/memory/allocation/aligned_allocator.h

```diff
@@ -70,17 +70,15 @@ class AlignedAllocation : public Allocation {
 //
 // NOTE(yy): This could be an over design. If it harms readability of code, it
 // could be removed later.
-class ThinAlignedAllocator : public ManagedAllocator {
+class ThinAlignedAllocator : public Allocator {
  public:
   explicit ThinAlignedAllocator(
-      std::shared_ptr<ManagedAllocator> underlyning_allocator);
-
-  std::shared_ptr<Allocation> AllocateShared(size_t size, Attr attr) override;
+      std::shared_ptr<Allocator> underlyning_allocator);
 
   bool IsAllocThreadSafe() const;
 
  protected:
-  std::shared_ptr<ManagedAllocator> underlying_allocator_;
+  std::shared_ptr<Allocator> underlying_allocator_;
 };
 
 // An aligned allocator will allocate `size+kAlignment` allocation and adjust
```
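The over-allocate-then-align trick the trailing comment refers to is plain pointer arithmetic. A minimal standalone sketch of the idea (not the patch's `AlignedAllocation` code; it assumes the alignment is a power of two):

```cpp
#include <cstdint>
#include <iostream>

int main() {
  const uintptr_t kAlignment = 64;
  // Pretend address returned by the underlying allocator; the allocator
  // requested size + kAlignment bytes, so rounding up stays in bounds.
  uintptr_t raw = 1000;
  // Round up to the next multiple of kAlignment.
  uintptr_t aligned = (raw + kAlignment - 1) & ~(kAlignment - 1);
  std::cout << aligned << "\n";  // 1024: first 64-byte boundary >= 1000
}
```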
paddle/fluid/memory/allocation/allocator.cc

```diff
@@ -24,6 +24,11 @@ bool Allocator::IsAllocThreadSafe() const { return false; }
 const char* BadAlloc::what() const noexcept { return msg_.c_str(); }
 
+MannualFreeAllocation::~MannualFreeAllocation() { allocator_->Free(this); }
+
+std::unique_ptr<Allocation> MannualFreeAllocator::Allocate(
+    size_t size, Allocator::Attr attr) {
+  return std::unique_ptr<Allocation>(AllocateImpl(size, attr));
+}
 }  // namespace allocation
 }  // namespace memory
 }  // namespace paddle
```
paddle/fluid/memory/allocation/allocator.h

```diff
@@ -121,19 +121,30 @@ class Allocator {
   virtual bool IsAllocThreadSafe() const;
 };
 
-// User need to invoke `Free` or `FreeUniquePtr` manually if allocated by
-// a manally managed allocator.
-class UnmanagedAllocator : public Allocator {
+class MannualFreeAllocator;
+class MannualFreeAllocation : public Allocation {
  public:
-  virtual void FreeUniquePtr(std::unique_ptr<Allocation> allocation) = 0;
+  MannualFreeAllocation(MannualFreeAllocator* allocator, void* ptr,
+                        size_t size, platform::Place place)
+      : Allocation(ptr, size, place), allocator_(allocator) {}
+
+  ~MannualFreeAllocation();
+
+ private:
+  MannualFreeAllocator* allocator_;
 };
 
-// The allocation will be managed by smart pointers. i.e., users do not need
-// to free allocation manually.
-class ManagedAllocator : public Allocator {
+// User need to invoke `Free` or `FreeUniquePtr` manually if allocated by
+// a manally managed allocator.
+class MannualFreeAllocator : public Allocator {
  public:
-  virtual std::shared_ptr<Allocation> AllocateShared(
-      size_t size, Allocator::Attr attr = kDefault) = 0;
+  std::unique_ptr<Allocation> Allocate(size_t size, Attr attr) final;
+
+ protected:
+  virtual void Free(MannualFreeAllocation* allocation) = 0;
+  virtual MannualFreeAllocation* AllocateImpl(size_t size,
+                                              Allocator::Attr attr) = 0;
+  friend class MannualFreeAllocation;
 };
 }  // namespace allocation
```
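This hunk is the core of the refactor: `UnmanagedAllocator`'s explicit `FreeUniquePtr` and `ManagedAllocator`'s `AllocateShared` are both replaced by one template-method base, where the allocation object itself calls back into its owning allocator on destruction. A self-contained sketch of that contract (all `Demo*`/`MallocAllocator` names are illustrative stand-ins, not the patch's types):

```cpp
#include <cstddef>
#include <cstdlib>
#include <iostream>
#include <memory>

class DemoAllocator;

// The allocation keeps a back-pointer to its allocator; its destructor
// routes the release there, like MannualFreeAllocation::~MannualFreeAllocation().
class DemoAllocation {
 public:
  DemoAllocation(DemoAllocator* allocator, void* ptr, size_t size)
      : allocator_(allocator), ptr_(ptr), size_(size) {}
  ~DemoAllocation();

  void* ptr() const { return ptr_; }
  size_t size() const { return size_; }

 private:
  DemoAllocator* allocator_;
  void* ptr_;
  size_t size_;
};

// Template method: the public Allocate() wraps the raw result in a
// unique_ptr; subclasses only implement AllocateImpl()/Free().
class DemoAllocator {
 public:
  virtual ~DemoAllocator() = default;

  std::unique_ptr<DemoAllocation> Allocate(size_t size) {
    return std::unique_ptr<DemoAllocation>(AllocateImpl(size));
  }

 protected:
  friend class DemoAllocation;
  virtual DemoAllocation* AllocateImpl(size_t size) = 0;
  virtual void Free(DemoAllocation* allocation) = 0;
};

DemoAllocation::~DemoAllocation() { allocator_->Free(this); }

class MallocAllocator : public DemoAllocator {
 protected:
  DemoAllocation* AllocateImpl(size_t size) override {
    return new DemoAllocation(this, std::malloc(size), size);
  }
  void Free(DemoAllocation* allocation) override {
    std::free(allocation->ptr());
  }
};

int main() {
  MallocAllocator a;
  auto alloc = a.Allocate(128);  // freed automatically when `alloc` dies
  std::cout << alloc->size() << "\n";
}
```

With this shape, "managed" versus "unmanaged" stops being a type distinction: every allocation frees itself through its allocator, and smart pointers supply whatever ownership discipline the caller wants.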
paddle/fluid/memory/allocation/allocator_facade.cc

```diff
@@ -24,7 +24,6 @@
 #include "paddle/fluid/memory/allocation/conditional_allocator.h"
 #include "paddle/fluid/memory/allocation/cpu_allocator.h"
 #include "paddle/fluid/memory/allocation/locked_allocator.h"
-#include "paddle/fluid/memory/allocation/naive_managed_allocator.h"
 #include "paddle/fluid/memory/allocation/retry_allocator.h"
 #include "paddle/fluid/memory/allocation/zero_size_allocator.h"
 #include "paddle/fluid/platform/cpu_info.h"
@@ -46,34 +45,28 @@ namespace memory {
 namespace allocation {
 
 // TODO(yy): Dirty code here. This class should be configurable in runtime.
-class CPUManagedAllocator : public ManagedAllocator {
+class CPUManagedAllocator : public Allocator {
  public:
-  CPUManagedAllocator()
-      : normal_allocator_(NaiveManagedAllocator::Create(
-            std::unique_ptr<Allocator>(new CPUAllocator()))) {}
+  CPUManagedAllocator() : normal_allocator_(new CPUAllocator()) {}
 
   std::unique_ptr<Allocation> Allocate(size_t size, Attr attr) override {
     return normal_allocator_->Allocate(size, attr);
   }
 
-  std::shared_ptr<Allocation> AllocateShared(size_t size, Attr attr) override {
-    return normal_allocator_->AllocateShared(size, attr);
-  }
-
   bool IsAllocThreadSafe() const override { return true; }
 
  private:
-  std::shared_ptr<ManagedAllocator> normal_allocator_;
+  std::shared_ptr<Allocator> normal_allocator_;
 };
 
 // TODO(yy): Dirty code here. This class should be configurable in runtime.
-class ChunkedManagedAllocator : public ManagedAllocator {
+class ChunkedManagedAllocator : public Allocator {
  public:
   explicit ChunkedManagedAllocator(std::unique_ptr<Allocator> system_allocator,
                                    size_t max_chunk_size, size_t capacity = 1,
                                    int64_t retry_time = -1)
       : max_chunk_size_(max_chunk_size), retry_time_(retry_time) {
-    raw_allocator_ = NaiveManagedAllocator::Create(std::move(system_allocator));
+    raw_allocator_ = std::move(system_allocator);
 
     if (max_chunk_size_ == 0) {
       default_allocator_ = raw_allocator_;
@@ -114,11 +107,7 @@ class ChunkedManagedAllocator : public ManagedAllocator {
     return default_allocator_->Allocate(size, attr);
   }
 
-  std::shared_ptr<Allocation> AllocateShared(size_t size, Attr attr) override {
-    return default_allocator_->AllocateShared(size, attr);
-  }
-
-  std::shared_ptr<ManagedAllocator> BestFitAllocatorCreator() {
+  std::shared_ptr<Allocator> BestFitAllocatorCreator() {
     chunks_.emplace_back(raw_allocator_->Allocate(max_chunk_size_));
     auto* allocation = chunks_.back().get();
     std::unique_ptr<Allocator> unmanaged_allocator(new LockedAllocator(
@@ -127,12 +116,13 @@ class ChunkedManagedAllocator : public ManagedAllocator {
     if (retry_time_ <= 0) {
       VLOG(10) << "Create NaiveManagedAllocator without retry";
       return std::make_shared<AlignedAllocator<64u>>(
-          NaiveManagedAllocator::Create(std::move(unmanaged_allocator)));
+          std::move(unmanaged_allocator));
     } else {
       VLOG(10) << "Create RetryAllocator with retry_time " << retry_time_
                << "ms";
-      return std::make_shared<AlignedAllocator<64u>>(RetryAllocator::Create(
-          std::move(unmanaged_allocator), static_cast<size_t>(retry_time_)));
+      auto tmp = std::make_shared<RetryAllocator>(
+          std::move(unmanaged_allocator), static_cast<size_t>(retry_time_));
+      return std::make_shared<AlignedAllocator<64u>>(tmp);
     }
   }
@@ -142,8 +132,8 @@ class ChunkedManagedAllocator : public ManagedAllocator {
   size_t max_chunk_size_;
   int64_t retry_time_;
   std::vector<std::unique_ptr<Allocation>> chunks_;
-  std::shared_ptr<ManagedAllocator> raw_allocator_;
-  std::shared_ptr<ManagedAllocator> default_allocator_;
+  std::shared_ptr<Allocator> raw_allocator_;
+  std::shared_ptr<Allocator> default_allocator_;
 };
 
 #ifdef PADDLE_WITH_CUDA
@@ -193,7 +183,7 @@ class CUDAPinnedManagedAllocator : public ChunkedManagedAllocator {
 class AllocatorFacadePrivate {
  public:
-  std::map<platform::Place, std::shared_ptr<ManagedAllocator>> allocators_;
+  std::map<platform::Place, std::shared_ptr<Allocator>> allocators_;
 
   ~AllocatorFacadePrivate() = default;
@@ -245,7 +235,8 @@ AllocatorFacade& AllocatorFacade::Instance() {
 std::shared_ptr<Allocation> AllocatorFacade::AllocShared(
     const platform::Place& place, size_t size, Allocator::Attr attr) {
-  return m_->allocators_.at(place)->AllocateShared(size, attr);
+  return std::shared_ptr<Allocation>(
+      m_->allocators_.at(place)->Allocate(size, attr).release());
 }
 
 std::unique_ptr<Allocation> AllocatorFacade::Alloc(const platform::Place& place,
```
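Note how `AllocShared` no longer needs a dedicated `AllocateShared` virtual: a `shared_ptr` can simply adopt the object the `unique_ptr` owns, and the allocation's destructor (which performs the actual free) still runs when the last reference drops. In isolation, the conversion is just:

```cpp
#include <memory>

// Generic sketch: turn the facade's unique_ptr result into a shared handle
// without a second allocation code path. Assumes only that the pointee's
// destructor performs the free.
template <typename Allocation>
std::shared_ptr<Allocation> ToShared(std::unique_ptr<Allocation> a) {
  // Equivalent to std::shared_ptr<Allocation>(a.release()); constructing
  // from the moved unique_ptr lets shared_ptr take over the deleter.
  return std::shared_ptr<Allocation>(std::move(a));
}
```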
paddle/fluid/memory/allocation/auto_increment_allocator.cc

```diff
@@ -20,20 +20,61 @@ namespace allocation {
 std::unique_ptr<Allocation> AutoIncrementAllocator::Allocate(
     size_t size, Allocator::Attr attr) {
-  return InvokeOrCreateUnderlyingAllocator([&](ManagedAllocator& allocator) {
-    return allocator.Allocate(size, attr);
-  });
-}
-
-std::shared_ptr<Allocation> AutoIncrementAllocator::AllocateShared(
-    size_t size, Allocator::Attr attr) {
-  return InvokeOrCreateUnderlyingAllocator([&](ManagedAllocator& allocator) {
-    return allocator.AllocateShared(size, attr);
-  });
+  auto cur = prev_success_allocator_.load();
+  size_t retry_count = allocator_num_.load();
+  size_t allocator_num = retry_count;
+  while (retry_count-- > 0) {  // until there retry count is zero
+    try {
+      auto res = underlying_allocators_[cur]->Allocate(size, attr);
+      prev_success_allocator_ = cur;
+      return res;
+    } catch (BadAlloc&) {
+      if (++cur >= allocator_num) {
+        cur = 0;
+      }
+    } catch (...) {
+      // if there is another type of allocation, just rethrow it.
+      throw;
+    }
+  }
+  // This happens when the first allocator is exhausted and
+  // there are more than 1 allocation requests
+  // In this situation, the first allocation request would success
+  // and the second allocation request would fail if we do not use
+  // the newly created allocator by the first allocation request.
+  for (cur = allocator_num; cur < allocator_num_; ++cur) {
+    try {
+      auto ret = underlying_allocators_[cur]->Allocate(size, attr);
+      prev_success_allocator_ = cur;
+      return ret;
+    } catch (BadAlloc&) {
+    } catch (...) {
+      throw;
+    }
+  }
+  // No suitable allocator
+  return CreateNewAllocator()->Allocate(size, attr);
 }
 
 bool AutoIncrementAllocator::IsAllocThreadSafe() const { return true; }
 
+std::shared_ptr<Allocator> AutoIncrementAllocator::CreateNewAllocator() {
+  std::lock_guard<std::mutex> guard(mtx_);
+  auto old_size = allocator_num_.load();
+  PADDLE_ENFORCE_LT(old_size, underlying_allocators_.size(),
+                    "Allocator number exceeds capacity %d",
+                    underlying_allocators_.size());
+  underlying_allocators_[old_size] = creator_();
+  prev_success_allocator_ = old_size;
+  ++allocator_num_;
+  PADDLE_ENFORCE(
+      underlying_allocators_[old_size]->IsAllocThreadSafe(),
+      "the underlying allocator must be thread safe. This is a program "
+      "bug.");
+  return underlying_allocators_[old_size];
+}
 }  // namespace allocation
 }  // namespace memory
 }  // namespace paddle
```
paddle/fluid/memory/allocation/auto_increment_allocator.h

```diff
@@ -46,76 +46,20 @@ namespace allocation {
 // thread-safe std::vector with varying size is hard to implement.
 // Fortunately, we can get the total GPU memory and each chunk size.
 // Therefore, we can get the suitable capacity of AutoIncrementAllocator.
-class AutoIncrementAllocator : public ManagedAllocator {
+class AutoIncrementAllocator : public Allocator {
  public:
   // Creator is the method to create ManagedAllocator
-  using AllocatorCreator = std::function<std::shared_ptr<ManagedAllocator>()>;
+  using AllocatorCreator = std::function<std::shared_ptr<Allocator>()>;
 
   explicit AutoIncrementAllocator(AllocatorCreator&& creator, size_t capacity)
       : creator_(std::move(creator)), underlying_allocators_(capacity) {}
 
   std::unique_ptr<Allocation> Allocate(size_t size, Attr attr) override;
 
-  std::shared_ptr<Allocation> AllocateShared(size_t size, Attr attr) override;
-
   bool IsAllocThreadSafe() const override;
 
  private:
-  // NOTE: here use template Callback, it can be inlined when -O3
-  template <typename Callback>
-  inline typename std::result_of<Callback(ManagedAllocator&)>::type
-  InvokeOrCreateUnderlyingAllocator(Callback callback) {
-    auto cur = prev_success_allocator_.load();
-    size_t retry_count = allocator_num_.load();
-    size_t allocator_num = retry_count;
-    while (retry_count-- > 0) {  // until there retry count is zero
-      try {
-        auto res = callback(*underlying_allocators_[cur]);
-        prev_success_allocator_ = cur;
-        return std::move(res);
-      } catch (BadAlloc&) {
-        if (++cur >= allocator_num) {
-          cur = 0;
-        }
-      } catch (...) {
-        // if there is another type of allocation, just rethrow it.
-        throw;
-      }
-    }
-    // This happens when the first allocator is exhausted and
-    // there are more than 1 allocation requests
-    // In this situation, the first allocation request would success
-    // and the second allocation request would fail if we do not use
-    // the newly created allocator by the first allocation request.
-    for (cur = allocator_num; cur < allocator_num_; ++cur) {
-      try {
-        auto ret = callback(*underlying_allocators_[cur]);
-        prev_success_allocator_ = cur;
-        return std::move(ret);
-      } catch (BadAlloc&) {
-      } catch (...) {
-        throw;
-      }
-    }
-    // No suitable allocator
-    ManagedAllocator* new_allocator;
-    {
-      std::lock_guard<std::mutex> guard(mtx_);
-      auto old_size = allocator_num_.load();
-      PADDLE_ENFORCE_LT(old_size, underlying_allocators_.size(),
-                        "Allocator number exceeds capacity %d",
-                        underlying_allocators_.size());
-      underlying_allocators_[old_size] = creator_();
-      new_allocator = underlying_allocators_[old_size].get();
-      prev_success_allocator_ = old_size;
-      ++allocator_num_;
-    }
-    PADDLE_ENFORCE(
-        new_allocator->IsAllocThreadSafe(),
-        "the underlying allocator must be thread safe. This is a program "
-        "bug.");
-    return callback(*new_allocator);
-  }
+  std::shared_ptr<Allocator> CreateNewAllocator();
 
   AllocatorCreator creator_;
```
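The allocation policy that used to live in the inlined `InvokeOrCreateUnderlyingAllocator` template now sits in the .cc file, but the logic is unchanged: sweep the existing sub-allocators starting from the one that succeeded last, and provision a new one only when every existing pool throws. A simplified, single-threaded model of that policy (all names here are illustrative; the real class uses atomics plus a mutex around the creation path):

```cpp
#include <cstddef>
#include <functional>
#include <memory>
#include <stdexcept>
#include <vector>

struct Exhausted : std::runtime_error {
  Exhausted() : std::runtime_error("exhausted") {}
};

// A fixed-capacity pool that throws once it runs dry.
class Pool {
 public:
  explicit Pool(size_t capacity) : remaining_(capacity) {}
  size_t Allocate(size_t n) {
    if (n > remaining_) throw Exhausted();
    remaining_ -= n;
    return n;
  }
 private:
  size_t remaining_;
};

class GrowingPoolSet {
 public:
  explicit GrowingPoolSet(std::function<std::shared_ptr<Pool>()> creator)
      : creator_(std::move(creator)) {}

  size_t Allocate(size_t n) {
    // Sweep the ring starting from the allocator that succeeded last time.
    size_t count = pools_.size();
    for (size_t i = 0; i < count; ++i) {
      size_t cur = (prev_success_ + i) % count;
      try {
        size_t got = pools_[cur]->Allocate(n);
        prev_success_ = cur;
        return got;
      } catch (Exhausted&) {
      }
    }
    // Every existing pool is exhausted: provision a new one and retry once.
    pools_.push_back(creator_());
    prev_success_ = pools_.size() - 1;
    return pools_.back()->Allocate(n);
  }

 private:
  std::function<std::shared_ptr<Pool>()> creator_;
  std::vector<std::shared_ptr<Pool>> pools_;
  size_t prev_success_ = 0;
};

int main() {
  GrowingPoolSet set([] { return std::make_shared<Pool>(1024); });
  set.Allocate(800);
  set.Allocate(800);  // first pool exhausted -> a second pool is created
}
```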
paddle/fluid/memory/allocation/best_fit_allocator.cc

```diff
@@ -45,23 +45,6 @@ BestFitAllocator::BestFitAllocator(Allocation* allocation)
       {chunk.size_, chunks_.begin()});
 }
 
-std::unique_ptr<Allocation> BestFitAllocator::Allocate(size_t size, Attr attr) {
-  auto highest_set_bit = static_cast<size_t>(HighestBitPos(size));
-  MapIt map_it;
-  for (; highest_set_bit < free_chunks_.size(); ++highest_set_bit) {
-    map_it = free_chunks_[highest_set_bit].lower_bound(size);
-    if (map_it != free_chunks_[highest_set_bit].end()) {
-      break;
-    }
-  }
-  if (UNLIKELY(highest_set_bit == free_chunks_.size())) {
-    throw BadAlloc(string::Sprintf(
-        "Cannot allocate %d, All fragments size is %d", size, FreeSize()));
-  }
-  auto chunk_it = SplitChunk(size, highest_set_bit, map_it);
-  return std::unique_ptr<Allocation>(new BestFitAllocation(this, chunk_it));
-}
-
 size_t BestFitAllocator::FreeSize() const {
   size_t acc = 0;
   for (auto& array_item : free_chunks_) {
@@ -104,8 +87,30 @@ BestFitAllocator::ListIt BestFitAllocator::SplitChunk(size_t request_size,
   return to_use_it;
 }
 
-void BestFitAllocator::FreeUniquePtr(std::unique_ptr<Allocation> allocation) {
-  auto* bf_allocation = dynamic_cast<BestFitAllocation*>(allocation.get());
+void BestFitAllocator::InsertFreeNode(const ListIt& it) {
+  auto pos = static_cast<size_t>(HighestBitPos(it->size_));
+  auto& free_map = free_chunks_[pos];
+  free_map.insert({it->size_, it});
+}
+void BestFitAllocator::EraseFreeNode(const ListIt& it) {
+  size_t pos = static_cast<size_t>(HighestBitPos(it->size_));
+  auto& free_map = free_chunks_[pos];
+  auto map_it = free_map.find(it->size_);
+  while (map_it->second != it && map_it != free_map.end()) {
+    ++map_it;
+  }
+  PADDLE_ENFORCE(map_it != free_map.end());
+  free_map.erase(map_it);
+}
+size_t BestFitAllocator::NumFreeChunks() const {
+  size_t num = 0;
+  for (auto& array_item : free_chunks_) {
+    num += array_item.size();
+  }
+  return num;
+}
+void BestFitAllocator::Free(MannualFreeAllocation* allocation) {
+  auto* bf_allocation = dynamic_cast<BestFitAllocation*>(allocation);
   auto chunk_it = bf_allocation->ChunkIterator();
   PADDLE_ENFORCE(!chunk_it->is_free);
   chunk_it->is_free = true;
@@ -132,38 +137,32 @@ void BestFitAllocator::Free(MannualFreeAllocation* allocation) {
   InsertFreeNode(chunk_it);
 }
 
-void BestFitAllocator::InsertFreeNode(const ListIt& it) {
-  auto pos = static_cast<size_t>(HighestBitPos(it->size_));
-  auto& free_map = free_chunks_[pos];
-  free_map.insert({it->size_, it});
-}
-void BestFitAllocator::EraseFreeNode(const ListIt& it) {
-  size_t pos = static_cast<size_t>(HighestBitPos(it->size_));
-  auto& free_map = free_chunks_[pos];
-  auto map_it = free_map.find(it->size_);
-  while (map_it->second != it && map_it != free_map.end()) {
-    ++map_it;
-  }
-  PADDLE_ENFORCE(map_it != free_map.end());
-  free_map.erase(map_it);
-}
-size_t BestFitAllocator::NumFreeChunks() const {
-  size_t num = 0;
-  for (auto& array_item : free_chunks_) {
-    num += array_item.size();
-  }
-  return num;
+MannualFreeAllocation* BestFitAllocator::AllocateImpl(size_t size,
+                                                      Allocator::Attr attr) {
+  auto highest_set_bit = static_cast<size_t>(HighestBitPos(size));
+  MapIt map_it;
+  for (; highest_set_bit < free_chunks_.size(); ++highest_set_bit) {
+    map_it = free_chunks_[highest_set_bit].lower_bound(size);
+    if (map_it != free_chunks_[highest_set_bit].end()) {
+      break;
+    }
+  }
+  if (UNLIKELY(highest_set_bit == free_chunks_.size())) {
+    throw BadAlloc(string::Sprintf(
+        "Cannot allocate %d, All fragments size is %d", size, FreeSize()));
+  }
+  auto chunk_it = SplitChunk(size, highest_set_bit, map_it);
+  return new BestFitAllocation(this, chunk_it);
 }
 
 BestFitAllocation::BestFitAllocation(
     paddle::memory::allocation::BestFitAllocator* allocator,
     typename details::ChunkList::iterator chunk_it)
-    : Allocation(reinterpret_cast<void*>(
-                     reinterpret_cast<uintptr_t>(allocator->BasePtr()) +
-                     chunk_it->offset_),
-                 chunk_it->size_, allocator->Place()),
-      allocator_(allocator),
+    : MannualFreeAllocation(
+          allocator,
+          reinterpret_cast<void*>(
+              reinterpret_cast<uintptr_t>(allocator->BasePtr()) +
+              chunk_it->offset_),
+          chunk_it->size_, allocator->Place()),
       chunk_it_(chunk_it) {}
 }  // namespace allocation
 }  // namespace memory
```
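`AllocateImpl` keeps the original search strategy: free chunks are binned by the highest set bit of their size, so bin k holds chunks with sizes in [2^k, 2^(k+1)) and the scan can start at the first bin that could possibly hold the request. A sketch of the bin index (the real `HighestBitPos` is defined elsewhere in this file; this loop version is illustrative):

```cpp
#include <cstddef>
#include <iostream>

// Position of the highest set bit, i.e. floor(log2(v)) for v > 0.
static size_t HighestBitPos(size_t v) {
  size_t pos = 0;
  while (v >>= 1) ++pos;
  return pos;
}

int main() {
  std::cout << HighestBitPos(1) << "\n";     // 0  -> bin [1, 2)
  std::cout << HighestBitPos(4096) << "\n";  // 12 -> bin [4096, 8192)
  std::cout << HighestBitPos(5000) << "\n";  // 12 -> same bin as 4096
}
```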
paddle/fluid/memory/allocation/best_fit_allocator.h

```diff
@@ -71,7 +71,7 @@ using FreeChunkBin =
 class BestFitAllocator;
 
 // The BestFitAllocation maintain the List Node iterator.
-class BestFitAllocation : public Allocation {
+class BestFitAllocation : public MannualFreeAllocation {
  private:
   using ListIt = typename details::ChunkList::iterator;
@@ -81,7 +81,6 @@ class BestFitAllocation : public MannualFreeAllocation {
   const ListIt& ChunkIterator() const { return chunk_it_; }
 
  private:
-  BestFitAllocator* allocator_;
   typename details::ChunkList::iterator chunk_it_;
 };
@@ -99,7 +98,7 @@ class BestFitAllocation : public MannualFreeAllocation {
 //
 // To free an allocation, it will set the chunk of allocation to free and merge
 // the prev-chunk and the next-chunk when possible.
-class BestFitAllocator : public UnmanagedAllocator {
+class BestFitAllocator : public MannualFreeAllocator {
  public:
   explicit BestFitAllocator(Allocation* allocation);
@@ -107,9 +106,9 @@ class BestFitAllocator : public MannualFreeAllocator {
   const platform::Place& Place() const { return allocation_->place(); }
 
-  std::unique_ptr<Allocation> Allocate(size_t size,
-                                       Attr attr = kDefault) override;
-  void FreeUniquePtr(std::unique_ptr<Allocation> allocation) override;
+  // std::unique_ptr<Allocation> Allocate(size_t size,
+  //                                      Attr attr = kDefault) override;
+  // void FreeUniquePtr(std::unique_ptr<Allocation> allocation) override;
 
   size_t NumFreeChunks() const;
@@ -123,6 +122,12 @@ class BestFitAllocator : public MannualFreeAllocator {
   void EraseFreeNode(const ListIt& it);
   void InsertFreeNode(const ListIt& it);
 
+ protected:
+  void Free(MannualFreeAllocation* allocation) override;
+  MannualFreeAllocation* AllocateImpl(size_t size,
+                                      Allocator::Attr attr) override;
+
+ private:
   Allocation* allocation_;  // not owned
   details::ChunkList chunks_;
   details::FreeChunkBin free_chunks_;
```
paddle/fluid/memory/allocation/buffered_allocator.cc

```diff
@@ -16,14 +16,14 @@
 #include <algorithm>
 #include <limits>
 #include <utility>
+#include "paddle/fluid/memory/allocation/underlying_manual_allocation.h"
 
 namespace paddle {
 namespace memory {
 namespace allocation {
 
-BufferedAllocator::BufferedAllocator(std::unique_ptr<Allocator>&& allocator) {
-  underlying_allocator_.reset(
-      dynamic_cast<UnmanagedAllocator*>(allocator.release()));
+BufferedAllocator::BufferedAllocator(std::unique_ptr<Allocator>&& allocator)
+    : underlying_allocator_(std::move(allocator)) {
   PADDLE_ENFORCE_NOT_NULL(
       underlying_allocator_,
       "Underlying allocator of BufferedAllocator must be unmanaged");
@@ -34,26 +34,6 @@ BufferedAllocator::BufferedAllocator(std::unique_ptr<Allocator>&& allocator)
 
 BufferedAllocator::~BufferedAllocator() { FreeCache(-1UL); }
 
-std::unique_ptr<Allocation> BufferedAllocator::Allocate(size_t size,
-                                                        Allocator::Attr attr) {
-  {
-    platform::LockGuardPtr<std::mutex> guard(mtx_);
-    auto it = allocations_.lower_bound(size);
-    if (it != allocations_.end() && it->first < size * 2) {
-      std::unique_ptr<Allocation> result(std::move(it->second));
-      allocations_.erase(it);
-      return result;
-    }
-  }
-
-  try {
-    return underlying_allocator_->Allocate(size, attr);
-  } catch (BadAlloc&) {
-    FreeCache(size);
-    return underlying_allocator_->Allocate(size, attr);
-  }
-}
-
 void BufferedAllocator::FreeCache(size_t size) {
   platform::LockGuardPtr<std::mutex> guard(mtx_);
   if (UNLIKELY(size == 0)) return;
@@ -61,19 +41,42 @@ void BufferedAllocator::FreeCache(size_t size) {
   while (!allocations_.empty()) {  // free the largest
     auto it = --allocations_.end();
     cur += it->second->size();
-    underlying_allocator_->FreeUniquePtr(std::move(it->second));
     allocations_.erase(it);
     if (cur >= size) return;
   }
 }
 
-void BufferedAllocator::FreeUniquePtr(std::unique_ptr<Allocation> allocation) {
+bool BufferedAllocator::IsAllocThreadSafe() const {
+  return this->underlying_allocator_->IsAllocThreadSafe();
+}
+
+void BufferedAllocator::Free(MannualFreeAllocation* allocation) {
   platform::LockGuardPtr<std::mutex> guard(mtx_);
-  allocations_.emplace(allocation->size(), std::move(allocation));
+  std::unique_ptr<Allocation> new_allocation(new UnderlyingManualAllocation(
+      this, std::move(reinterpret_cast<UnderlyingManualAllocation*>(allocation)
+                          ->allocation_)));
+  allocations_.emplace(allocation->size(), std::move(new_allocation));
 }
 
-bool BufferedAllocator::IsAllocThreadSafe() const {
-  return this->underlying_allocator_->IsAllocThreadSafe();
+MannualFreeAllocation* BufferedAllocator::AllocateImpl(size_t size,
+                                                       Allocator::Attr attr) {
+  {
+    platform::LockGuardPtr<std::mutex> guard(mtx_);
+    auto it = allocations_.lower_bound(size);
+    if (it != allocations_.end() && it->first < size * 2) {
+      std::unique_ptr<Allocation> result(std::move(it->second));
+      allocations_.erase(it);
+      return new UnderlyingManualAllocation(this, std::move(result));
+    }
+  }
+
+  try {
+    return new UnderlyingManualAllocation(
+        this, underlying_allocator_->Allocate(size, attr));
+  } catch (BadAlloc&) {
+    FreeCache(size);
+    return new UnderlyingManualAllocation(
+        this, underlying_allocator_->Allocate(size, attr));
+  }
 }
 }  // namespace allocation
```
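`AllocateImpl` preserves the original cache policy: freed blocks live in a size-keyed multimap, and a request is served from the cache only if the best-fitting cached block is smaller than twice the requested size, which bounds internal fragmentation. The lookup in isolation (with a stand-in `Block` type, not the patch's `Allocation`):

```cpp
#include <cstddef>
#include <map>
#include <memory>

struct Block { size_t size; };

// Reuse the smallest cached block whose size is in [size, 2*size), or
// return nullptr so the caller allocates fresh from the underlying source.
std::unique_ptr<Block> TryReuse(
    std::multimap<size_t, std::unique_ptr<Block>>* cache, size_t size) {
  auto it = cache->lower_bound(size);  // first cached block >= size
  if (it != cache->end() && it->first < size * 2) {
    std::unique_ptr<Block> result = std::move(it->second);
    cache->erase(it);
    return result;  // cache hit
  }
  return nullptr;   // miss
}
```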
paddle/fluid/memory/allocation/buffered_allocator.h

```diff
@@ -29,16 +29,17 @@ namespace allocation {
 // memory allocation and reuse memory.
 // BufferedAllocator provides the same thread-safety level as
 // underlying_allocator_
-class BufferedAllocator : public UnmanagedAllocator {
+class BufferedAllocator : public MannualFreeAllocator {
  public:
   explicit BufferedAllocator(std::unique_ptr<Allocator>&& allocator);
 
   ~BufferedAllocator();
 
-  std::unique_ptr<Allocation> Allocate(
-      size_t size, Allocator::Attr attr = Allocator::Attr::kDefault) override;
-
-  void FreeUniquePtr(std::unique_ptr<Allocation> allocation) override;
+  // std::unique_ptr<Allocation> Allocate(
+  //     size_t size, Allocator::Attr attr = Allocator::Attr::kDefault)
+  //     override;
+  //
+  // void FreeUniquePtr(std::unique_ptr<Allocation> allocation) override;
 
   bool IsAllocThreadSafe() const override;
@@ -48,7 +49,13 @@ class BufferedAllocator : public MannualFreeAllocator {
  private:
   void FreeCache(size_t size);
 
-  std::unique_ptr<UnmanagedAllocator> underlying_allocator_;
+ protected:
+  void Free(MannualFreeAllocation* allocation) override;
+  MannualFreeAllocation* AllocateImpl(size_t size,
+                                      Allocator::Attr attr) override;
+
+ private:
+  std::unique_ptr<Allocator> underlying_allocator_;
   std::multimap<size_t, std::unique_ptr<Allocation>> allocations_;
   std::unique_ptr<std::mutex> mtx_;
 };
```
paddle/fluid/memory/allocation/conditional_allocator.cc

```diff
@@ -20,23 +20,27 @@ namespace allocation {
 ConditionalAllocator& ConditionalAllocator::AddAllocator(
     std::function<bool(size_t, Allocator::Attr)> func,
-    std::shared_ptr<ManagedAllocator> allocator) {
+    std::shared_ptr<Allocator> allocator) {
   underlying_allocators_.emplace_back(std::move(func), std::move(allocator));
   return *this;
 }
 std::unique_ptr<Allocation> ConditionalAllocator::Allocate(
     size_t size, Allocator::Attr attr) {
-  return SelectAndInvoke(size, attr, [&](ManagedAllocator& allocator) {
-    return allocator.Allocate(size, attr);
-  });
+  for (auto& pair : underlying_allocators_) {
+    if (pair.first(size, attr)) {
+      return pair.second->Allocate(size, attr);
+    }
+  }
+  throw BadAlloc("No suitable allocator");
 }
-std::shared_ptr<Allocation> ConditionalAllocator::AllocateShared(
-    size_t size, Allocator::Attr attr) {
-  return SelectAndInvoke(size, attr, [&](ManagedAllocator& allocator) {
-    return allocator.AllocateShared(size, attr);
-  });
+
+bool ConditionalAllocator::IsAllocThreadSafe() const {
+  return std::all_of(underlying_allocators_.begin(),
+                     underlying_allocators_.end(),
+                     [](const AllocatorWithCond& allocatorWithCond) {
+                       return allocatorWithCond.second->IsAllocThreadSafe();
+                     });
 }
-bool ConditionalAllocator::IsAllocThreadSafe() const { return true; }
 }  // namespace allocation
 }  // namespace memory
```
paddle/fluid/memory/allocation/conditional_allocator.h

```diff
@@ -38,32 +38,21 @@ namespace allocation {
 // //      else
 // //        return true;
 // //    }, allocator_c);
-class ConditionalAllocator : public ManagedAllocator {
+class ConditionalAllocator : public Allocator {
  public:
   ConditionalAllocator() = default;
 
   ConditionalAllocator& AddAllocator(std::function<bool(size_t, Attr)> func,
-                                     std::shared_ptr<ManagedAllocator> allocator);
+                                     std::shared_ptr<Allocator> allocator);
 
   std::unique_ptr<Allocation> Allocate(size_t size, Attr attr) override;
 
-  std::shared_ptr<Allocation> AllocateShared(size_t size, Attr attr) override;
-
   bool IsAllocThreadSafe() const override;
 
  private:
-  template <typename Callback>
-  inline typename std::result_of<Callback(ManagedAllocator&)>::type
-  SelectAndInvoke(size_t size, Attr attr, Callback callback) {
-    for (auto& pair : underlying_allocators_) {
-      if (pair.first(size, attr)) {
-        return callback(*pair.second);
-      }
-    }
-    PADDLE_THROW("No suitable allocator");
-  }
-
-  std::vector<std::pair<std::function<bool(size_t, Attr)>,
-                        std::shared_ptr<ManagedAllocator>>>
-      underlying_allocators_;
+  using AllocatorWithCond =
+      std::pair<std::function<bool(size_t, Attr)>, std::shared_ptr<Allocator>>;
+  std::vector<AllocatorWithCond> underlying_allocators_;
 };
 }  // namespace allocation
```
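With `SelectAndInvoke` gone, `Allocate` walks the predicate list directly; the first predicate that accepts `(size, attr)` picks the backend. A self-contained model of that routing (illustrative names and thresholds, not the patch's API):

```cpp
#include <cstddef>
#include <functional>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

int main() {
  using Pred = std::function<bool(size_t)>;
  // Predicates are checked in insertion order; the first match wins,
  // so the catch-all goes last.
  std::vector<std::pair<Pred, std::string>> routes = {
      {[](size_t n) { return n >= (1u << 20); }, "chunked GPU allocator"},
      {[](size_t n) { return n == 0; }, "zero-size allocator"},
      {[](size_t) { return true; }, "default allocator"},
  };
  for (size_t req : {0u, 4096u, 1u << 22}) {
    for (auto& r : routes) {
      if (r.first(req)) {
        std::cout << req << " -> " << r.second << "\n";
        break;
      }
    }
  }
}
```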
paddle/fluid/memory/allocation/cpu_allocator.cc

```diff
@@ -20,21 +20,27 @@ namespace paddle {
 namespace memory {
 namespace allocation {
 
-std::unique_ptr<Allocation> CPUAllocator::Allocate(size_t size, Attr attr) {
-  void* ptr;
+CPUAllocation::CPUAllocation(
+    paddle::memory::allocation::CPUAllocator* allocator, void* ptr,
+    size_t size)
+    : MannualFreeAllocation(allocator, ptr, size, platform::CPUPlace()) {}
+
+bool CPUAllocator::IsAllocThreadSafe() const { return true; }
+
+void CPUAllocator::Free(MannualFreeAllocation* allocation) {
+  PADDLE_ENFORCE_NOT_NULL(dynamic_cast<CPUAllocation*>(allocation));
+  free(allocation->ptr());
+}
+
+MannualFreeAllocation* CPUAllocator::AllocateImpl(size_t size,
+                                                  Allocator::Attr attr) {
+  void* ptr;
   auto status = posix_memalign(&ptr, kAlignment, size);
   if (UNLIKELY(status) != 0) {
     throw BadAlloc(string::Sprintf("Cannot allocate cpu memory %d. Errno is %d",
                                    size, status));
   }
-  return std::unique_ptr<Allocation>(new CPUAllocation(ptr, size));
-}
-void CPUAllocator::FreeUniquePtr(std::unique_ptr<Allocation> allocation) {
-  PADDLE_ENFORCE_NOT_NULL(dynamic_cast<CPUAllocation*>(allocation.get()));
-  free(allocation->ptr());
+  return new CPUAllocation(this, ptr, size);
 }
-
-bool CPUAllocator::IsAllocThreadSafe() const { return true; }
 }  // namespace allocation
 }  // namespace memory
 }  // namespace paddle
```
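The allocation path itself is unchanged by the interface cleanup: `posix_memalign` returns 0 on success and an errno-style code otherwise (it does not set `errno`), and its memory is released with plain `free`, which is exactly what the new `Free` override does. A minimal standalone sketch (POSIX-only):

```cpp
#include <cstdio>
#include <cstdlib>

int main() {
  void* ptr = nullptr;
  // Request 1024 bytes aligned to a 64-byte boundary.
  int status = posix_memalign(&ptr, /*alignment=*/64, /*size=*/1024);
  if (status != 0) {
    std::fprintf(stderr, "allocation failed: %d\n", status);
    return 1;
  }
  std::free(ptr);  // posix_memalign memory is released with free()
  return 0;
}
```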
paddle/fluid/memory/allocation/cpu_allocator.h

```diff
@@ -25,19 +25,21 @@ namespace allocation {
 //
 // NOTE(yy): It is no need to use `BestFitAllocator` in CPU. We can import
 // an open-sourced allocator into Paddle.
-class CPUAllocation : public Allocation {
+class CPUAllocator;
+class CPUAllocation : public MannualFreeAllocation {
  public:
-  CPUAllocation(void* ptr, size_t size)
-      : Allocation(ptr, size, platform::CPUPlace()) {}
+  CPUAllocation(CPUAllocator* allocator, void* ptr, size_t size);
 };
 
-class CPUAllocator : public UnmanagedAllocator {
+class CPUAllocator : public MannualFreeAllocator {
  public:
   constexpr static size_t kAlignment = 64u;
-  std::unique_ptr<Allocation> Allocate(size_t size, Attr attr = kDefault) override;
-  void FreeUniquePtr(std::unique_ptr<Allocation> allocation) override;
   bool IsAllocThreadSafe() const override;
+
+ protected:
+  void Free(MannualFreeAllocation* allocation) override;
+  MannualFreeAllocation* AllocateImpl(size_t size,
+                                      Allocator::Attr attr) override;
 };
 }  // namespace allocation
 }  // namespace memory
```
paddle/fluid/memory/allocation/locked_allocator.cc

```diff
@@ -14,36 +14,32 @@
 
 #include "paddle/fluid/memory/allocation/locked_allocator.h"
 #include <mutex>  // NOLINT
+#include "paddle/fluid/memory/allocation/underlying_manual_allocation.h"
+#include "paddle/fluid/platform/lock_guard_ptr.h"
 
 namespace paddle {
 namespace memory {
 namespace allocation {
 
-std::unique_ptr<Allocation> LockedAllocator::Allocate(size_t size, Attr attr) {
-  if (underlying_allocator_->IsAllocThreadSafe()) {
-    return underlying_allocator_->Allocate(size, attr);
-  } else {
-    std::lock_guard<std::mutex> guard(mtx_);
-    return underlying_allocator_->Allocate(size, attr);
-  }
-}
-void LockedAllocator::FreeUniquePtr(std::unique_ptr<Allocation> allocation) {
-  if (underlying_allocator_->IsAllocThreadSafe()) {
-    return underlying_allocator_->FreeUniquePtr(std::move(allocation));
-  } else {
-    std::lock_guard<std::mutex> guard(mtx_);
-    return underlying_allocator_->FreeUniquePtr(std::move(allocation));
-  }
-}
 bool LockedAllocator::IsAllocThreadSafe() const { return true; }
 
 LockedAllocator::LockedAllocator(
-    std::unique_ptr<Allocator>&& underlying_allocator) {
-  auto* allocator =
-      dynamic_cast<UnmanagedAllocator*>(underlying_allocator.get());
-  PADDLE_ENFORCE_NOT_NULL(allocator);
-  underlying_allocator.release();
-  underlying_allocator_.reset(allocator);
+    std::unique_ptr<Allocator>&& underlying_allocator)
+    : underlying_allocator_(std::move(underlying_allocator)) {
+  PADDLE_ENFORCE_NOT_NULL(underlying_allocator_);
+  if (!underlying_allocator_->IsAllocThreadSafe()) {
+    mtx_.reset(new std::mutex());
+  }
 }
+
+void LockedAllocator::Free(MannualFreeAllocation* allocation) {
+  platform::LockGuardPtr<std::mutex> guard(mtx_);
+  reinterpret_cast<UnderlyingManualAllocation*>(allocation)
+      ->allocation_.reset();
+}
+
+MannualFreeAllocation* LockedAllocator::AllocateImpl(size_t size,
+                                                     Allocator::Attr attr) {
+  platform::LockGuardPtr<std::mutex> guard(mtx_);
+  return new UnderlyingManualAllocation(
+      this, underlying_allocator_->Allocate(size, attr));
+}
 }  // namespace allocation
 }  // namespace memory
```
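Both overrides lean on `platform::LockGuardPtr`, which makes the locking conditional: the constructor only builds the mutex when the underlying allocator is not thread-safe, so a null mutex means "no locking needed". A sketch of the presumed semantics (an assumption based on the usage here, not the actual `lock_guard_ptr.h`):

```cpp
#include <memory>
#include <mutex>

// RAII guard over an optional mutex: locks in the constructor only when the
// pointer is non-null, unlocks symmetrically in the destructor.
template <typename Mutex>
class LockGuardPtrSketch {
 public:
  explicit LockGuardPtrSketch(const std::unique_ptr<Mutex>& mtx)
      : mtx_(mtx.get()) {
    if (mtx_) mtx_->lock();
  }
  ~LockGuardPtrSketch() {
    if (mtx_) mtx_->unlock();
  }
  LockGuardPtrSketch(const LockGuardPtrSketch&) = delete;
  LockGuardPtrSketch& operator=(const LockGuardPtrSketch&) = delete;

 private:
  Mutex* mtx_;
};
```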
paddle/fluid/memory/allocation/locked_allocator.h

```diff
@@ -22,17 +22,19 @@ namespace memory {
 namespace allocation {
 
 // A allocator to make underlying allocator thread safe.
-class LockedAllocator : public UnmanagedAllocator {
+class LockedAllocator : public MannualFreeAllocator {
  public:
   explicit LockedAllocator(std::unique_ptr<Allocator>&& underlying_allocator);
-  std::unique_ptr<Allocation> Allocate(size_t size,
-                                       Attr attr = kDefault) override;
-  void FreeUniquePtr(std::unique_ptr<Allocation> allocation) override;
   bool IsAllocThreadSafe() const override;
 
+ protected:
+  void Free(MannualFreeAllocation* allocation) override;
+  MannualFreeAllocation* AllocateImpl(size_t size,
+                                      Allocator::Attr attr) override;
+
  private:
-  std::unique_ptr<UnmanagedAllocator> underlying_allocator_;
-  std::mutex mtx_;
+  std::unique_ptr<Allocator> underlying_allocator_;
+  std::unique_ptr<std::mutex> mtx_;
 };
 }  // namespace allocation
```
paddle/fluid/memory/allocation/naive_managed_allocator.cc — deleted (100644 → 0)

```cpp
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/memory/allocation/naive_managed_allocator.h"

namespace paddle {
namespace memory {
namespace allocation {

NaiveManagedAllocator::NaiveManagedAllocator(
    std::unique_ptr<Allocator>&& allocator) {
  auto* underlying_allocator =
      dynamic_cast<UnmanagedAllocator*>(allocator.get());
  PADDLE_ENFORCE_NOT_NULL(underlying_allocator);
  allocator.release();
  Init(std::unique_ptr<UnmanagedAllocator>(underlying_allocator));
}

NaiveManagedAllocator::NaiveManagedAllocator(
    std::unique_ptr<UnmanagedAllocator>&& allocator) {
  Init(std::move(allocator));
}
void NaiveManagedAllocator::Init(
    std::unique_ptr<UnmanagedAllocator>&& allocator) {
  underlying_allocator_ = std::move(allocator);
}
bool NaiveManagedAllocator::IsAllocThreadSafe() const {
  return underlying_allocator_->IsAllocThreadSafe();
}
std::unique_ptr<Allocation> NaiveManagedAllocator::Allocate(size_t size,
                                                            Attr attr) {
  std::unique_ptr<Allocation> allocation =
      underlying_allocator_->Allocate(size, attr);
  return std::unique_ptr<Allocation>(
      new NaiveManagedAllocation(std::move(allocation), shared_from_this()));
}
std::shared_ptr<Allocation> NaiveManagedAllocator::AllocateShared(size_t size,
                                                                  Attr attr) {
  std::unique_ptr<Allocation> allocation =
      underlying_allocator_->Allocate(size, attr);
  return std::shared_ptr<Allocation>(
      new NaiveManagedAllocation(std::move(allocation), shared_from_this()));
}

NaiveManagedAllocation::~NaiveManagedAllocation() {
  auto allocator = allocator_.lock();
  if (UNLIKELY(allocator == nullptr)) {
    // the allocator is destructed before allocations.
    // do nothing.
    return;
  }
  // invoke Free
  allocator->UnderlyingAllocator().FreeUniquePtr(
      std::move(underlying_allocation_));
}
}  // namespace allocation
}  // namespace memory
}  // namespace paddle
```
paddle/fluid/memory/allocation/naive_managed_allocator.h — deleted (100644 → 0)

```cpp
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <memory>
#include "paddle/fluid/memory/allocation/allocator.h"

namespace paddle {
namespace memory {
namespace allocation {

// An allocator to wrap an UnmanagedAllocator and make the allocation managed
// by C++ smart ptr.
//
// NOTE: if the NaiveManagedAllocator is destroyed before
// NaiveManagedAllocations, the allocation will never be released.
class NaiveManagedAllocator;
class NaiveManagedAllocation : public Allocation {
 public:
  NaiveManagedAllocation(std::unique_ptr<Allocation>&& underlying_allocation,
                         std::shared_ptr<NaiveManagedAllocator> allocator)
      : Allocation(underlying_allocation->ptr(),
                   underlying_allocation->size(),
                   underlying_allocation->place()),
        underlying_allocation_(std::move(underlying_allocation)),
        allocator_(allocator) {}

  ~NaiveManagedAllocation() final;

 private:
  std::unique_ptr<Allocation> underlying_allocation_;
  std::weak_ptr<NaiveManagedAllocator> allocator_;
};

class NaiveManagedAllocator
    : public ManagedAllocator,
      public std::enable_shared_from_this<NaiveManagedAllocator> {
 public:
  template <typename... ARGS>
  static std::shared_ptr<ManagedAllocator> Create(ARGS... args) {
    return std::static_pointer_cast<ManagedAllocator>(
        std::shared_ptr<NaiveManagedAllocator>(
            new NaiveManagedAllocator(std::move(args)...)));
  }

  inline UnmanagedAllocator& UnderlyingAllocator() {
    return *underlying_allocator_;
  }

  bool IsAllocThreadSafe() const override;
  std::unique_ptr<Allocation> Allocate(size_t size,
                                       Attr attr = kDefault) override;
  std::shared_ptr<Allocation> AllocateShared(size_t size,
                                             Attr attr = kDefault) override;

 private:
  explicit NaiveManagedAllocator(std::unique_ptr<Allocator>&& allocator);
  explicit NaiveManagedAllocator(
      std::unique_ptr<UnmanagedAllocator>&& allocator);
  void Init(std::unique_ptr<UnmanagedAllocator>&& allocator);

  std::unique_ptr<UnmanagedAllocator> underlying_allocator_;
};
}  // namespace allocation
}  // namespace memory
}  // namespace paddle
```
paddle/fluid/memory/allocation/retry_allocator.cc

```diff
@@ -18,29 +18,25 @@ namespace paddle {
 namespace memory {
 namespace allocation {
 
-RetryAllocation::~RetryAllocation() {
-  auto allocator = retry_allocator_.lock();
-  // Allocator is destroyed before allocation. Should not happened usually.
-  if (UNLIKELY(allocator == nullptr)) return;
-  allocator->FreeUnderlyingAllocation(std::move(underlying_allocation_));
+bool RetryAllocator::IsAllocThreadSafe() const {
+  return underlying_allocator_->IsAllocThreadSafe();
 }
 
-bool RetryAllocator::IsAllocThreadSafe() const { return true; }
-
-std::shared_ptr<Allocation> RetryAllocator::AllocateShared(
-    size_t size, Allocator::Attr attr) {
-  return std::shared_ptr<Allocation>(AllocateImpl(size, attr));
-}
-
-std::unique_ptr<Allocation> RetryAllocator::Allocate(size_t size,
-                                                     Allocator::Attr attr) {
-  return std::unique_ptr<Allocation>(AllocateImpl(size, attr));
+void RetryAllocator::Free(MannualFreeAllocation* allocation) {
+  reinterpret_cast<RetryAllocation*>(allocation)
+      ->underlying_allocation_.reset();
+  {
+    // notify all waited allocators, they can try to allocate memory after free.
+    std::lock_guard<std::mutex> lock(mutex_);
+    cv_.notify_all();
+  }
 }
 
-Allocation* RetryAllocator::AllocateImpl(size_t size, Allocator::Attr attr) {
+MannualFreeAllocation* RetryAllocator::AllocateImpl(size_t size,
+                                                    Allocator::Attr attr) {
   auto alloc_func = [&, this]() {
     return new RetryAllocation(underlying_allocator_->Allocate(size, attr),
-                               this->shared_from_this());
+                               this);
   };
   // In fact, we can unify the code of allocation success and failure
   // But it would add lock even when allocation success at the first time
@@ -73,15 +69,6 @@ MannualFreeAllocation* RetryAllocator::AllocateImpl(size_t size,
     throw;
   }
 }
-
-void RetryAllocator::FreeUnderlyingAllocation(
-    std::unique_ptr<Allocation>&& allocation) {
-  underlying_allocator_->FreeUniquePtr(std::move(allocation));
-  {
-    // notify all waited allocators, they can try to allocate memory after free.
-    std::lock_guard<std::mutex> lock(mutex_);
-    cv_.notify_all();
-  }
-}
 }  // namespace allocation
 }  // namespace memory
```
paddle/fluid/memory/allocation/retry_allocator.h

```diff
@@ -26,52 +26,27 @@ namespace allocation {
 
 class RetryAllocator;
 
-class RetryAllocation : public Allocation {
+class RetryAllocation : public MannualFreeAllocation {
  public:
   RetryAllocation(std::unique_ptr<Allocation>&& underlying_allocation,
-                  const std::shared_ptr<RetryAllocator>& retry_allocator)
-      : Allocation(underlying_allocation->ptr(), underlying_allocation->size(),
-                   underlying_allocation->place()),
-        underlying_allocation_(std::move(underlying_allocation)),
-        retry_allocator_(retry_allocator) {}
-
-  ~RetryAllocation() final;
-
- private:
+                  MannualFreeAllocator* allocator)
+      : MannualFreeAllocation(allocator, underlying_allocation->ptr(),
+                              underlying_allocation->size(),
+                              underlying_allocation->place()),
+        underlying_allocation_(std::move(underlying_allocation)) {}
+
   std::unique_ptr<Allocation> underlying_allocation_;
-  std::weak_ptr<RetryAllocator> retry_allocator_;
 };
 
-class RetryAllocator : public ManagedAllocator,
-                       public std::enable_shared_from_this<RetryAllocator> {
- private:
+class RetryAllocator : public MannualFreeAllocator {
+ public:
   RetryAllocator(std::unique_ptr<Allocator>&& allocator, size_t retry_ms)
-      : underlying_allocator_(
-            dynamic_cast<UnmanagedAllocator*>(allocator.release())),
-        retry_time_(retry_ms) {
+      : underlying_allocator_(std::move(allocator)), retry_time_(retry_ms) {
     EnforceCheck();
   }
 
- public:
-  template <typename... Args>
-  static std::shared_ptr<ManagedAllocator> Create(Args... args) {
-    return std::shared_ptr<ManagedAllocator>(
-        new RetryAllocator(std::forward<Args>(args)...));
-  }
-
   bool IsAllocThreadSafe() const override;
 
-  std::unique_ptr<Allocation> Allocate(size_t size,
-                                       Allocator::Attr attr) override;
-
-  std::shared_ptr<Allocation> AllocateShared(size_t size,
-                                             Allocator::Attr attr) override;
-
-  void FreeUnderlyingAllocation(std::unique_ptr<Allocation>&& allocation);
-
  private:
-  Allocation* AllocateImpl(size_t size, Allocator::Attr attr);
   void EnforceCheck() {
     PADDLE_ENFORCE_NOT_NULL(underlying_allocator_.get(),
@@ -80,7 +55,13 @@ class RetryAllocator : public MannualFreeAllocator {
         "UnderlyingAllocator of RetryAllocator must be thread-safe");
   }
 
-  std::unique_ptr<UnmanagedAllocator> underlying_allocator_;
+ protected:
+  void Free(MannualFreeAllocation* allocation) override;
+  MannualFreeAllocation* AllocateImpl(size_t size,
+                                      Allocator::Attr attr) override;
+
+ private:
+  std::unique_ptr<Allocator> underlying_allocator_;
   std::chrono::milliseconds retry_time_;
   std::mutex mutex_;
   std::condition_variable cv_;
```
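The retry behaviour itself is unchanged by the interface cleanup: on allocation failure, wait on the condition variable (which `Free` notifies) until the time budget runs out, then rethrow. A self-contained sketch of that loop, using `std::bad_alloc` in place of paddle's `BadAlloc` and a caller-supplied allocation functor:

```cpp
#include <chrono>
#include <condition_variable>
#include <mutex>
#include <new>

// Try to allocate; on failure, sleep until a Free() notification or the
// deadline, whichever comes first, then retry or give up.
template <typename TryAlloc>
auto AllocateWithRetry(TryAlloc try_alloc, std::mutex* mu,
                       std::condition_variable* cv,
                       std::chrono::milliseconds budget)
    -> decltype(try_alloc()) {
  const auto deadline = std::chrono::steady_clock::now() + budget;
  while (true) {
    try {
      return try_alloc();  // happy path takes no lock
    } catch (std::bad_alloc&) {
      std::unique_lock<std::mutex> lock(*mu);
      if (cv->wait_until(lock, deadline) == std::cv_status::timeout) {
        throw;  // budget exhausted: propagate the failure
      }
      // woken by a Free(): loop around and retry
    }
  }
}
```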
paddle/fluid/memory/allocation/naive_managed_allocator_test.cc → paddle/fluid/memory/allocation/underlying_manual_allocation.h

```diff
@@ -12,71 +12,24 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include "paddle/fluid/memory/allocation/naive_managed_allocator.h"
-#include <atomic>  // NOLINT
-#include <random>
-#include <thread>  // NOLINT
-#include <vector>
-#include "gtest/gtest.h"
+#pragma once
+
+#include "paddle/fluid/memory/allocation/allocator.h"
 
 namespace paddle {
 namespace memory {
 namespace allocation {
 
-class StubAllocator : public UnmanagedAllocator {
+class UnderlyingManualAllocation : public MannualFreeAllocation {
  public:
-  std::unique_ptr<Allocation> Allocate(size_t size,
-                                       Attr attr = kDefault) override {
-    counter_.fetch_add(1);
-    return std::unique_ptr<Allocation>(
-        new Allocation(nullptr, size, platform::CPUPlace()));
-  }
-  void FreeUniquePtr(std::unique_ptr<Allocation> allocation) override {
-    counter_.fetch_sub(1);
-  }
-  bool IsAllocThreadSafe() const override { return true; }
-
-  std::atomic<int> counter_{0};
+  UnderlyingManualAllocation(MannualFreeAllocator* allocator,
+                             std::unique_ptr<Allocation> allocation)
+      : MannualFreeAllocation(allocator, allocation->ptr(),
+                              allocation->size(), allocation->place()),
+        allocation_(std::move(allocation)) {}
+  std::unique_ptr<Allocation> allocation_;
 };
 
-TEST(NaiveManagedAllocator, main) {
-  auto allocator = NaiveManagedAllocator::Create(
-      std::unique_ptr<Allocator>(new StubAllocator()));
-
-  auto th_main = [=] {
-    std::random_device dev;
-    std::default_random_engine engine(dev());
-    std::uniform_int_distribution<int> dist(0, 1);
-    std::vector<std::shared_ptr<Allocation>> allocations;
-
-    for (int j = 0; j < 1024; ++j) {
-      bool to_insert = static_cast<bool>(dist(engine));
-      if (to_insert) {
-        allocations.emplace_back(allocator->AllocateShared(10));
-      } else {
-        if (!allocations.empty()) {
-          allocations.pop_back();
-        }
-      }
-    }
-  };
-
-  {
-    std::vector<std::thread> threads;
-    for (size_t i = 0; i < 1024; ++i) {
-      threads.emplace_back(th_main);
-    }
-    for (auto& th : threads) {
-      th.join();
-    }
-  }
-  ASSERT_EQ(reinterpret_cast<StubAllocator&>(
-                std::dynamic_pointer_cast<NaiveManagedAllocator>(allocator)
-                    ->UnderlyingAllocator())
-                .counter_,
-            0);
-}
 }  // namespace allocation
 }  // namespace memory
 }  // namespace paddle
```
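`UnderlyingManualAllocation` is a small adapter: it exposes the inner allocation's pointer, size, and place to the outside while keeping the inner `unique_ptr` alive, so a wrapping allocator's `Free` hook decides what "free" means (LockedAllocator resets it, BufferedAllocator moves it into its cache). A stripped-down model of that ownership flow (illustrative types only):

```cpp
#include <iostream>
#include <memory>
#include <utility>

// Inner allocation whose destructor performs the real free.
struct Inner {
  ~Inner() { std::cout << "inner freed\n"; }
};

// Adapter owning the inner handle; the outer allocator's Free hook can
// either drop it (real free) or move it out (e.g. into a reuse cache).
struct Adapter {
  explicit Adapter(std::unique_ptr<Inner> inner) : inner_(std::move(inner)) {}
  std::unique_ptr<Inner> inner_;
};

int main() {
  Adapter kept(std::unique_ptr<Inner>(new Inner));
  std::unique_ptr<Inner> cache = std::move(kept.inner_);  // "Free" recycles
  Adapter dropped(std::unique_ptr<Inner>(new Inner));
  // `dropped` releases its allocation at scope exit, while the first
  // allocation now outlives its adapter inside `cache`.
}
```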
paddle/fluid/memory/allocation/zero_size_allocator.cc

```diff
@@ -26,15 +26,10 @@ std::unique_ptr<Allocation> ZeroSizeAllocator::Allocate(size_t size,
     return underlying_allocator_->Allocate(size, attr);
   }
 }
-std::shared_ptr<Allocation> ZeroSizeAllocator::AllocateShared(
-    size_t size, Allocator::Attr attr) {
-  if (size == 0) {
-    return std::shared_ptr<Allocation>(new ZeroSizeAllocation(place_));
-  } else {
-    return underlying_allocator_->AllocateShared(size, attr);
-  }
+
+bool ZeroSizeAllocator::IsAllocThreadSafe() const {
+  return underlying_allocator_->IsAllocThreadSafe();
 }
-bool ZeroSizeAllocator::IsAllocThreadSafe() const { return true; }
 }  // namespace allocation
 }  // namespace memory
 }  // namespace paddle
```
paddle/fluid/memory/allocation/zero_size_allocator.h

```diff
@@ -12,10 +12,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include <utility>
-
 #pragma once
+#include <utility>
 
 #include "paddle/fluid/memory/allocation/allocator.h"
 
 namespace paddle {
@@ -31,18 +29,17 @@ class ZeroSizeAllocation : public Allocation {
       : Allocation(nullptr, 0, p) {}
 };
 
-class ZeroSizeAllocator : public ManagedAllocator {
+class ZeroSizeAllocator : public Allocator {
  public:
-  ZeroSizeAllocator(
-      const std::shared_ptr<ManagedAllocator>& underlying_allocator,
-      const platform::Place& p)
-      : underlying_allocator_(underlying_allocator), place_(p) {}
+  ZeroSizeAllocator(std::shared_ptr<Allocator> underlying_allocator,
+                    const platform::Place& p)
+      : underlying_allocator_(std::move(underlying_allocator)), place_(p) {}
 
   std::unique_ptr<Allocation> Allocate(size_t size, Attr attr) override;
-  std::shared_ptr<Allocation> AllocateShared(size_t size, Attr attr) override;
+
   bool IsAllocThreadSafe() const override;
 
  private:
-  std::shared_ptr<ManagedAllocator> underlying_allocator_;
+  std::shared_ptr<Allocator> underlying_allocator_;
   const platform::Place& place_;
 };
```