PaddlePaddle / Paddle
Commit ea81f8ee
Authored Nov 14, 2018 by Yu Yang
Clean interface of allocator
Clean managed/unmanaged allocator
Parent: 02631965

Showing 25 changed files with 347 additions and 503 deletions (+347 -503)
paddle/fluid/memory/allocation/CMakeLists.txt                  +1   -5
paddle/fluid/memory/allocation/aligned_allocator.cc            +1   -6
paddle/fluid/memory/allocation/aligned_allocator.h             +3   -5
paddle/fluid/memory/allocation/allocator.cc                    +5   -0
paddle/fluid/memory/allocation/allocator.h                     +20  -9
paddle/fluid/memory/allocation/allocator_facade.cc             +15  -24
paddle/fluid/memory/allocation/auto_increment_allocator.cc     +50  -9
paddle/fluid/memory/allocation/auto_increment_allocator.h      +5   -61
paddle/fluid/memory/allocation/best_fit_allocator.cc           +43  -44
paddle/fluid/memory/allocation/best_fit_allocator.h            +11  -6
paddle/fluid/memory/allocation/buffered_allocator.cc           +31  -28
paddle/fluid/memory/allocation/buffered_allocator.h            +14  -7
paddle/fluid/memory/allocation/conditional_allocator.cc        +14  -10
paddle/fluid/memory/allocation/conditional_allocator.h         +8   -19
paddle/fluid/memory/allocation/cpu_allocator.cc                +15  -9
paddle/fluid/memory/allocation/cpu_allocator.h                 +9   -7
paddle/fluid/memory/allocation/locked_allocator.cc             +19  -23
paddle/fluid/memory/allocation/locked_allocator.h              +9   -7
paddle/fluid/memory/allocation/naive_managed_allocator.cc      +0   -69
paddle/fluid/memory/allocation/naive_managed_allocator.h       +0   -76
paddle/fluid/memory/allocation/retry_allocator.cc              +13  -26
paddle/fluid/memory/allocation/retry_allocator.h               +16  -35
paddle/fluid/memory/allocation/underlying_manual_allocation.h  +35  -0
paddle/fluid/memory/allocation/zero_size_allocator.cc          +3   -8
paddle/fluid/memory/allocation/zero_size_allocator.h           +7   -10
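Before this change, callers had to pick between the two sides of the split hierarchy: UnmanagedAllocator (Allocate plus a manual FreeUniquePtr) and ManagedAllocator (a separate AllocateShared virtual). After it, every allocator exposes a single Allocate() returning std::unique_ptr<Allocation>, and shared ownership is layered on top. A minimal sketch of the caller-facing shape, using illustrative stand-in types rather than the real Paddle headers:

    #include <cstddef>
    #include <memory>

    struct Allocation { virtual ~Allocation() = default; };

    struct Allocator {
      // New-style single entry point: ownership always starts as unique_ptr.
      virtual std::unique_ptr<Allocation> Allocate(size_t size) = 0;
      virtual ~Allocator() = default;
    };

    // Shared ownership is now derived from the unique_ptr result instead of a
    // separate AllocateShared() virtual, mirroring the change to
    // AllocatorFacade::AllocShared in this commit.
    std::shared_ptr<Allocation> AllocShared(Allocator* a, size_t size) {
      return std::shared_ptr<Allocation>(a->Allocate(size).release());
    }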
paddle/fluid/memory/allocation/CMakeLists.txt

@@ -29,9 +29,6 @@ else()
         cpu_allocator)
 endif()

-cc_library(naive_managed_allocator SRCS naive_managed_allocator.cc DEPS allocator)
-cc_test(naive_managed_allocator_test SRCS naive_managed_allocator_test.cc DEPS naive_managed_allocator)
-
 nv_library(pinned_allocator SRCS pinned_allocator.cc DEPS allocator)

 if (WITH_GPU)
   set(AllocatorFacadeDeps gpu_info cuda_allocator pinned_allocator cuda_device_guard)
@@ -49,7 +46,6 @@ cc_library(allocator_facade SRCS allocator_facade.cc DEPS
   cpu_allocator
   locked_allocator
   best_fit_allocator
-  naive_managed_allocator
   aligned_allocator
   auto_increment_allocator
   zero_size_allocator
@@ -61,6 +57,6 @@ cc_library(allocator_facade SRCS allocator_facade.cc DEPS
 nv_test(allocation_and_eigen_test SRCS allocation_and_eigen_test.cu DEPS allocator_facade)
-cc_test(retry_allocator_test SRCS retry_allocator_test.cc DEPS retry_allocator naive_managed_allocator best_fit_allocator locked_allocator cpu_allocator)
+cc_test(retry_allocator_test SRCS retry_allocator_test.cc DEPS retry_allocator best_fit_allocator locked_allocator cpu_allocator)
 cc_test(allocator_facade_test SRCS allocator_facade_test.cc DEPS allocator_facade)
paddle/fluid/memory/allocation/aligned_allocator.cc

@@ -19,14 +19,9 @@ namespace memory {
 namespace allocation {

 ThinAlignedAllocator::ThinAlignedAllocator(
-    std::shared_ptr<ManagedAllocator> underlyning_allocator)
+    std::shared_ptr<Allocator> underlyning_allocator)
     : underlying_allocator_(std::move(underlyning_allocator)) {}

-std::shared_ptr<Allocation> ThinAlignedAllocator::AllocateShared(
-    size_t size, Allocator::Attr attr) {
-  return std::shared_ptr<Allocation>(Allocate(size, attr).release());
-}
-
 bool ThinAlignedAllocator::IsAllocThreadSafe() const {
   return underlying_allocator_->IsAllocThreadSafe();
 }
paddle/fluid/memory/allocation/aligned_allocator.h

@@ -70,17 +70,15 @@ class AlignedAllocation : public Allocation {
 //
 // NOTE(yy): This could be an over design. If it harms readability of code, it
 // could be removed later.
-class ThinAlignedAllocator : public ManagedAllocator {
+class ThinAlignedAllocator : public Allocator {
  public:
   explicit ThinAlignedAllocator(
-      std::shared_ptr<ManagedAllocator> underlyning_allocator);
-
-  std::shared_ptr<Allocation> AllocateShared(size_t size, Attr attr) override;
+      std::shared_ptr<Allocator> underlyning_allocator);

   bool IsAllocThreadSafe() const;

  protected:
-  std::shared_ptr<ManagedAllocator> underlying_allocator_;
+  std::shared_ptr<Allocator> underlying_allocator_;
 };

 // An aligned allocator will allocate `size+kAlignment` allocation and adjust
paddle/fluid/memory/allocation/allocator.cc

@@ -24,6 +24,11 @@ bool Allocator::IsAllocThreadSafe() const { return false; }

 const char* BadAlloc::what() const noexcept { return msg_.c_str(); }

+MannualFreeAllocation::~MannualFreeAllocation() { allocator_->Free(this); }
+
+std::unique_ptr<Allocation> MannualFreeAllocator::Allocate(
+    size_t size, Allocator::Attr attr) {
+  return std::unique_ptr<Allocation>(AllocateImpl(size, attr));
+}
 }  // namespace allocation
 }  // namespace memory
 }  // namespace paddle
paddle/fluid/memory/allocation/allocator.h

@@ -121,19 +121,30 @@ class Allocator {
   virtual bool IsAllocThreadSafe() const;
 };

-// User need to invoke `Free` or `FreeUniquePtr` manually if allocated by
-// a manally managed allocator.
-class UnmanagedAllocator : public Allocator {
+class MannualFreeAllocator;
+class MannualFreeAllocation : public Allocation {
  public:
-  virtual void FreeUniquePtr(std::unique_ptr<Allocation> allocation) = 0;
+  MannualFreeAllocation(MannualFreeAllocator* allocator, void* ptr,
+                        size_t size, platform::Place place)
+      : Allocation(ptr, size, place), allocator_(allocator) {}
+
+  ~MannualFreeAllocation();
+
+ private:
+  MannualFreeAllocator* allocator_;
 };

-// The allocation will be managed by smart pointers. i.e., users do not need
-// to free allocation manually.
-class ManagedAllocator : public Allocator {
+// User need to invoke `Free` or `FreeUniquePtr` manually if allocated by
+// a manally managed allocator.
+class MannualFreeAllocator : public Allocator {
  public:
-  virtual std::shared_ptr<Allocation> AllocateShared(
-      size_t size, Allocator::Attr attr = kDefault) = 0;
+  std::unique_ptr<Allocation> Allocate(size_t size, Attr attr) final;
+
+ protected:
+  virtual void Free(MannualFreeAllocation* allocation) = 0;
+  virtual MannualFreeAllocation* AllocateImpl(size_t size,
+                                              Allocator::Attr attr) = 0;
+  friend class MannualFreeAllocation;
 };

 }  // namespace allocation
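The key trick in the new MannualFreeAllocation base is that its destructor calls back into the owning allocator, so ordinary unique_ptr or shared_ptr deletion triggers the right Free() with no wrapper object. A compilable sketch of that callback cycle, simplified to drop the ptr/size/place payload the real classes carry:

    #include <cstddef>
    #include <memory>

    class MannualFreeAllocator;

    class MannualFreeAllocation {
     public:
      explicit MannualFreeAllocation(MannualFreeAllocator* allocator)
          : allocator_(allocator) {}
      ~MannualFreeAllocation();  // defined after the allocator below

     private:
      MannualFreeAllocator* allocator_;
    };

    class MannualFreeAllocator {
     public:
      // The non-virtual Allocate wraps the raw result; subclasses only
      // implement AllocateImpl/Free, as in the diff above.
      std::unique_ptr<MannualFreeAllocation> Allocate() {
        return std::unique_ptr<MannualFreeAllocation>(AllocateImpl());
      }
      virtual ~MannualFreeAllocator() = default;

     protected:
      // Free releases the underlying resource; the allocation object itself
      // is deleted by the smart pointer that owns it.
      virtual void Free(MannualFreeAllocation* allocation) = 0;
      virtual MannualFreeAllocation* AllocateImpl() = 0;
      friend class MannualFreeAllocation;
    };

    // Destroying the allocation routes the release back to its allocator.
    MannualFreeAllocation::~MannualFreeAllocation() { allocator_->Free(this); }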
paddle/fluid/memory/allocation/allocator_facade.cc

@@ -24,7 +24,6 @@
 #include "paddle/fluid/memory/allocation/conditional_allocator.h"
 #include "paddle/fluid/memory/allocation/cpu_allocator.h"
 #include "paddle/fluid/memory/allocation/locked_allocator.h"
-#include "paddle/fluid/memory/allocation/naive_managed_allocator.h"
 #include "paddle/fluid/memory/allocation/retry_allocator.h"
 #include "paddle/fluid/memory/allocation/zero_size_allocator.h"
 #include "paddle/fluid/platform/cpu_info.h"
@@ -46,34 +45,28 @@ namespace memory {
 namespace allocation {

 // TODO(yy): Dirty code here. This class should be configurable in runtime.
-class CPUManagedAllocator : public ManagedAllocator {
+class CPUManagedAllocator : public Allocator {
  public:
-  CPUManagedAllocator()
-      : normal_allocator_(NaiveManagedAllocator::Create(
-            std::unique_ptr<Allocator>(new CPUAllocator()))) {}
+  CPUManagedAllocator() : normal_allocator_(new CPUAllocator()) {}

   std::unique_ptr<Allocation> Allocate(size_t size, Attr attr) override {
     return normal_allocator_->Allocate(size, attr);
   }

-  std::shared_ptr<Allocation> AllocateShared(size_t size, Attr attr) override {
-    return normal_allocator_->AllocateShared(size, attr);
-  }
-
   bool IsAllocThreadSafe() const override { return true; }

  private:
-  std::shared_ptr<ManagedAllocator> normal_allocator_;
+  std::shared_ptr<Allocator> normal_allocator_;
 };

 // TODO(yy): Dirty code here. This class should be configurable in runtime.
-class ChunkedManagedAllocator : public ManagedAllocator {
+class ChunkedManagedAllocator : public Allocator {
  public:
   explicit ChunkedManagedAllocator(std::unique_ptr<Allocator> system_allocator,
                                    size_t max_chunk_size, size_t capacity = 1,
                                    int64_t retry_time = -1)
       : max_chunk_size_(max_chunk_size), retry_time_(retry_time) {
-    raw_allocator_ = NaiveManagedAllocator::Create(std::move(system_allocator));
+    raw_allocator_ = std::move(system_allocator);

     if (max_chunk_size_ == 0) {
       default_allocator_ = raw_allocator_;
@@ -114,11 +107,7 @@ class ChunkedManagedAllocator : public ManagedAllocator {
     return default_allocator_->Allocate(size, attr);
   }

-  std::shared_ptr<Allocation> AllocateShared(size_t size, Attr attr) override {
-    return default_allocator_->AllocateShared(size, attr);
-  }
-
-  std::shared_ptr<ManagedAllocator> BestFitAllocatorCreator() {
+  std::shared_ptr<Allocator> BestFitAllocatorCreator() {
     chunks_.emplace_back(raw_allocator_->Allocate(max_chunk_size_));
     auto* allocation = chunks_.back().get();
     std::unique_ptr<Allocator> unmanaged_allocator(new LockedAllocator(
@@ -127,12 +116,13 @@ class ChunkedManagedAllocator : public ManagedAllocator {
     if (retry_time_ <= 0) {
       VLOG(10) << "Create NaiveManagedAllocator without retry";
       return std::make_shared<AlignedAllocator<64u>>(
-          NaiveManagedAllocator::Create(std::move(unmanaged_allocator)));
+          std::move(unmanaged_allocator));
     } else {
       VLOG(10) << "Create RetryAllocator with retry_time " << retry_time_
                << "ms";
-      return std::make_shared<AlignedAllocator<64u>>(RetryAllocator::Create(
-          std::move(unmanaged_allocator), static_cast<size_t>(retry_time_)));
+      auto tmp = std::make_shared<RetryAllocator>(
+          std::move(unmanaged_allocator), static_cast<size_t>(retry_time_));
+      return std::make_shared<AlignedAllocator<64u>>(tmp);
     }
   }
@@ -142,8 +132,8 @@ class ChunkedManagedAllocator : public ManagedAllocator {
   size_t max_chunk_size_;
   int64_t retry_time_;
   std::vector<std::unique_ptr<Allocation>> chunks_;
-  std::shared_ptr<ManagedAllocator> raw_allocator_;
-  std::shared_ptr<ManagedAllocator> default_allocator_;
+  std::shared_ptr<Allocator> raw_allocator_;
+  std::shared_ptr<Allocator> default_allocator_;
 };

 #ifdef PADDLE_WITH_CUDA
@@ -193,7 +183,7 @@ class CUDAPinnedManagedAllocator : public ChunkedManagedAllocator {
 class AllocatorFacadePrivate {
  public:
-  std::map<platform::Place, std::shared_ptr<ManagedAllocator>> allocators_;
+  std::map<platform::Place, std::shared_ptr<Allocator>> allocators_;

   ~AllocatorFacadePrivate() = default;
@@ -245,7 +235,8 @@ AllocatorFacade& AllocatorFacade::Instance() {
 std::shared_ptr<Allocation> AllocatorFacade::AllocShared(
     const platform::Place& place, size_t size, Allocator::Attr attr) {
-  return m_->allocators_.at(place)->AllocateShared(size, attr);
+  return std::shared_ptr<Allocation>(
+      m_->allocators_.at(place)->Allocate(size, attr).release());
 }

 std::unique_ptr<Allocation> AllocatorFacade::Alloc(const platform::Place& place,
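ChunkedManagedAllocator now composes plain Allocator decorators directly: each chunk from the raw allocator gets a BestFit+Locked stack, optionally a RetryAllocator, then an AlignedAllocator on top. A sketch of that decorator stacking with stand-in classes (the real creators also take chunk allocations; only the wiring pattern is shown):

    #include <cstddef>
    #include <cstdint>
    #include <memory>
    #include <utility>

    struct Allocator { virtual ~Allocator() = default; };

    // Stand-ins for the decorators composed in BestFitAllocatorCreator().
    struct LockedAllocator : Allocator {
      explicit LockedAllocator(std::unique_ptr<Allocator> u)
          : inner(std::move(u)) {}
      std::unique_ptr<Allocator> inner;
    };
    struct RetryAllocator : Allocator {
      RetryAllocator(std::unique_ptr<Allocator> u, size_t retry_ms)
          : inner(std::move(u)), retry_time(retry_ms) {}
      std::unique_ptr<Allocator> inner;
      size_t retry_time;
    };
    template <size_t kAlignment>
    struct AlignedAllocator : Allocator {
      explicit AlignedAllocator(std::shared_ptr<Allocator> u)
          : inner(std::move(u)) {}
      std::shared_ptr<Allocator> inner;
    };

    // Mirrors the if/else in the diff: retry is optional, alignment is not.
    std::shared_ptr<Allocator> BuildStack(
        std::unique_ptr<Allocator> chunk_allocator, int64_t retry_time) {
      std::unique_ptr<Allocator> locked(
          new LockedAllocator(std::move(chunk_allocator)));
      if (retry_time <= 0) {
        return std::make_shared<AlignedAllocator<64u>>(std::move(locked));
      }
      auto tmp = std::make_shared<RetryAllocator>(
          std::move(locked), static_cast<size_t>(retry_time));
      return std::make_shared<AlignedAllocator<64u>>(tmp);
    }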
paddle/fluid/memory/allocation/auto_increment_allocator.cc

@@ -20,20 +20,61 @@ namespace allocation {

 std::unique_ptr<Allocation> AutoIncrementAllocator::Allocate(
     size_t size, Allocator::Attr attr) {
-  return InvokeOrCreateUnderlyingAllocator([&](ManagedAllocator& allocator) {
-    return allocator.Allocate(size, attr);
-  });
-}
-
-std::shared_ptr<Allocation> AutoIncrementAllocator::AllocateShared(
-    size_t size, Allocator::Attr attr) {
-  return InvokeOrCreateUnderlyingAllocator([&](ManagedAllocator& allocator) {
-    return allocator.AllocateShared(size, attr);
-  });
+  auto cur = prev_success_allocator_.load();
+  size_t retry_count = allocator_num_.load();
+  size_t allocator_num = retry_count;
+  while (retry_count-- > 0) {  // until there retry count is zero
+    try {
+      auto res = underlying_allocators_[cur]->Allocate(size, attr);
+      prev_success_allocator_ = cur;
+      return res;
+    } catch (BadAlloc&) {
+      if (++cur >= allocator_num) {
+        cur = 0;
+      }
+    } catch (...) {
+      // if there is another type of allocation, just rethrow it.
+      throw;
+    }
+  }
+  // This happens when the first allocator is exhausted and
+  // there are more than 1 allocation requests
+  // In this situation, the first allocation request would success
+  // and the second allocation request would fail if we do not use
+  // the newly created allocator by the first allocation request.
+  for (cur = allocator_num; cur < allocator_num_; ++cur) {
+    try {
+      auto ret = underlying_allocators_[cur]->Allocate(size, attr);
+      prev_success_allocator_ = cur;
+      return ret;
+    } catch (BadAlloc&) {
+    } catch (...) {
+      throw;
+    }
+  }
+  // No suitable allocator
+  return CreateNewAllocator()->Allocate(size, attr);
 }

 bool AutoIncrementAllocator::IsAllocThreadSafe() const { return true; }

+std::shared_ptr<Allocator> AutoIncrementAllocator::CreateNewAllocator() {
+  std::lock_guard<std::mutex> guard(mtx_);
+  auto old_size = allocator_num_.load();
+  PADDLE_ENFORCE_LT(old_size, underlying_allocators_.size(),
+                    "Allocator number exceeds capacity %d",
+                    underlying_allocators_.size());
+  underlying_allocators_[old_size] = creator_();
+  prev_success_allocator_ = old_size;
+  ++allocator_num_;
+  PADDLE_ENFORCE(
+      underlying_allocators_[old_size]->IsAllocThreadSafe(),
+      "the underlying allocator must be thread safe. This is a program "
+      "bug.");
+  return underlying_allocators_[old_size];
+}
 }  // namespace allocation
 }  // namespace memory
 }  // namespace paddle
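AutoIncrementAllocator's search now lives directly in Allocate(): start from the last successful allocator, sweep the known ring once, then probe allocators created concurrently after the snapshot, and only then create a new one under the lock. A standalone sketch of that search order using atomics; each underlying allocator is stubbed as a callable that throws BadAlloc when exhausted, and BadAlloc stands in for Paddle's exception type:

    #include <atomic>
    #include <cstddef>
    #include <functional>
    #include <mutex>
    #include <stdexcept>
    #include <vector>

    struct BadAlloc : std::runtime_error {
      using std::runtime_error::runtime_error;
    };

    using Alloc = std::function<int(size_t)>;  // returns a ticket or throws

    class AutoIncrement {
     public:
      AutoIncrement(std::function<Alloc()> creator, size_t capacity)
          : creator_(std::move(creator)), slots_(capacity) {}

      int Allocate(size_t size) {
        size_t cur = prev_success_.load();
        size_t retry_count = num_.load();
        size_t snapshot = retry_count;
        while (retry_count-- > 0) {  // one sweep over the known ring
          try {
            int r = slots_[cur](size);
            prev_success_ = cur;
            return r;
          } catch (BadAlloc&) {
            if (++cur >= snapshot) cur = 0;
          }
        }
        // Allocators created by other threads after our snapshot.
        for (cur = snapshot; cur < num_.load(); ++cur) {
          try {
            int r = slots_[cur](size);
            prev_success_ = cur;
            return r;
          } catch (BadAlloc&) {
          }
        }
        return CreateNew()(size);  // last resort: grow the ring
      }

     private:
      Alloc& CreateNew() {
        // The real code additionally enforces old < capacity.
        std::lock_guard<std::mutex> guard(mtx_);
        size_t old = num_.load();
        slots_[old] = creator_();
        prev_success_ = old;
        ++num_;
        return slots_[old];
      }

      std::function<Alloc()> creator_;
      std::vector<Alloc> slots_;
      std::atomic<size_t> num_{0};
      std::atomic<size_t> prev_success_{0};
      std::mutex mtx_;
    };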
paddle/fluid/memory/allocation/auto_increment_allocator.h

@@ -46,76 +46,20 @@ namespace allocation {
 // thread-safe std::vector with varying size is hard to implement.
 // Fortunately, we can get the total GPU memory and each chunk size.
 // Therefore, we can get the suitable capacity of AutoIncrementAllocator.
-class AutoIncrementAllocator : public ManagedAllocator {
+class AutoIncrementAllocator : public Allocator {
  public:
   // Creator is the method to create ManagedAllocator
-  using AllocatorCreator = std::function<std::shared_ptr<ManagedAllocator>()>;
+  using AllocatorCreator = std::function<std::shared_ptr<Allocator>()>;

   explicit AutoIncrementAllocator(AllocatorCreator&& creator, size_t capacity)
       : creator_(std::move(creator)), underlying_allocators_(capacity) {}

   std::unique_ptr<Allocation> Allocate(size_t size, Attr attr) override;
-  std::shared_ptr<Allocation> AllocateShared(size_t size, Attr attr) override;
   bool IsAllocThreadSafe() const override;

  private:
-  // NOTE: here use template Callback, it can be inlined when -O3
-  template <typename Callback>
-  inline typename std::result_of<Callback(ManagedAllocator&)>::type
-  InvokeOrCreateUnderlyingAllocator(Callback callback) {
-    auto cur = prev_success_allocator_.load();
-    size_t retry_count = allocator_num_.load();
-    size_t allocator_num = retry_count;
-    while (retry_count-- > 0) {  // until there retry count is zero
-      try {
-        auto res = callback(*underlying_allocators_[cur]);
-        prev_success_allocator_ = cur;
-        return std::move(res);
-      } catch (BadAlloc&) {
-        if (++cur >= allocator_num) {
-          cur = 0;
-        }
-      } catch (...) {
-        // if there is another type of allocation, just rethrow it.
-        throw;
-      }
-    }
-    // This happens when the first allocator is exhausted and
-    // there are more than 1 allocation requests
-    // In this situation, the first allocation request would success
-    // and the second allocation request would fail if we do not use
-    // the newly created allocator by the first allocation request.
-    for (cur = allocator_num; cur < allocator_num_; ++cur) {
-      try {
-        auto ret = callback(*underlying_allocators_[cur]);
-        prev_success_allocator_ = cur;
-        return std::move(ret);
-      } catch (BadAlloc&) {
-      } catch (...) {
-        throw;
-      }
-    }
-    // No suitable allocator
-    ManagedAllocator* new_allocator;
-    {
-      std::lock_guard<std::mutex> guard(mtx_);
-      auto old_size = allocator_num_.load();
-      PADDLE_ENFORCE_LT(old_size, underlying_allocators_.size(),
-                        "Allocator number exceeds capacity %d",
-                        underlying_allocators_.size());
-      underlying_allocators_[old_size] = creator_();
-      new_allocator = underlying_allocators_[old_size].get();
-      prev_success_allocator_ = old_size;
-      ++allocator_num_;
-    }
-    PADDLE_ENFORCE(
-        new_allocator->IsAllocThreadSafe(),
-        "the underlying allocator must be thread safe. This is a program "
-        "bug.");
-    return callback(*new_allocator);
-  }
+  std::shared_ptr<Allocator> CreateNewAllocator();

   AllocatorCreator creator_;
paddle/fluid/memory/allocation/best_fit_allocator.cc

@@ -45,23 +45,6 @@ BestFitAllocator::BestFitAllocator(Allocation* allocation)
                                          {chunk.size_, chunks_.begin()});
 }

-std::unique_ptr<Allocation> BestFitAllocator::Allocate(size_t size, Attr attr) {
-  auto highest_set_bit = static_cast<size_t>(HighestBitPos(size));
-  MapIt map_it;
-  for (; highest_set_bit < free_chunks_.size(); ++highest_set_bit) {
-    map_it = free_chunks_[highest_set_bit].lower_bound(size);
-    if (map_it != free_chunks_[highest_set_bit].end()) {
-      break;
-    }
-  }
-  if (UNLIKELY(highest_set_bit == free_chunks_.size())) {
-    throw BadAlloc(string::Sprintf(
-        "Cannot allocate %d, All fragments size is %d", size, FreeSize()));
-  }
-  auto chunk_it = SplitChunk(size, highest_set_bit, map_it);
-  return std::unique_ptr<Allocation>(new BestFitAllocation(this, chunk_it));
-}
-
 size_t BestFitAllocator::FreeSize() const {
   size_t acc = 0;
   for (auto& array_item : free_chunks_) {
@@ -104,8 +87,30 @@ BestFitAllocator::ListIt BestFitAllocator::SplitChunk(size_t request_size,
   return to_use_it;
 }

-void BestFitAllocator::FreeUniquePtr(std::unique_ptr<Allocation> allocation) {
-  auto* bf_allocation = dynamic_cast<BestFitAllocation*>(allocation.get());
+void BestFitAllocator::InsertFreeNode(const ListIt& it) {
+  auto pos = static_cast<size_t>(HighestBitPos(it->size_));
+  auto& free_map = free_chunks_[pos];
+  free_map.insert({it->size_, it});
+}
+void BestFitAllocator::EraseFreeNode(const ListIt& it) {
+  size_t pos = static_cast<size_t>(HighestBitPos(it->size_));
+  auto& free_map = free_chunks_[pos];
+  auto map_it = free_map.find(it->size_);
+  while (map_it->second != it && map_it != free_map.end()) {
+    ++map_it;
+  }
+  PADDLE_ENFORCE(map_it != free_map.end());
+  free_map.erase(map_it);
+}
+size_t BestFitAllocator::NumFreeChunks() const {
+  size_t num = 0;
+  for (auto& array_item : free_chunks_) {
+    num += array_item.size();
+  }
+  return num;
+}
+void BestFitAllocator::Free(MannualFreeAllocation* allocation) {
+  auto* bf_allocation = dynamic_cast<BestFitAllocation*>(allocation);
   auto chunk_it = bf_allocation->ChunkIterator();
   PADDLE_ENFORCE(!chunk_it->is_free);
   chunk_it->is_free = true;
@@ -132,38 +137,32 @@ void BestFitAllocator::FreeUniquePtr(std::unique_ptr<Allocation> allocation) {
   InsertFreeNode(chunk_it);
 }

-void BestFitAllocator::InsertFreeNode(const ListIt& it) {
-  auto pos = static_cast<size_t>(HighestBitPos(it->size_));
-  auto& free_map = free_chunks_[pos];
-  free_map.insert({it->size_, it});
-}
-void BestFitAllocator::EraseFreeNode(const ListIt& it) {
-  size_t pos = static_cast<size_t>(HighestBitPos(it->size_));
-  auto& free_map = free_chunks_[pos];
-  auto map_it = free_map.find(it->size_);
-  while (map_it->second != it && map_it != free_map.end()) {
-    ++map_it;
-  }
-  PADDLE_ENFORCE(map_it != free_map.end());
-  free_map.erase(map_it);
-}
-
-size_t BestFitAllocator::NumFreeChunks() const {
-  size_t num = 0;
-  for (auto& array_item : free_chunks_) {
-    num += array_item.size();
-  }
-  return num;
-}
+MannualFreeAllocation* BestFitAllocator::AllocateImpl(size_t size,
+                                                      Allocator::Attr attr) {
+  auto highest_set_bit = static_cast<size_t>(HighestBitPos(size));
+  MapIt map_it;
+  for (; highest_set_bit < free_chunks_.size(); ++highest_set_bit) {
+    map_it = free_chunks_[highest_set_bit].lower_bound(size);
+    if (map_it != free_chunks_[highest_set_bit].end()) {
+      break;
+    }
+  }
+  if (UNLIKELY(highest_set_bit == free_chunks_.size())) {
+    throw BadAlloc(string::Sprintf(
+        "Cannot allocate %d, All fragments size is %d", size, FreeSize()));
+  }
+  auto chunk_it = SplitChunk(size, highest_set_bit, map_it);
+  return new BestFitAllocation(this, chunk_it);
+}

 BestFitAllocation::BestFitAllocation(
     paddle::memory::allocation::BestFitAllocator* allocator,
     typename details::ChunkList::iterator chunk_it)
-    : Allocation(reinterpret_cast<void*>(
-                     reinterpret_cast<uintptr_t>(allocator->BasePtr()) +
-                     chunk_it->offset_),
-                 chunk_it->size_, allocator->Place()),
-      allocator_(allocator),
+    : MannualFreeAllocation(
+          allocator,
+          reinterpret_cast<void*>(
+              reinterpret_cast<uintptr_t>(allocator->BasePtr()) +
+              chunk_it->offset_),
+          chunk_it->size_, allocator->Place()),
       chunk_it_(chunk_it) {}
 }  // namespace allocation
 }  // namespace memory
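BestFitAllocator keeps free chunks in bins indexed by the position of the highest set bit of the chunk size, so a request only scans bins that could possibly fit it and takes the smallest adequate chunk from each. A sketch of that lookup (HighestBitPos approximated with a portable loop; the real AllocateImpl then splits the found chunk):

    #include <cstddef>
    #include <map>
    #include <vector>

    // Position of the highest set bit; 0 for input 0 (portable stand-in for
    // the compiler intrinsic the real allocator uses).
    static size_t HighestBitPos(size_t v) {
      size_t pos = 0;
      while (v >>= 1) ++pos;
      return pos;
    }

    // free_chunks[b] holds chunks whose size has highest bit b, keyed by
    // size; the mapped int is an illustrative chunk id.
    using FreeChunkBin = std::vector<std::multimap<size_t, int>>;

    // Best-fit search as in BestFitAllocator::AllocateImpl: start at the bin
    // the request maps to, take the smallest chunk >= size, escalate bins.
    int FindChunk(const FreeChunkBin& free_chunks, size_t size) {
      for (size_t bit = HighestBitPos(size); bit < free_chunks.size(); ++bit) {
        auto it = free_chunks[bit].lower_bound(size);
        if (it != free_chunks[bit].end()) return it->second;
      }
      return -1;  // the real code throws BadAlloc with the total free size
    }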
paddle/fluid/memory/allocation/best_fit_allocator.h

@@ -71,7 +71,7 @@ using FreeChunkBin =
 class BestFitAllocator;

 // The BestFitAllocation maintain the List Node iterator.
-class BestFitAllocation : public Allocation {
+class BestFitAllocation : public MannualFreeAllocation {
  private:
   using ListIt = typename details::ChunkList::iterator;
@@ -81,7 +81,6 @@ class BestFitAllocation : public Allocation {
   const ListIt& ChunkIterator() const { return chunk_it_; }

  private:
-  BestFitAllocator* allocator_;
   typename details::ChunkList::iterator chunk_it_;
 };
@@ -99,7 +98,7 @@ class BestFitAllocation : public Allocation {
 //
 // To free an allocation, it will set the chunk of allocation to free and merge
 // the prev-chunk and the next-chunk when possible.
-class BestFitAllocator : public UnmanagedAllocator {
+class BestFitAllocator : public MannualFreeAllocator {
  public:
   explicit BestFitAllocator(Allocation* allocation);
@@ -107,9 +106,9 @@ class BestFitAllocator : public UnmanagedAllocator {
   const platform::Place& Place() const { return allocation_->place(); }

-  std::unique_ptr<Allocation> Allocate(size_t size,
-                                       Attr attr = kDefault) override;
-  void FreeUniquePtr(std::unique_ptr<Allocation> allocation) override;
+  //  std::unique_ptr<Allocation> Allocate(size_t size,
+  //                                       Attr attr = kDefault) override;
+  //  void FreeUniquePtr(std::unique_ptr<Allocation> allocation) override;

   size_t NumFreeChunks() const;
@@ -123,6 +122,12 @@ class BestFitAllocator : public UnmanagedAllocator {
   void EraseFreeNode(const ListIt& it);
   void InsertFreeNode(const ListIt& it);

+ protected:
+  void Free(MannualFreeAllocation* allocation) override;
+  MannualFreeAllocation* AllocateImpl(size_t size,
+                                      Allocator::Attr attr) override;
+
+ private:
   Allocation* allocation_;  // not owned
   details::ChunkList chunks_;
   details::FreeChunkBin free_chunks_;
paddle/fluid/memory/allocation/buffered_allocator.cc

@@ -16,14 +16,14 @@
 #include <algorithm>
 #include <limits>
 #include <utility>
+#include "paddle/fluid/memory/allocation/underlying_manual_allocation.h"

 namespace paddle {
 namespace memory {
 namespace allocation {

-BufferedAllocator::BufferedAllocator(std::unique_ptr<Allocator>&& allocator) {
-  underlying_allocator_.reset(
-      dynamic_cast<UnmanagedAllocator*>(allocator.release()));
+BufferedAllocator::BufferedAllocator(std::unique_ptr<Allocator>&& allocator)
+    : underlying_allocator_(std::move(allocator)) {
   PADDLE_ENFORCE_NOT_NULL(
       underlying_allocator_,
       "Underlying allocator of BufferedAllocator must be unmanaged");
@@ -34,26 +34,6 @@ BufferedAllocator::BufferedAllocator(std::unique_ptr<Allocator>&& allocator) {
 BufferedAllocator::~BufferedAllocator() { FreeCache(-1UL); }

-std::unique_ptr<Allocation> BufferedAllocator::Allocate(size_t size,
-                                                        Allocator::Attr attr) {
-  {
-    platform::LockGuardPtr<std::mutex> guard(mtx_);
-    auto it = allocations_.lower_bound(size);
-    if (it != allocations_.end() && it->first < size * 2) {
-      std::unique_ptr<Allocation> result(std::move(it->second));
-      allocations_.erase(it);
-      return result;
-    }
-  }
-
-  try {
-    return underlying_allocator_->Allocate(size, attr);
-  } catch (BadAlloc&) {
-    FreeCache(size);
-    return underlying_allocator_->Allocate(size, attr);
-  }
-}
-
 void BufferedAllocator::FreeCache(size_t size) {
   platform::LockGuardPtr<std::mutex> guard(mtx_);
   if (UNLIKELY(size == 0)) return;
@@ -61,19 +41,42 @@ void BufferedAllocator::FreeCache(size_t size) {
   while (!allocations_.empty()) {  // free the largest
     auto it = --allocations_.end();
     cur += it->second->size();
-    underlying_allocator_->FreeUniquePtr(std::move(it->second));
     allocations_.erase(it);
     if (cur >= size) return;
   }
 }

-void BufferedAllocator::FreeUniquePtr(std::unique_ptr<Allocation> allocation) {
+bool BufferedAllocator::IsAllocThreadSafe() const {
+  return this->underlying_allocator_->IsAllocThreadSafe();
+}
+void BufferedAllocator::Free(MannualFreeAllocation* allocation) {
   platform::LockGuardPtr<std::mutex> guard(mtx_);
-  allocations_.emplace(allocation->size(), std::move(allocation));
+  std::unique_ptr<Allocation> new_allocation(new UnderlyingManualAllocation(
+      this, std::move(reinterpret_cast<UnderlyingManualAllocation*>(allocation)
+                          ->allocation_)));
+  allocations_.emplace(allocation->size(), std::move(new_allocation));
 }
-
-bool BufferedAllocator::IsAllocThreadSafe() const {
-  return this->underlying_allocator_->IsAllocThreadSafe();
+MannualFreeAllocation* BufferedAllocator::AllocateImpl(size_t size,
+                                                       Allocator::Attr attr) {
+  {
+    platform::LockGuardPtr<std::mutex> guard(mtx_);
+    auto it = allocations_.lower_bound(size);
+    if (it != allocations_.end() && it->first < size * 2) {
+      std::unique_ptr<Allocation> result(std::move(it->second));
+      allocations_.erase(it);
+      return new UnderlyingManualAllocation(this, std::move(result));
+    }
+  }
+
+  try {
+    return new UnderlyingManualAllocation(
+        this, underlying_allocator_->Allocate(size, attr));
+  } catch (BadAlloc&) {
+    FreeCache(size);
+    return new UnderlyingManualAllocation(
+        this, underlying_allocator_->Allocate(size, attr));
+  }
 }
 }  // namespace allocation
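BufferedAllocator's cache is a size-keyed std::multimap of freed allocations: AllocateImpl reuses the smallest cached block that is at least `size` but smaller than `2 * size`, and otherwise falls back to the underlying allocator, flushing the cache once on BadAlloc. A sketch of just the cache logic, with illustrative types and no locking or fallback path:

    #include <cstddef>
    #include <map>
    #include <memory>
    #include <utility>

    struct Block { size_t size; };

    using Cache = std::multimap<size_t, std::unique_ptr<Block>>;

    // Reuse a cached block if its size is in [size, 2 * size); otherwise
    // signal a miss so the caller allocates from the underlying allocator.
    std::unique_ptr<Block> TakeFromCache(Cache* cache, size_t size) {
      auto it = cache->lower_bound(size);  // smallest block >= size
      if (it != cache->end() && it->first < size * 2) {
        std::unique_ptr<Block> result(std::move(it->second));
        cache->erase(it);
        return result;
      }
      return nullptr;  // miss: allocate fresh
    }

    // Freeing re-inserts the block keyed by its size, as in
    // BufferedAllocator::Free.
    void ReturnToCache(Cache* cache, std::unique_ptr<Block> b) {
      size_t key = b->size;
      cache->emplace(key, std::move(b));
    }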
paddle/fluid/memory/allocation/buffered_allocator.h

@@ -29,16 +29,17 @@ namespace allocation {
 // memory allocation and reuse memory.
 // BufferedAllocator provides the same thread-safety level as
 // underlying_allocator_
-class BufferedAllocator : public UnmanagedAllocator {
+class BufferedAllocator : public MannualFreeAllocator {
  public:
   explicit BufferedAllocator(std::unique_ptr<Allocator>&& allocator);

   ~BufferedAllocator();

-  std::unique_ptr<Allocation> Allocate(
-      size_t size, Allocator::Attr attr = Allocator::Attr::kDefault) override;
-
-  void FreeUniquePtr(std::unique_ptr<Allocation> allocation) override;
+  //  std::unique_ptr<Allocation> Allocate(
+  //      size_t size, Allocator::Attr attr = Allocator::Attr::kDefault)
+  //      override;
+  //
+  //  void FreeUniquePtr(std::unique_ptr<Allocation> allocation) override;

   bool IsAllocThreadSafe() const override;
@@ -48,7 +49,13 @@ class BufferedAllocator : public UnmanagedAllocator {
  private:
   void FreeCache(size_t size);

-  std::unique_ptr<UnmanagedAllocator> underlying_allocator_;
+ protected:
+  void Free(MannualFreeAllocation* allocation) override;
+  MannualFreeAllocation* AllocateImpl(size_t size,
+                                      Allocator::Attr attr) override;
+
+ private:
+  std::unique_ptr<Allocator> underlying_allocator_;
   std::multimap<size_t, std::unique_ptr<Allocation>> allocations_;
   std::unique_ptr<std::mutex> mtx_;
 };
paddle/fluid/memory/allocation/conditional_allocator.cc

@@ -20,23 +20,27 @@ namespace allocation {

 ConditionalAllocator& ConditionalAllocator::AddAllocator(
     std::function<bool(size_t, Allocator::Attr)> func,
-    std::shared_ptr<ManagedAllocator> allocator) {
+    std::shared_ptr<Allocator> allocator) {
   underlying_allocators_.emplace_back(std::move(func), std::move(allocator));
   return *this;
 }
+
 std::unique_ptr<Allocation> ConditionalAllocator::Allocate(
     size_t size, Allocator::Attr attr) {
-  return SelectAndInvoke(size, attr, [&](ManagedAllocator& allocator) {
-    return allocator.Allocate(size, attr);
-  });
+  for (auto& pair : underlying_allocators_) {
+    if (pair.first(size, attr)) {
+      return pair.second->Allocate(size, attr);
+    }
+  }
+  throw BadAlloc("No suitable allocator");
 }

-std::shared_ptr<Allocation> ConditionalAllocator::AllocateShared(
-    size_t size, Allocator::Attr attr) {
-  return SelectAndInvoke(size, attr, [&](ManagedAllocator& allocator) {
-    return allocator.AllocateShared(size, attr);
-  });
+bool ConditionalAllocator::IsAllocThreadSafe() const {
+  return std::all_of(underlying_allocators_.begin(),
+                     underlying_allocators_.end(),
+                     [](const AllocatorWithCond& allocatorWithCond) {
+                       return allocatorWithCond.second->IsAllocThreadSafe();
+                     });
 }

-bool ConditionalAllocator::IsAllocThreadSafe() const { return true; }
-
 }  // namespace allocation
 }  // namespace memory
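ConditionalAllocator is now a flat predicate table: each entry pairs a `(size, attr) -> bool` test with an allocator, and Allocate walks the table and delegates to the first match. A sketch of that dispatch with attributes reduced to an int and BadAlloc standing in for the Paddle type:

    #include <cstddef>
    #include <functional>
    #include <memory>
    #include <stdexcept>
    #include <utility>
    #include <vector>

    struct BadAlloc : std::runtime_error {
      using std::runtime_error::runtime_error;
    };

    struct Allocator {
      virtual int Allocate(size_t size, int attr) = 0;
      virtual ~Allocator() = default;
    };

    class ConditionalAllocator : public Allocator {
     public:
      ConditionalAllocator& AddAllocator(std::function<bool(size_t, int)> cond,
                                         std::shared_ptr<Allocator> allocator) {
        table_.emplace_back(std::move(cond), std::move(allocator));
        return *this;  // chaining, as in the real AddAllocator
      }

      int Allocate(size_t size, int attr) override {
        for (auto& entry : table_) {
          if (entry.first(size, attr)) return entry.second->Allocate(size, attr);
        }
        throw BadAlloc("No suitable allocator");
      }

     private:
      std::vector<std::pair<std::function<bool(size_t, int)>,
                            std::shared_ptr<Allocator>>> table_;
    };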
paddle/fluid/memory/allocation/conditional_allocator.h

@@ -38,32 +38,21 @@ namespace allocation {
 //   // else
 //   return true;
 // }, allocator_c);
-class ConditionalAllocator : public ManagedAllocator {
+class ConditionalAllocator : public Allocator {
  public:
   ConditionalAllocator() = default;

   ConditionalAllocator& AddAllocator(
       std::function<bool(size_t, Attr)> func,
-      std::shared_ptr<ManagedAllocator> allocator);
+      std::shared_ptr<Allocator> allocator);

   std::unique_ptr<Allocation> Allocate(size_t size, Attr attr) override;
-  std::shared_ptr<Allocation> AllocateShared(size_t size, Attr attr) override;

   bool IsAllocThreadSafe() const override;

  private:
-  template <typename Callback>
-  inline typename std::result_of<Callback(ManagedAllocator&)>::type
-  SelectAndInvoke(size_t size, Attr attr, Callback callback) {
-    for (auto& pair : underlying_allocators_) {
-      if (pair.first(size, attr)) {
-        return callback(*pair.second);
-      }
-    }
-    PADDLE_THROW("No suitable allocator");
-  }
-
-  std::vector<std::pair<std::function<bool(size_t, Attr)>,
-                        std::shared_ptr<ManagedAllocator>>>
-      underlying_allocators_;
+  using AllocatorWithCond =
+      std::pair<std::function<bool(size_t, Attr)>, std::shared_ptr<Allocator>>;
+  std::vector<AllocatorWithCond> underlying_allocators_;
 };
paddle/fluid/memory/allocation/cpu_allocator.cc

@@ -20,21 +20,27 @@ namespace paddle {
 namespace memory {
 namespace allocation {

-std::unique_ptr<Allocation> CPUAllocator::Allocate(size_t size, Attr attr) {
-  void* ptr;
+CPUAllocation::CPUAllocation(
+    paddle::memory::allocation::CPUAllocator* allocator, void* ptr,
+    size_t size)
+    : MannualFreeAllocation(allocator, ptr, size, platform::CPUPlace()) {}
+
+bool CPUAllocator::IsAllocThreadSafe() const { return true; }
+
+void CPUAllocator::Free(MannualFreeAllocation* allocation) {
+  PADDLE_ENFORCE_NOT_NULL(dynamic_cast<CPUAllocation*>(allocation));
+  free(allocation->ptr());
+}
+
+MannualFreeAllocation* CPUAllocator::AllocateImpl(size_t size,
+                                                  Allocator::Attr attr) {
+  void* ptr;
   auto status = posix_memalign(&ptr, kAlignment, size);
   if (UNLIKELY(status) != 0) {
     throw BadAlloc(string::Sprintf("Cannot allocate cpu memory %d. Errno is %d",
                                    size, status));
   }
-  return std::unique_ptr<Allocation>(new CPUAllocation(ptr, size));
-}
-
-void CPUAllocator::FreeUniquePtr(std::unique_ptr<Allocation> allocation) {
-  PADDLE_ENFORCE_NOT_NULL(dynamic_cast<CPUAllocation*>(allocation.get()));
-  free(allocation->ptr());
+  return new CPUAllocation(this, ptr, size);
 }
-
-bool CPUAllocator::IsAllocThreadSafe() const { return true; }
 }  // namespace allocation
 }  // namespace memory
 }  // namespace paddle
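The CPU path still allocates through posix_memalign with 64-byte alignment and maps a nonzero status to BadAlloc; only the wrapper type around the pointer changed. A self-contained sketch of that allocation step (POSIX only; the AlignedAlloc name and the plain runtime_error are illustrative):

    #include <cstdlib>
    #include <stdexcept>
    #include <string>

    constexpr size_t kAlignment = 64;  // cache-line alignment, as in CPUAllocator

    // posix_memalign returns an errno-style status instead of setting errno;
    // a nonzero status becomes BadAlloc inside the allocator.
    void* AlignedAlloc(size_t size) {
      void* ptr = nullptr;
      int status = posix_memalign(&ptr, kAlignment, size);
      if (status != 0) {
        throw std::runtime_error("Cannot allocate cpu memory " +
                                 std::to_string(size) + ". Errno is " +
                                 std::to_string(status));
      }
      return ptr;  // release with free()
    }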
paddle/fluid/memory/allocation/cpu_allocator.h

@@ -25,19 +25,21 @@ namespace allocation {
 //
 // NOTE(yy): It is no need to use `BestFitAllocator` in CPU. We can import
 // an open-sourced allocator into Paddle.
-class CPUAllocation : public Allocation {
+class CPUAllocator;
+class CPUAllocation : public MannualFreeAllocation {
  public:
-  CPUAllocation(void* ptr, size_t size)
-      : Allocation(ptr, size, platform::CPUPlace()) {}
+  CPUAllocation(CPUAllocator* allocator, void* ptr, size_t size);
 };

-class CPUAllocator : public UnmanagedAllocator {
+class CPUAllocator : public MannualFreeAllocator {
  public:
   constexpr static size_t kAlignment = 64u;
-  std::unique_ptr<Allocation> Allocate(size_t size,
-                                       Attr attr = kDefault) override;
-  void FreeUniquePtr(std::unique_ptr<Allocation> allocation) override;
   bool IsAllocThreadSafe() const override;
+
+ protected:
+  void Free(MannualFreeAllocation* allocation) override;
+  MannualFreeAllocation* AllocateImpl(size_t size,
+                                      Allocator::Attr attr) override;
 };
paddle/fluid/memory/allocation/locked_allocator.cc

@@ -14,36 +14,32 @@

 #include "paddle/fluid/memory/allocation/locked_allocator.h"
 #include <mutex>  // NOLINT
+#include "paddle/fluid/memory/allocation/underlying_manual_allocation.h"
+#include "paddle/fluid/platform/lock_guard_ptr.h"

 namespace paddle {
 namespace memory {
 namespace allocation {

-std::unique_ptr<Allocation> LockedAllocator::Allocate(size_t size, Attr attr) {
-  if (underlying_allocator_->IsAllocThreadSafe()) {
-    return underlying_allocator_->Allocate(size, attr);
-  } else {
-    std::lock_guard<std::mutex> guard(mtx_);
-    return underlying_allocator_->Allocate(size, attr);
-  }
-}
-
-void LockedAllocator::FreeUniquePtr(std::unique_ptr<Allocation> allocation) {
-  if (underlying_allocator_->IsAllocThreadSafe()) {
-    return underlying_allocator_->FreeUniquePtr(std::move(allocation));
-  } else {
-    std::lock_guard<std::mutex> guard(mtx_);
-    return underlying_allocator_->FreeUniquePtr(std::move(allocation));
-  }
-}
-
 bool LockedAllocator::IsAllocThreadSafe() const { return true; }

 LockedAllocator::LockedAllocator(
-    std::unique_ptr<Allocator>&& underlying_allocator) {
-  auto* allocator =
-      dynamic_cast<UnmanagedAllocator*>(underlying_allocator.get());
-  PADDLE_ENFORCE_NOT_NULL(allocator);
-  underlying_allocator.release();
-  underlying_allocator_.reset(allocator);
+    std::unique_ptr<Allocator>&& underlying_allocator)
+    : underlying_allocator_(std::move(underlying_allocator)) {
+  PADDLE_ENFORCE_NOT_NULL(underlying_allocator_);
+  if (!underlying_allocator_->IsAllocThreadSafe()) {
+    mtx_.reset(new std::mutex());
+  }
+}
+void LockedAllocator::Free(MannualFreeAllocation* allocation) {
+  platform::LockGuardPtr<std::mutex> guard(mtx_);
+  reinterpret_cast<UnderlyingManualAllocation*>(allocation)
+      ->allocation_.reset();
+}
+MannualFreeAllocation* LockedAllocator::AllocateImpl(size_t size,
+                                                     Allocator::Attr attr) {
+  platform::LockGuardPtr<std::mutex> guard(mtx_);
+  return new UnderlyingManualAllocation(
+      this, underlying_allocator_->Allocate(size, attr));
 }
 }  // namespace allocation
 }  // namespace memory
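LockedAllocator now creates its mutex only when the wrapped allocator is not already thread-safe, and guards through a pointer-aware lock guard (Paddle's platform::LockGuardPtr) that does nothing for a null mutex. A sketch of that conditional-locking idiom, with LockGuardPtr reimplemented minimally and the wrapped allocator reduced to a flag:

    #include <memory>
    #include <mutex>

    // Minimal stand-in for platform::LockGuardPtr: locks only when the
    // unique_ptr actually owns a mutex.
    class LockGuardPtr {
     public:
      explicit LockGuardPtr(const std::unique_ptr<std::mutex>& mtx)
          : mtx_(mtx.get()) {
        if (mtx_) mtx_->lock();
      }
      ~LockGuardPtr() {
        if (mtx_) mtx_->unlock();
      }
      LockGuardPtr(const LockGuardPtr&) = delete;
      LockGuardPtr& operator=(const LockGuardPtr&) = delete;

     private:
      std::mutex* mtx_;
    };

    struct Wrapped { bool thread_safe; };

    class LockedAllocator {
     public:
      explicit LockedAllocator(Wrapped inner) : inner_(inner) {
        // Only pay for a mutex when the inner allocator needs one.
        if (!inner_.thread_safe) mtx_.reset(new std::mutex());
      }
      void DoAllocate() {
        LockGuardPtr guard(mtx_);  // no-op when inner is already thread-safe
        // ... delegate to inner_ ...
      }

     private:
      Wrapped inner_;
      std::unique_ptr<std::mutex> mtx_;
    };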
paddle/fluid/memory/allocation/locked_allocator.h

@@ -22,17 +22,19 @@ namespace memory {
 namespace allocation {

 // A allocator to make underlying allocator thread safe.
-class LockedAllocator : public UnmanagedAllocator {
+class LockedAllocator : public MannualFreeAllocator {
  public:
   explicit LockedAllocator(std::unique_ptr<Allocator>&& underlying_allocator);
-  std::unique_ptr<Allocation> Allocate(size_t size,
-                                       Attr attr = kDefault) override;
-  void FreeUniquePtr(std::unique_ptr<Allocation> allocation) override;
   bool IsAllocThreadSafe() const override;

+ protected:
+  void Free(MannualFreeAllocation* allocation) override;
+  MannualFreeAllocation* AllocateImpl(size_t size,
+                                      Allocator::Attr attr) override;
+
  private:
-  std::unique_ptr<UnmanagedAllocator> underlying_allocator_;
-  std::mutex mtx_;
+  std::unique_ptr<Allocator> underlying_allocator_;
+  std::unique_ptr<std::mutex> mtx_;
 };
paddle/fluid/memory/allocation/naive_managed_allocator.cc  (deleted, 100644 → 0)

// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/memory/allocation/naive_managed_allocator.h"

namespace paddle {
namespace memory {
namespace allocation {

NaiveManagedAllocator::NaiveManagedAllocator(
    std::unique_ptr<Allocator>&& allocator) {
  auto* underlying_allocator =
      dynamic_cast<UnmanagedAllocator*>(allocator.get());
  PADDLE_ENFORCE_NOT_NULL(underlying_allocator);
  allocator.release();
  Init(std::unique_ptr<UnmanagedAllocator>(underlying_allocator));
}

NaiveManagedAllocator::NaiveManagedAllocator(
    std::unique_ptr<UnmanagedAllocator>&& allocator) {
  Init(std::move(allocator));
}

void NaiveManagedAllocator::Init(
    std::unique_ptr<UnmanagedAllocator>&& allocator) {
  underlying_allocator_ = std::move(allocator);
}

bool NaiveManagedAllocator::IsAllocThreadSafe() const {
  return underlying_allocator_->IsAllocThreadSafe();
}

std::unique_ptr<Allocation> NaiveManagedAllocator::Allocate(size_t size,
                                                            Attr attr) {
  std::unique_ptr<Allocation> allocation =
      underlying_allocator_->Allocate(size, attr);
  return std::unique_ptr<Allocation>(
      new NaiveManagedAllocation(std::move(allocation), shared_from_this()));
}

std::shared_ptr<Allocation> NaiveManagedAllocator::AllocateShared(size_t size,
                                                                  Attr attr) {
  std::unique_ptr<Allocation> allocation =
      underlying_allocator_->Allocate(size, attr);
  return std::shared_ptr<Allocation>(
      new NaiveManagedAllocation(std::move(allocation), shared_from_this()));
}

NaiveManagedAllocation::~NaiveManagedAllocation() {
  auto allocator = allocator_.lock();
  if (UNLIKELY(allocator == nullptr)) {
    // the allocator is destructed before allocations.
    // do nothing.
    return;
  }
  // invoke Free
  allocator->UnderlyingAllocator().FreeUniquePtr(
      std::move(underlying_allocation_));
}
}  // namespace allocation
}  // namespace memory
}  // namespace paddle
paddle/fluid/memory/allocation/naive_managed_allocator.h  (deleted, 100644 → 0)

// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once
#include <memory>
#include "paddle/fluid/memory/allocation/allocator.h"

namespace paddle {
namespace memory {
namespace allocation {

// An allocator to wrap an UnmanagedAllocator and make the allocation managed
// by C++ smart ptr.
//
// NOTE: if the NaiveManagedAllocator is destroyed before
// NaiveManagedAllocations, the allocation will never be released.
class NaiveManagedAllocator;
class NaiveManagedAllocation : public Allocation {
 public:
  NaiveManagedAllocation(std::unique_ptr<Allocation>&& underlying_allocation,
                         std::shared_ptr<NaiveManagedAllocator> allocator)
      : Allocation(underlying_allocation->ptr(), underlying_allocation->size(),
                   underlying_allocation->place()),
        underlying_allocation_(std::move(underlying_allocation)),
        allocator_(allocator) {}

  ~NaiveManagedAllocation() final;

 private:
  std::unique_ptr<Allocation> underlying_allocation_;
  std::weak_ptr<NaiveManagedAllocator> allocator_;
};

class NaiveManagedAllocator
    : public ManagedAllocator,
      public std::enable_shared_from_this<NaiveManagedAllocator> {
 public:
  template <typename... ARGS>
  static std::shared_ptr<ManagedAllocator> Create(ARGS... args) {
    return std::static_pointer_cast<ManagedAllocator>(
        std::shared_ptr<NaiveManagedAllocator>(
            new NaiveManagedAllocator(std::move(args)...)));
  }

  inline UnmanagedAllocator& UnderlyingAllocator() {
    return *underlying_allocator_;
  }

  bool IsAllocThreadSafe() const override;
  std::unique_ptr<Allocation> Allocate(size_t size,
                                       Attr attr = kDefault) override;
  std::shared_ptr<Allocation> AllocateShared(size_t size,
                                             Attr attr = kDefault) override;

 private:
  explicit NaiveManagedAllocator(std::unique_ptr<Allocator>&& allocator);
  explicit NaiveManagedAllocator(
      std::unique_ptr<UnmanagedAllocator>&& allocator);
  void Init(std::unique_ptr<UnmanagedAllocator>&& allocator);

  std::unique_ptr<UnmanagedAllocator> underlying_allocator_;
};
}  // namespace allocation
}  // namespace memory
}  // namespace paddle
paddle/fluid/memory/allocation/retry_allocator.cc
浏览文件 @
ea81f8ee
@@ -18,29 +18,25 @@ namespace paddle {
 namespace memory {
 namespace allocation {

-RetryAllocation::~RetryAllocation() {
-  auto allocator = retry_allocator_.lock();
-  // Allocator is destroyed before allocation. Should not happened usually.
-  if (UNLIKELY(allocator == nullptr)) return;
-  allocator->FreeUnderlyingAllocation(std::move(underlying_allocation_));
-}
-
-bool RetryAllocator::IsAllocThreadSafe() const { return true; }
-
-std::shared_ptr<Allocation> RetryAllocator::AllocateShared(
-    size_t size, Allocator::Attr attr) {
-  return std::shared_ptr<Allocation>(AllocateImpl(size, attr));
-}
-
-std::unique_ptr<Allocation> RetryAllocator::Allocate(size_t size,
-                                                     Allocator::Attr attr) {
-  return std::unique_ptr<Allocation>(AllocateImpl(size, attr));
-}
-
-Allocation* RetryAllocator::AllocateImpl(size_t size, Allocator::Attr attr) {
+bool RetryAllocator::IsAllocThreadSafe() const {
+  return underlying_allocator_->IsAllocThreadSafe();
+}
+
+void RetryAllocator::Free(MannualFreeAllocation* allocation) {
+  reinterpret_cast<RetryAllocation*>(allocation)
+      ->underlying_allocation_.reset();
+  {
+    // notify all waited allocators, they can try to allocate memory after free.
+    std::lock_guard<std::mutex> lock(mutex_);
+    cv_.notify_all();
+  }
+}
+
+MannualFreeAllocation* RetryAllocator::AllocateImpl(size_t size,
+                                                    Allocator::Attr attr) {
   auto alloc_func = [&, this]() {
-    return new RetryAllocation(underlying_allocator_->Allocate(size, attr),
-                               this->shared_from_this());
+    return new RetryAllocation(underlying_allocator_->Allocate(size, attr),
+                               this);
   };
   // In fact, we can unify the code of allocation success and failure
   // But it would add lock even when allocation success at the first time
@@ -73,15 +69,6 @@ Allocation* RetryAllocator::AllocateImpl(size_t size, Allocator::Attr attr) {
     throw;
   }
 }
-
-void RetryAllocator::FreeUnderlyingAllocation(
-    std::unique_ptr<Allocation>&& allocation) {
-  underlying_allocator_->FreeUniquePtr(std::move(allocation));
-  {
-    // notify all waited allocators, they can try to allocate memory after free.
-    std::lock_guard<std::mutex> lock(mutex_);
-    cv_.notify_all();
-  }
-}

 }  // namespace allocation
 }  // namespace memory
...
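The body of AllocateImpl between the two hunks is elided by the diff view. From the visible pieces — alloc_func, the comment about not locking on the successful first attempt, and Free() notifying cv_ — the retry protocol is: try once without taking the lock; on failure, wait on cv_ for up to retry_time_ for some Free() to signal, then try again. A hedged sketch of that loop under stand-in names (std::bad_alloc stands in for Paddle's own allocation exception; this is not the elided code):

#include <chrono>
#include <condition_variable>
#include <mutex>
#include <new>

template <typename AllocFunc>
auto AllocateWithRetry(AllocFunc alloc_func, std::mutex& mutex,
                       std::condition_variable& cv,
                       std::chrono::milliseconds retry_time)
    -> decltype(alloc_func()) {
  try {
    return alloc_func();  // fast path: no lock when the first try succeeds
  } catch (const std::bad_alloc&) {
    // Slow path: block until a Free() calls cv.notify_all() or the retry
    // window expires, then fall through to one more attempt and let a
    // second failure propagate to the caller.
    std::unique_lock<std::mutex> lock(mutex);
    cv.wait_for(lock, retry_time);
  }
  return alloc_func();
}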
paddle/fluid/memory/allocation/retry_allocator.h
View file @ ea81f8ee
@@ -26,52 +26,27 @@ namespace allocation {

 class RetryAllocator;

-class RetryAllocation : public Allocation {
+class RetryAllocation : public MannualFreeAllocation {
  public:
   RetryAllocation(std::unique_ptr<Allocation>&& underlying_allocation,
-                  const std::shared_ptr<RetryAllocator>& retry_allocator)
-      : Allocation(underlying_allocation->ptr(), underlying_allocation->size(),
-                   underlying_allocation->place()),
-        underlying_allocation_(std::move(underlying_allocation)),
-        retry_allocator_(retry_allocator) {}
-
-  ~RetryAllocation() final;
-
- private:
+                  MannualFreeAllocator* allocator)
+      : MannualFreeAllocation(allocator, underlying_allocation->ptr(),
+                              underlying_allocation->size(),
+                              underlying_allocation->place()),
+        underlying_allocation_(std::move(underlying_allocation)) {}
+
   std::unique_ptr<Allocation> underlying_allocation_;
-  std::weak_ptr<RetryAllocator> retry_allocator_;
 };

-class RetryAllocator : public ManagedAllocator,
-                       public std::enable_shared_from_this<RetryAllocator> {
- public:
+class RetryAllocator : public MannualFreeAllocator {
+ private:
   RetryAllocator(std::unique_ptr<Allocator>&& allocator, size_t retry_ms)
-      : underlying_allocator_(
-            dynamic_cast<UnmanagedAllocator*>(allocator.release())),
-        retry_time_(retry_ms) {
+      : underlying_allocator_(std::move(allocator)), retry_time_(retry_ms) {
     EnforceCheck();
   }

-  template <typename... Args>
-  static std::shared_ptr<ManagedAllocator> Create(Args... args) {
-    return std::shared_ptr<ManagedAllocator>(
-        new RetryAllocator(std::forward<Args>(args)...));
-  }
-
+ public:
   bool IsAllocThreadSafe() const override;

-  std::unique_ptr<Allocation> Allocate(size_t size,
-                                       Allocator::Attr attr) override;
-  std::shared_ptr<Allocation> AllocateShared(size_t size,
-                                             Allocator::Attr attr) override;
-
-  void FreeUnderlyingAllocation(std::unique_ptr<Allocation>&& allocation);
-
  private:
-  Allocation* AllocateImpl(size_t size, Allocator::Attr attr);
-
   void EnforceCheck() {
     PADDLE_ENFORCE_NOT_NULL(
         underlying_allocator_.get(),
@@ -80,7 +55,13 @@ class RetryAllocator : public ManagedAllocator,
         "UnderlyingAllocator of RetryAllocator must be thread-safe");
   }

-  std::unique_ptr<UnmanagedAllocator> underlying_allocator_;
+ protected:
+  void Free(MannualFreeAllocation* allocation) override;
+  MannualFreeAllocation* AllocateImpl(size_t size,
+                                      Allocator::Attr attr) override;
+
+ private:
+  std::unique_ptr<Allocator> underlying_allocator_;
   std::chrono::milliseconds retry_time_;
   std::mutex mutex_;
   std::condition_variable cv_;
...
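The shape of the MannualFreeAllocator base class itself is not shown in this diff; what the overrides above imply is an allocator that implements a protected AllocateImpl plus a Free(MannualFreeAllocation*), with each allocation holding a plain pointer back to its allocator instead of the old weak_ptr. A self-contained toy of that ownership pattern, with every name invented for illustration:

#include <cstddef>
#include <cstdlib>

class ToyAllocator;

class ToyAllocation {
 public:
  ToyAllocation(ToyAllocator* allocator, void* ptr)
      : allocator_(allocator), ptr_(ptr) {}
  ~ToyAllocation();  // routes back to the allocator, defined below
  void* ptr() const { return ptr_; }

 private:
  ToyAllocator* allocator_;  // raw pointer: the allocator must outlive it
  void* ptr_;
};

class ToyAllocator {
 public:
  virtual ~ToyAllocator() = default;
  ToyAllocation* Allocate(size_t size) {
    return new ToyAllocation(this, std::malloc(size));
  }
  // Deallocation is a virtual call on the allocator, not a shared_ptr
  // deleter; subclasses can add locking, caching, or retries here.
  virtual void Free(ToyAllocation* allocation) {
    std::free(allocation->ptr());
  }
};

ToyAllocation::~ToyAllocation() { allocator_->Free(this); }

Deleting a ToyAllocation frees its buffer through whatever allocator produced it, which is the behavior the removed weak_ptr machinery was emulating.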
paddle/fluid/memory/allocation/naive_managed_allocator_test.cc → paddle/fluid/memory/allocation/underlying_manual_allocation.h
View file @ ea81f8ee
@@ -12,71 +12,24 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include "paddle/fluid/memory/allocation/naive_managed_allocator.h"
-#include <atomic>  // NOLINT
-#include <random>
-#include <thread>  // NOLINT
-#include <vector>
-#include "gtest/gtest.h"
+#pragma once
+
+#include "paddle/fluid/memory/allocation/allocator.h"

 namespace paddle {
 namespace memory {
 namespace allocation {

-class StubAllocator : public UnmanagedAllocator {
+class UnderlyingManualAllocation : public MannualFreeAllocation {
  public:
-  std::unique_ptr<Allocation> Allocate(size_t size,
-                                       Attr attr = kDefault) override {
-    counter_.fetch_add(1);
-    return std::unique_ptr<Allocation>(
-        new Allocation(nullptr, size, platform::CPUPlace()));
-  }
-  void FreeUniquePtr(std::unique_ptr<Allocation> allocation) override {
-    counter_.fetch_sub(1);
-  }
-  bool IsAllocThreadSafe() const override { return true; }
-
-  std::atomic<int> counter_{0};
-};
-
-TEST(NaiveManagedAllocator, main) {
-  auto allocator = NaiveManagedAllocator::Create(
-      std::unique_ptr<Allocator>(new StubAllocator()));
-
-  auto th_main = [=] {
-    std::random_device dev;
-    std::default_random_engine engine(dev());
-    std::uniform_int_distribution<int> dist(0, 1);
-
-    std::vector<std::shared_ptr<Allocation>> allocations;
-
-    for (int j = 0; j < 1024; ++j) {
-      bool to_insert = static_cast<bool>(dist(engine));
-      if (to_insert) {
-        allocations.emplace_back(allocator->AllocateShared(10));
-      } else {
-        if (!allocations.empty()) {
-          allocations.pop_back();
-        }
-      }
-    }
-  };
-
-  {
-    std::vector<std::thread> threads;
-    for (size_t i = 0; i < 1024; ++i) {
-      threads.emplace_back(th_main);
-    }
-    for (auto& th : threads) {
-      th.join();
-    }
-  }
-  ASSERT_EQ(reinterpret_cast<StubAllocator&>(
-                std::dynamic_pointer_cast<NaiveManagedAllocator>(allocator)
-                    ->UnderlyingAllocator())
-                .counter_,
-            0);
-}
+class UnderlyingManualAllocation : public MannualFreeAllocation {
+ public:
+  UnderlyingManualAllocation(MannualFreeAllocator* allocator,
+                             std::unique_ptr<Allocation> allocation)
+      : MannualFreeAllocation(allocator, allocation->ptr(),
+                              allocation->size(), allocation->place()),
+        allocation_(std::move(allocation)) {}
+  std::unique_ptr<Allocation> allocation_;
+};

 }  // namespace allocation
 }  // namespace memory
 }  // namespace paddle
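What this renamed header adds is a small adapter: a wrapper allocator that obtains memory from an inner allocator can hand back an UnderlyingManualAllocation, which exposes the inner pointer, size, and place while owning the inner unique_ptr, so destroying the outer handle releases the inner allocation as well. A stand-in toy of that adapter idea (none of these names are Paddle's):

#include <cstddef>
#include <cstdlib>
#include <memory>
#include <utility>

// Inner allocation: a malloc-backed buffer released on destruction.
struct Block {
  explicit Block(size_t n) : ptr(std::malloc(n)), size(n) {}
  ~Block() { std::free(ptr); }
  void* ptr;
  size_t size;
};

// Outer handle mirroring UnderlyingManualAllocation: it copies the inner
// block's pointer and size but also owns the inner object, so a wrapper
// allocator only needs to delete the handle to free everything.
struct WrappedBlock {
  explicit WrappedBlock(std::unique_ptr<Block> inner)
      : ptr(inner->ptr), size(inner->size), inner_(std::move(inner)) {}
  void* ptr;
  size_t size;
  std::unique_ptr<Block> inner_;
};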
paddle/fluid/memory/allocation/zero_size_allocator.cc
View file @ ea81f8ee
@@ -26,15 +26,10 @@ std::unique_ptr<Allocation> ZeroSizeAllocator::Allocate(size_t size,
     return underlying_allocator_->Allocate(size, attr);
   }
 }

-std::shared_ptr<Allocation> ZeroSizeAllocator::AllocateShared(
-    size_t size, Allocator::Attr attr) {
-  if (size == 0) {
-    return std::shared_ptr<Allocation>(new ZeroSizeAllocation(place_));
-  } else {
-    return underlying_allocator_->AllocateShared(size, attr);
-  }
-}
-
-bool ZeroSizeAllocator::IsAllocThreadSafe() const { return true; }
+bool ZeroSizeAllocator::IsAllocThreadSafe() const {
+  return underlying_allocator_->IsAllocThreadSafe();
+}

 }  // namespace allocation
 }  // namespace memory
 }  // namespace paddle
paddle/fluid/memory/allocation/zero_size_allocator.h
View file @ ea81f8ee
@@ -12,10 +12,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include <utility>
-
 #pragma once
+
+#include <utility>

 #include "paddle/fluid/memory/allocation/allocator.h"

 namespace paddle {
@@ -31,18 +29,17 @@ class ZeroSizeAllocation : public Allocation {
       : Allocation(nullptr, 0, p) {}
 };

-class ZeroSizeAllocator : public ManagedAllocator {
+class ZeroSizeAllocator : public Allocator {
  public:
-  ZeroSizeAllocator(
-      const std::shared_ptr<ManagedAllocator>& underlying_allocator,
-      const platform::Place& p)
-      : underlying_allocator_(underlying_allocator), place_(p) {}
+  ZeroSizeAllocator(std::shared_ptr<Allocator> underlying_allocator,
+                    const platform::Place& p)
+      : underlying_allocator_(std::move(underlying_allocator)), place_(p) {}

   std::unique_ptr<Allocation> Allocate(size_t size, Attr attr) override;
-  std::shared_ptr<Allocation> AllocateShared(size_t size, Attr attr) override;

   bool IsAllocThreadSafe() const override;

  private:
-  std::shared_ptr<ManagedAllocator> underlying_allocator_;
+  std::shared_ptr<Allocator> underlying_allocator_;
   const platform::Place& place_;
 };
...
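ZeroSizeAllocator is a decorator: zero-byte requests are answered with a valid but empty allocation (nullptr, size 0, the correct place) so callers never hit the underlying allocator for nothing, while every other request passes straight through. A self-contained toy of the same dispatch (stand-in types, not Paddle's API):

#include <cstddef>
#include <cstdlib>
#include <memory>
#include <utility>

struct Buf {
  void* ptr;
  size_t size;
  ~Buf() { std::free(ptr); }  // free(nullptr) is a no-op for the empty case
};

struct MallocAllocator {
  std::unique_ptr<Buf> Allocate(size_t size) {
    return std::unique_ptr<Buf>(new Buf{std::malloc(size), size});
  }
};

struct ZeroSizeDecorator {
  explicit ZeroSizeDecorator(std::shared_ptr<MallocAllocator> underlying)
      : underlying_(std::move(underlying)) {}
  std::unique_ptr<Buf> Allocate(size_t size) {
    if (size == 0) {
      return std::unique_ptr<Buf>(new Buf{nullptr, 0});  // valid, empty handle
    }
    return underlying_->Allocate(size);  // everything else passes through
  }
  std::shared_ptr<MallocAllocator> underlying_;
};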