PaddlePaddle / PaddleDetection
Commit d424115f
Authored on Nov 19, 2018 by Yu Yang

Clean code
test=develop

Parent: b12c77da
Showing 6 changed files with 29 additions and 42 deletions (+29 −42)
paddle/fluid/framework/tensor_util.cc                       +0  −1
paddle/fluid/memory/allocation/allocator_facade.cc          +28 −33
paddle/fluid/memory/allocation/best_fit_allocator.cc        +1  −1
paddle/fluid/memory/allocation/best_fit_allocator.h         +0  −4
paddle/fluid/memory/allocation/best_fit_allocator_test.cu   +0  −1
paddle/fluid/memory/allocation/conditional_allocator.h      +0  −2
paddle/fluid/framework/tensor_util.cc
@@ -15,7 +15,6 @@
 #include <algorithm>
 #include <limits>
 #include <vector>
-#include "../memory/allocation/allocator.h"
 #include "paddle/fluid/framework/data_type.h"

 namespace paddle {
paddle/fluid/memory/allocation/allocator_facade.cc
@@ -64,11 +64,11 @@ class CPUManagedAllocator : public Allocator {
 };

 // TODO(yy): Dirty code here. This class should be configurable in runtime.
-class ChunkedManagedAllocator : public Allocator {
+class ChunkedAllocator : public Allocator {
  public:
-  explicit ChunkedManagedAllocator(std::unique_ptr<Allocator> system_allocator,
-                                   size_t max_chunk_size, size_t capacity = 1,
-                                   int64_t retry_time = -1)
+  explicit ChunkedAllocator(std::unique_ptr<Allocator> system_allocator,
+                            size_t max_chunk_size, size_t capacity = 1,
+                            int64_t retry_time = -1)
       : max_chunk_size_(max_chunk_size), retry_time_(retry_time) {
     raw_allocator_ = std::move(system_allocator);
@@ -78,12 +78,12 @@ class ChunkedManagedAllocator : public Allocator {
     if (capacity == 1) {
       VLOG(10) << "Create BestFitAllocator with chunk_size "
                << max_chunk_size_;
-      default_allocator_ = BestFitAllocatorCreator();
+      default_allocator_ = CreateAllocatorWithChunk();
     } else {
       VLOG(10) << "Create AutoIncrementAllocator with chunk_size "
                << max_chunk_size_ << " and capacity " << capacity;
       default_allocator_ = std::make_shared<AutoIncrementAllocator>(
-          [this] { return std::move(BestFitAllocatorCreator()); }, capacity);
+          [this] { return std::move(CreateAllocatorWithChunk()); }, capacity);
     }
   }
@@ -100,30 +100,26 @@ class ChunkedManagedAllocator : public Allocator {
     default_allocator_.reset(cond_allocator);
   }

-  ~ChunkedManagedAllocator() {
+  ~ChunkedAllocator() override {
     // Specify destruct order.
     default_allocator_.reset();
     chunks_.clear();
     raw_allocator_.reset();
   }

-  std::shared_ptr<Allocator> BestFitAllocatorCreator() {
+  std::shared_ptr<Allocator> CreateAllocatorWithChunk() {
     chunks_.emplace_back(raw_allocator_->Allocate(max_chunk_size_));
     auto* allocation = chunks_.back().get();
-    std::unique_ptr<Allocator> unmanaged_allocator(new LockedAllocator(
+    std::unique_ptr<Allocator> allocator(new LockedAllocator(
         std::unique_ptr<Allocator>(new BestFitAllocator(allocation))));

-    if (retry_time_ <= 0) {
-      VLOG(10) << "Create NaiveManagedAllocator without retry";
-      return std::make_shared<AlignedAllocator<64u>>(
-          std::move(unmanaged_allocator));
-    } else {
-      VLOG(10) << "Create RetryAllocator with retry_time " << retry_time_
-               << "ms";
-      auto tmp = std::make_shared<RetryAllocator>(
-          std::move(unmanaged_allocator), static_cast<size_t>(retry_time_));
-      return std::make_shared<AlignedAllocator<64u>>(tmp);
-    }
-  }
+    if (retry_time_ > 0) {
+      auto* retry_allocator =
+          new RetryAllocator(std::move(allocator), retry_time_);
+      allocator.reset(retry_allocator);
+    }
+
+    return std::make_shared<AlignedAllocator<64u>>(std::move(allocator));
+  }

   bool IsAllocThreadSafe() const override { return true; }
@@ -143,13 +139,13 @@ class ChunkedManagedAllocator : public Allocator {
 #ifdef PADDLE_WITH_CUDA
-class CUDAManagedAllocator : public ChunkedManagedAllocator {
+class CUDAChunkedAllocator : public ChunkedAllocator {
  public:
-  explicit CUDAManagedAllocator(int dev_id)
-      : ChunkedManagedAllocator(
-            std::unique_ptr<Allocator>(
-                new CUDAAllocator(platform::CUDAPlace(dev_id))),
-            GetMaxChunkSize(dev_id), GetCapcity(dev_id),
-            GetRetryTime()) {}
+  explicit CUDAChunkedAllocator(int dev_id)
+      : ChunkedAllocator(std::unique_ptr<Allocator>(
+                             new CUDAAllocator(platform::CUDAPlace(dev_id))),
+                         GetMaxChunkSize(dev_id), GetCapcity(dev_id),
+                         GetRetryTime()) {}

  private:
   static size_t GetMaxChunkSize(int dev_id) {
@@ -168,13 +164,12 @@ class CUDAManagedAllocator : public ChunkedManagedAllocator {
   static int64_t GetRetryTime() { return FLAGS_gpu_allocator_retry_time; }
 };

-class CUDAPinnedManagedAllocator : public ChunkedManagedAllocator {
+class CUDAPinnedChunkedAllocator : public ChunkedAllocator {
  public:
-  CUDAPinnedManagedAllocator()
-      : ChunkedManagedAllocator(
-            std::unique_ptr<Allocator>(new CPUPinnedAllocator()),
-            platform::CUDAPinnedMaxChunkSize(), GetCapacity(), -1) {
-    // never retry
-  }
+  CUDAPinnedChunkedAllocator()
+      : ChunkedAllocator(std::unique_ptr<Allocator>(new CPUPinnedAllocator()),
+                         platform::CUDAPinnedMaxChunkSize(), GetCapacity(),
+                         -1) {}  // never retry

  private:
   static size_t GetCapacity() {
@@ -226,7 +221,7 @@ class AllocatorFacadePrivate {
     int device_count = platform::GetCUDADeviceCount();
     for (int dev_id = 0; dev_id < device_count; ++dev_id) {
       allocators_[platform::CUDAPlace(dev_id)] =
-          std::make_shared<CUDAManagedAllocator>(dev_id);
+          std::make_shared<CUDAChunkedAllocator>(dev_id);
     }
 #endif
   }
@@ -234,7 +229,7 @@ class AllocatorFacadePrivate {
   void InitCUDAPinnedAllocator() {
 #ifdef PADDLE_WITH_CUDA
     allocators_[platform::CUDAPinnedPlace()] =
-        std::make_shared<CUDAPinnedManagedAllocator>();
+        std::make_shared<CUDAPinnedChunkedAllocator>();
 #endif
   }
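Note on the refactor above: besides the renames (ChunkedManagedAllocator → ChunkedAllocator, BestFitAllocatorCreator → CreateAllocatorWithChunk, CUDAManagedAllocator → CUDAChunkedAllocator, CUDAPinnedManagedAllocator → CUDAPinnedChunkedAllocator), the main structural change is that CreateAllocatorWithChunk no longer has two return paths: the chunk allocator is always built as a chain of wrappers (a BestFitAllocator inside a LockedAllocator, optionally wrapped by a RetryAllocator when retry_time_ > 0, and finally exposed through an AlignedAllocator<64u>). The standalone sketch below only mirrors that wrapping order with simplified stand-in classes; the names Alloc, SimpleAllocator, Locked, Retrying, and CreateChunkAllocator are hypothetical and are not Paddle's real API.

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <memory>
#include <new>
#include <utility>

// Hypothetical stand-ins: only the wrapping order matches the diff above.
struct Alloc {
  virtual ~Alloc() = default;
  virtual void* Allocate(std::size_t n) = 0;
};

// Plays the role of BestFitAllocator (the core per-chunk allocator).
struct SimpleAllocator : Alloc {
  void* Allocate(std::size_t n) override { return ::operator new(n); }
};

// Plays the role of LockedAllocator (adds thread safety in the real code).
struct Locked : Alloc {
  explicit Locked(std::unique_ptr<Alloc> u) : under_(std::move(u)) {}
  void* Allocate(std::size_t n) override { return under_->Allocate(n); }
  std::unique_ptr<Alloc> under_;
};

// Plays the role of RetryAllocator (re-tries failed allocations in the real code).
struct Retrying : Alloc {
  Retrying(std::unique_ptr<Alloc> u, std::size_t retry_ms)
      : under_(std::move(u)), retry_ms_(retry_ms) {}
  void* Allocate(std::size_t n) override { return under_->Allocate(n); }
  std::unique_ptr<Alloc> under_;
  std::size_t retry_ms_;
};

// Mirrors the control flow of the new CreateAllocatorWithChunk(): always build
// the locked core, optionally insert the retry layer, then return one shared
// handle (the real code wraps it in AlignedAllocator<64u> at this point).
std::shared_ptr<Alloc> CreateChunkAllocator(std::int64_t retry_time) {
  std::unique_ptr<Alloc> allocator(
      new Locked(std::unique_ptr<Alloc>(new SimpleAllocator())));
  if (retry_time > 0) {
    auto* retry = new Retrying(std::move(allocator),
                               static_cast<std::size_t>(retry_time));
    allocator.reset(retry);
  }
  return std::shared_ptr<Alloc>(std::move(allocator));
}

int main() {
  auto chunk_allocator = CreateChunkAllocator(/*retry_time=*/100);
  void* p = chunk_allocator->Allocate(256);
  std::cout << "allocated at " << p << "\n";
  ::operator delete(p);
  return 0;
}

Compiled with any C++11 compiler, this prints the address of a 256-byte allocation; swapping the stand-ins for the corresponding Paddle classes recovers the function shown in the diff.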
paddle/fluid/memory/allocation/best_fit_allocator.cc
@@ -13,7 +13,7 @@
 // limitations under the License.

 #include "paddle/fluid/memory/allocation/best_fit_allocator.h"
-#include <bits/stdc++.h>
+#include <cmath>
 #include <list>
 #include <map>
 #include <string>
paddle/fluid/memory/allocation/best_fit_allocator.h
@@ -106,10 +106,6 @@ class BestFitAllocator : public Allocator {
   const platform::Place& Place() const { return allocation_->place(); }

-  // std::unique_ptr<Allocation> Allocate(size_t size,
-  //                                      Attr attr = kDefault) override;
-  // void FreeUniquePtr(std::unique_ptr<Allocation> allocation) override;
-
   size_t NumFreeChunks() const;

  private:
paddle/fluid/memory/allocation/best_fit_allocator_test.cu
@@ -80,7 +80,6 @@ TEST(BestFitAllocator, concurrent_cuda) {
     th.join();
   }
 }
-// allocator.FreeUniquePtr(std::move(cuda_allocation));
 }

 }  // namespace allocation
paddle/fluid/memory/allocation/conditional_allocator.h
@@ -45,8 +45,6 @@ class ConditionalAllocator : public Allocator {
   ConditionalAllocator& AddAllocator(std::function<bool(size_t, Attr)> func,
                                      std::shared_ptr<Allocator> allocator);

-  // AllocationPtr Allocate(size_t size, Attr attr) override;
-
   bool IsAllocThreadSafe() const override;

  protected: