Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
机器未来
Paddle
提交
bb8d7783
P
Paddle
项目概览
机器未来
/
Paddle
与 Fork 源项目一致
Fork自
PaddlePaddle / Paddle
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
1
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
1
Issue
1
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
未验证
提交
bb8d7783
编写于
10月 29, 2019
作者:
Z
Zeng Jinle
提交者:
GitHub
10月 29, 2019
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
lazy init of allocators, test=develop (#20854)
上级
aacd16db
变更
1
隐藏空白更改
内联
并排
Showing
1 changed file
with
43 additions
and
24 deletions
+43
-24
paddle/fluid/memory/allocation/naive_best_fit_allocator.cc
paddle/fluid/memory/allocation/naive_best_fit_allocator.cc
+43
-24
未找到文件。
paddle/fluid/memory/allocation/naive_best_fit_allocator.cc
浏览文件 @
bb8d7783
...
...
@@ -101,25 +101,38 @@ size_t Used<platform::CPUPlace>(const platform::CPUPlace &place) {
}
#ifdef PADDLE_WITH_CUDA
BuddyAllocator
*
GetGPUBuddyAllocator
(
int
gpu_id
)
{
static
std
::
once_flag
init_flag
;
static
detail
::
BuddyAllocator
**
a_arr
=
nullptr
;
static
std
::
vector
<
int
>
devices
;
std
::
call_once
(
init_flag
,
[
gpu_id
]()
{
devices
=
platform
::
GetSelectedDevices
();
int
gpu_num
=
devices
.
size
();
a_arr
=
new
BuddyAllocator
*
[
gpu_num
];
for
(
size_t
i
=
0
;
i
<
devices
.
size
();
++
i
)
{
int
dev_id
=
devices
[
i
];
a_arr
[
i
]
=
nullptr
;
platform
::
SetDeviceId
(
dev_id
);
a_arr
[
i
]
=
new
BuddyAllocator
(
std
::
unique_ptr
<
detail
::
SystemAllocator
>
(
new
detail
::
GPUAllocator
(
dev_id
)),
platform
::
GpuMinChunkSize
(),
platform
::
GpuMaxChunkSize
());
class
GPUBuddyAllocatorList
{
private:
GPUBuddyAllocatorList
()
:
devices_
(
platform
::
GetSelectedDevices
())
{
auto
gpu_num
=
devices_
.
size
();
allocators_
.
resize
(
gpu_num
);
init_flags_
.
reserve
(
gpu_num
);
for
(
size_t
i
=
0
;
i
<
gpu_num
;
++
i
)
{
init_flags_
.
emplace_back
(
new
std
::
once_flag
());
}
}
static
GPUBuddyAllocatorList
*
CreateNewInstance
()
{
return
new
GPUBuddyAllocatorList
();
}
public:
static
GPUBuddyAllocatorList
*
Instance
()
{
static
auto
*
instance
=
CreateNewInstance
();
return
instance
;
}
BuddyAllocator
*
Get
(
int
gpu_id
)
{
auto
pos
=
std
::
distance
(
devices_
.
begin
(),
std
::
find
(
devices_
.
begin
(),
devices_
.
end
(),
gpu_id
));
PADDLE_ENFORCE_LT
(
pos
,
devices_
.
size
());
std
::
call_once
(
*
init_flags_
[
pos
],
[
this
,
pos
]
{
platform
::
SetDeviceId
(
devices_
[
pos
]);
allocators_
[
pos
].
reset
(
new
BuddyAllocator
(
std
::
unique_ptr
<
detail
::
SystemAllocator
>
(
new
detail
::
GPUAllocator
(
devices_
[
pos
])),
platform
::
GpuMinChunkSize
(),
platform
::
GpuMaxChunkSize
()));
VLOG
(
10
)
<<
"
\n\n
NOTE:
\n
"
<<
"You can set GFlags environment variable "
<<
"'FLAGS_fraction_of_gpu_memory_to_use' "
...
...
@@ -132,13 +145,19 @@ BuddyAllocator *GetGPUBuddyAllocator(int gpu_id) {
<<
FLAGS_initial_gpu_memory_in_mb
<<
". Current 'FLAGS_reallocate_gpu_memory_in_mb' value is "
<<
FLAGS_reallocate_gpu_memory_in_mb
<<
"
\n\n
"
;
}
platform
::
SetDeviceId
(
gpu_id
);
});
});
return
allocators_
[
pos
].
get
();
}
auto
pos
=
std
::
distance
(
devices
.
begin
(),
std
::
find
(
devices
.
begin
(),
devices
.
end
(),
gpu_id
));
return
a_arr
[
pos
];
private:
std
::
vector
<
int
>
devices_
;
std
::
vector
<
std
::
unique_ptr
<
std
::
once_flag
>>
init_flags_
;
std
::
vector
<
std
::
unique_ptr
<
BuddyAllocator
>>
allocators_
;
};
// Fetches the (lazily constructed) buddy allocator for the given GPU from
// the process-wide registry.
BuddyAllocator *GetGPUBuddyAllocator(int gpu_id) {
  auto *allocator_list = GPUBuddyAllocatorList::Instance();
  return allocator_list->Get(gpu_id);
}
#endif
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录