PaddlePaddle / Paddle

Commit b2a7261d (unverified)
Authored by From00 on Jan 25, 2022; committed via GitHub on Jan 25, 2022
Parent: 529f1425

Add GetBasePtr interface in paddle::memory (#39145)

Showing 8 changed files with 47 additions and 27 deletions (+47, -27):
paddle/fluid/memory/CMakeLists.txt                     +7   -0
paddle/fluid/memory/allocation/CMakeLists.txt          +0   -7
paddle/fluid/memory/allocation/allocator.h             +1   -8
paddle/fluid/memory/allocation/allocator_facade.cc     +19  -0
paddle/fluid/memory/allocation/allocator_facade.h      +2   -0
paddle/fluid/memory/get_base_ptr_test.cu               +12  -12
paddle/fluid/memory/malloc.cc                          +4   -0
paddle/fluid/memory/malloc.h                           +2   -0
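Editor's sketch (not part of the commit): a minimal usage example of the new interface, modeled on the calls exercised in get_base_ptr_test.cu below. The include path, namespaces, and the helper name ExampleGetBasePtr are assumptions; AllocShared, GetBasePtr, and platform::GetGpuBasePtr themselves are taken from the diff, and the sketch only makes sense in a CUDA build with FLAGS_allocator_strategy=auto_growth.

#include "paddle/fluid/memory/malloc.h"  // declares AllocShared() and the new GetBasePtr()

bool ExampleGetBasePtr() {
  paddle::platform::CUDAPlace place(/*device_id=*/0);

  // AllocShared() returns std::shared_ptr<pten::Allocation>.
  auto allocation = paddle::memory::AllocShared(place, /*size=*/1024);

  // New in this commit: base pointer of the auto_growth block that
  // contains the allocation.
  void* base_ptr = paddle::memory::GetBasePtr(allocation);

  // The test compares it against the device-reported base pointer.
  // (platform::GetGpuBasePtr comes from the gpu_info target listed in the
  // CMake DEPS above; its header include is omitted in this sketch.)
  void* system_ptr =
      paddle::platform::GetGpuBasePtr(allocation->ptr(), place.GetDeviceId());

  return base_ptr == system_ptr;  // expected to hold under auto_growth
}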
paddle/fluid/memory/CMakeLists.txt

@@ -34,6 +34,13 @@ if (WITH_ROCM)
     DEPS device_context malloc)
 endif()
+
+if (WITH_GPU AND WITH_TESTING AND NOT "$ENV{CI_SKIP_CPP_TEST}" STREQUAL "ON")
+  nv_test(get_base_ptr_test SRCS get_base_ptr_test.cu DEPS malloc gpu_info)
+  set_tests_properties(get_base_ptr_test PROPERTIES
+                       ENVIRONMENT "FLAGS_allocator_strategy=auto_growth;
+                       FLAGS_use_stream_safe_cuda_allocator=true;")
+endif()
 #if (WITH_GPU)
 #  nv_test(pinned_memory_test SRCS pinned_memory_test.cu DEPS place memory)
 #endif()
paddle/fluid/memory/allocation/CMakeLists.txt

@@ -125,10 +125,3 @@ if(NOT WIN32)
   cc_library(mmap_allocator SRCS mmap_allocator.cc DEPS allocator)
   cc_test(mmap_allocator_test SRCS mmap_allocator_test.cc DEPS mmap_allocator allocator)
 endif(NOT WIN32)
-
-if (WITH_GPU AND WITH_TESTING AND NOT "$ENV{CI_SKIP_CPP_TEST}" STREQUAL "ON")
-  nv_test(base_ptr_test SRCS base_ptr_test.cu DEPS malloc gpu_info)
-  set_tests_properties(base_ptr_test PROPERTIES
-                       ENVIRONMENT "FLAGS_allocator_strategy=auto_growth;
-                       FLAGS_use_stream_safe_cuda_allocator=true;")
-endif()
paddle/fluid/memory/allocation/allocator.h

@@ -93,14 +93,7 @@ class Allocation : public pten::Allocation {
              const platform::Place& place)
       : pten::Allocation(ptr, size, place), base_ptr_(base_ptr) {}
 
-  void* base_ptr() const {
-    PADDLE_ENFORCE_EQ(
-        FLAGS_allocator_strategy, "auto_growth",
-        paddle::platform::errors::Unimplemented(
-            "base_ptr() is only implemented for auto_growth "
-            "strategy, not support %s strategy", FLAGS_allocator_strategy));
-    return base_ptr_;
-  }
+  void* base_ptr() const { return base_ptr_; }
 
  private:
   inline void RegisterDecoratedAllocator(Allocator* allocator) {
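Editor's note (not part of the commit): the strategy check removed from Allocation::base_ptr() above does not simply disappear; an equivalent check reappears at the facade level in the next file, so the accessor becomes a plain getter while validation moves to the public entry point.

// Hedged sketch of the two access paths after this change (names from the diff):
//   static_cast<Allocation*>(a.get())->base_ptr();   // internal accessor, no validation
//   AllocatorFacade::Instance().GetBasePtr(a);       // enforces auto_growth strategy and a GPU place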
paddle/fluid/memory/allocation/allocator_facade.cc

@@ -282,6 +282,10 @@ class AllocatorFacadePrivate {
     return iter->second;
   }
 
+  void* GetBasePtr(const std::shared_ptr<pten::Allocation>& allocation) {
+    return static_cast<Allocation*>(allocation.get())->base_ptr();
+  }
+
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
   bool HasCUDAAllocator(const platform::CUDAPlace& place,
                         const gpuStream_t& stream) {

@@ -821,6 +825,21 @@ const std::shared_ptr<Allocator>& AllocatorFacade::GetAllocator(
   return m_->GetAllocator(place, /* A non-zero num to choose allocator_ */ 1);
 }
 
+void* AllocatorFacade::GetBasePtr(
+    const std::shared_ptr<pten::Allocation>& allocation) {
+  PADDLE_ENFORCE_EQ(GetAllocatorStrategy(), AllocatorStrategy::kAutoGrowth,
+                    paddle::platform::errors::Unimplemented(
+                        "GetBasePtr() is only implemented for auto_growth "
+                        "strategy, not support allocator strategy: %d",
+                        static_cast<int>(GetAllocatorStrategy())));
+  PADDLE_ENFORCE_EQ(platform::is_gpu_place(allocation->place()), true,
+                    paddle::platform::errors::Unimplemented(
+                        "GetBasePtr() is only implemented for CUDAPlace(), not "
+                        "suppot place: %s",
+                        allocation->place()));
+  return m_->GetBasePtr(allocation);
+}
+
 std::shared_ptr<pten::Allocation> AllocatorFacade::AllocShared(
     const platform::Place& place, size_t size) {
   return std::shared_ptr<pten::Allocation>(Alloc(place, size));
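Editor's sketch (not part of the commit): the two PADDLE_ENFORCE_EQ checks added above make misuse of the new interface fail loudly. The helper name and the CPUPlace example below are assumptions for illustration; the preconditions themselves come from the diff.

#include "paddle/fluid/memory/malloc.h"

void ExampleGetBasePtrPreconditions() {
  // An allocation on a non-GPU place is rejected by the is_gpu_place() check:
  auto cpu_allocation =
      paddle::memory::AllocShared(paddle::platform::CPUPlace(), /*size=*/64);
  // paddle::memory::GetBasePtr(cpu_allocation);
  //   -> throws errors::Unimplemented ("only implemented for CUDAPlace()").
  // Likewise, running with FLAGS_allocator_strategy != auto_growth trips the
  // first PADDLE_ENFORCE_EQ before the place is even inspected.
  (void)cpu_allocation;  // silence unused-variable warning in this sketch
}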
paddle/fluid/memory/allocation/allocator_facade.h

@@ -51,6 +51,8 @@ class AllocatorFacade {
 
   const std::shared_ptr<Allocator>& GetAllocator(const platform::Place& place);
 
+  void* GetBasePtr(const std::shared_ptr<Allocation>& allocation);
+
   // Allocate a shared allocation.
   std::shared_ptr<Allocation> AllocShared(const platform::Place& place,
                                           size_t size);
paddle/fluid/memory/allocation/base_ptr_test.cu → paddle/fluid/memory/get_base_ptr_test.cu

@@ -35,9 +35,9 @@ class CUDAAllocatoionBasePtrTest : public ::testing::Test {
   void OneByOneAllocTest() {
     for (size_t i = 0; i < alloc_times_; ++i) {
       size_t size = dis_(random_engine_);
-      AllocationPtr allocation = Alloc(place_, size);
+      auto allocation = AllocShared(place_, size);
-      void* base_ptr = static_cast<Allocation*>(allocation.get())->base_ptr();
+      void* base_ptr = GetBasePtr(allocation);
       void* system_ptr =
           platform::GetGpuBasePtr(allocation->ptr(), place_.GetDeviceId());
       EXPECT_EQ(base_ptr, system_ptr);

@@ -47,21 +47,21 @@ class CUDAAllocatoionBasePtrTest : public ::testing::Test {
   }
 
   void BatchByBatchAllocTest() {
-    std::vector<AllocationPtr> allocations;
+    std::vector<std::shared_ptr<pten::Allocation>> allocations;
     allocations.reserve(batch_size_);
     size_t batch_num = alloc_times_ / batch_size_;
     for (size_t i = 0; i < batch_num; ++i) {
       for (size_t j = 0; j < batch_size_; ++j) {
         size_t size = dis_(random_engine_);
-        AllocationPtr allocation = Alloc(place_, size);
+        auto allocation = AllocShared(place_, size);
-        void* base_ptr = static_cast<Allocation*>(allocation.get())->base_ptr();
+        void* base_ptr = GetBasePtr(allocation);
         void* system_ptr =
             platform::GetGpuBasePtr(allocation->ptr(), place_.GetDeviceId());
         EXPECT_EQ(base_ptr, system_ptr);
-        allocations.emplace_back(std::move(allocation));
+        allocations.emplace_back(allocation);
       }
       allocations.clear();
     }

@@ -70,19 +70,19 @@ class CUDAAllocatoionBasePtrTest : public ::testing::Test {
   }
 
   void ContinuousAllocTest() {
-    std::vector<AllocationPtr> allocations;
+    std::vector<std::shared_ptr<pten::Allocation>> allocations;
     allocations.reserve(alloc_times_);
     for (size_t i = 0; i < alloc_times_; ++i) {
       size_t size = dis_(random_engine_);
-      AllocationPtr allocation = Alloc(place_, size);
+      auto allocation = AllocShared(place_, size);
-      void* base_ptr = static_cast<Allocation*>(allocation.get())->base_ptr();
+      void* base_ptr = GetBasePtr(allocation);
       void* system_ptr =
           platform::GetGpuBasePtr(allocation->ptr(), place_.GetDeviceId());
       EXPECT_EQ(base_ptr, system_ptr);
-      allocations.emplace_back(std::move(allocation));
+      allocations.emplace_back(allocation);
     }
     allocations.clear();

@@ -90,8 +90,8 @@ class CUDAAllocatoionBasePtrTest : public ::testing::Test {
   }
 
   void ZeroSizeAllocTest() {
-    AllocationPtr allocation = Alloc(place_, 0);
-    void* base_ptr = static_cast<Allocation*>(allocation.get())->base_ptr();
+    auto allocation = AllocShared(place_, 0);
+    void* base_ptr = GetBasePtr(allocation);
     void* system_ptr =
         platform::GetGpuBasePtr(allocation->ptr(), place_.GetDeviceId());
     EXPECT_EQ(base_ptr, system_ptr);
paddle/fluid/memory/malloc.cc

@@ -47,6 +47,10 @@ bool InSameStream(const std::shared_ptr<Allocation>& allocation,
                                                                      stream);
 }
 
+void* GetBasePtr(const std::shared_ptr<Allocation>& allocation) {
+  return allocation::AllocatorFacade::Instance().GetBasePtr(allocation);
+}
+
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
 AllocationPtr Alloc(const platform::CUDAPlace& place, size_t size,
                     const gpuStream_t& stream) {
paddle/fluid/memory/malloc.h

@@ -44,6 +44,8 @@ extern std::shared_ptr<Allocation> AllocShared(const platform::Place& place,
 extern bool InSameStream(const std::shared_ptr<Allocation>& allocation,
                          const platform::Stream& stream);
 
+extern void* GetBasePtr(const std::shared_ptr<Allocation>& allocation);
+
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
 extern AllocationPtr Alloc(const platform::CUDAPlace& place, size_t size,
                            const gpuStream_t& stream);
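Editor's summary (not part of the commit): taken together, the files above add a thin chain of delegations, with all validation concentrated in the facade.

// Hedged sketch of the call chain introduced by this commit (file names from the diff):
//   paddle::memory::GetBasePtr(allocation)                          // malloc.h / malloc.cc
//     -> allocation::AllocatorFacade::Instance().GetBasePtr(...)    // allocator_facade.cc:
//          //   enforces auto_growth strategy and a GPU place
//     -> AllocatorFacadePrivate::GetBasePtr(...)                    // static_cast to the
//          //   internal Allocation type
//     -> Allocation::base_ptr()                                     // allocator.h: plain accessor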