Commit 39004080
Authored Mar 26, 2018 by chengduoZH

replace use_pinned with is_pinned

Parent: eaa90d38
Showing 5 changed files with 39 additions and 40 deletions (+39 −40)
paddle/fluid/framework/tensor.h                  +12 −12
paddle/fluid/framework/tensor_impl.h             +11 −11
paddle/fluid/memory/detail/system_allocator.cc    +4  −3
paddle/fluid/memory/memory.cc                      +6  −6
paddle/fluid/memory/memory.h                       +6  −8
paddle/fluid/framework/tensor.h
@@ -45,11 +45,11 @@ class Tensor {
   friend struct EigenVector;

  public:
-  Tensor() : offset_(0), use_pinned_(false) {}
+  Tensor() : offset_(0), is_pinned_(false) {}

   /*! Constructor with place should only be used in pybind. */
   explicit Tensor(const platform::Place& place)
-      : offset_(0), use_pinned_(false) {
+      : offset_(0), is_pinned_(false) {
     holder_->set_place(place);
   }
@@ -70,12 +70,12 @@ class Tensor {
    * @note If not exist, then allocation.
    */
   template <typename T>
-  inline T* mutable_data(platform::Place place, bool use_pinned = false);
+  inline T* mutable_data(platform::Place place, bool is_pinned = false);

   inline void* mutable_data(platform::Place place, std::type_index type,
-                            bool use_pinned = false);
+                            bool is_pinned = false);

-  inline void* mutable_data(platform::Place place, bool use_pinned = false);
+  inline void* mutable_data(platform::Place place, bool is_pinned = false);

   /**
    * @brief Return a pointer to mutable memory block.
@@ -87,7 +87,7 @@ class Tensor {
    */
   template <typename T>
   inline T* mutable_data(DDim dims, platform::Place place,
-                         bool use_pinned = false);
+                         bool is_pinned = false);

   /*! Return the dimensions of the memory block. */
   inline const DDim& dims() const;
@@ -153,13 +153,13 @@ class Tensor {
   template <typename Place>
   struct PlaceholderImpl : public Placeholder {
     PlaceholderImpl(Place place, size_t size, std::type_index type,
-                    bool use_pinned = false)
-        : ptr_(static_cast<uint8_t*>(memory::Alloc(place, size, use_pinned)),
-               memory::PODDeleter<uint8_t, Place>(place, use_pinned)),
+                    bool is_pinned = false)
+        : ptr_(static_cast<uint8_t*>(memory::Alloc(place, size, is_pinned)),
+               memory::PODDeleter<uint8_t, Place>(place, is_pinned)),
           place_(place),
           size_(size),
           type_(type),
-          use_pinned_(use_pinned) {
+          is_pinned_(is_pinned) {
       PADDLE_ENFORCE_NOT_NULL(ptr_, "Insufficient %s memory to allocation.",
                               (is_cpu_place(place_) ? "CPU" : "GPU"));
     }
@@ -184,7 +184,7 @@ class Tensor {
     std::type_index type_;

     /*! use pinned memory or not. */
-    bool use_pinned_;
+    bool is_pinned_;
   };

   /*! holds the memory block if allocated. */
@@ -219,7 +219,7 @@ class Tensor {
    * PlaceHolder::ptr_ and where the tensor data really begins.
    */
   size_t offset_;
-  bool use_pinned_;
+  bool is_pinned_;
 };

 inline void Tensor::switch_place(platform::Place new_place) {
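For orientation, a minimal caller-side sketch of the renamed parameter follows. Only mutable_data, Resize/DDim, isPinned, and the is_pinned argument come from this diff; the include path is the file above, while framework::make_ddim and the CUDAPlace(device) constructor are assumptions made for illustration.

#include "paddle/fluid/framework/tensor.h"

// Illustrative sketch (not part of this commit): request page-locked
// backing memory for a tensor through the renamed is_pinned flag.
void PinnedTensorSketch() {
  namespace framework = paddle::framework;
  namespace platform = paddle::platform;

  framework::Tensor t;
  platform::CUDAPlace gpu(0);  // assumed constructor taking a device id

  // The third argument used to be `use_pinned`; after this commit it is
  // `is_pinned`, but the call shape is unchanged.
  float* data = t.mutable_data<float>(framework::make_ddim({2, 3}), gpu,
                                      /*is_pinned=*/true);

  // isPinned() now reports the renamed member is_pinned_.
  bool pinned = t.isPinned();
  (void)data;
  (void)pinned;
}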
paddle/fluid/framework/tensor_impl.h
@@ -102,20 +102,20 @@ inline T* Tensor::data() {
 template <typename T>
 inline T* Tensor::mutable_data(DDim dims, platform::Place place,
-                               bool use_pinned) {
+                               bool is_pinned) {
   static_assert(std::is_pod<T>::value, "T must be POD");
   Resize(dims);
-  return mutable_data<T>(place, use_pinned);
+  return mutable_data<T>(place, is_pinned);
 }

 template <typename T>
-inline T* Tensor::mutable_data(platform::Place place, bool use_pinned) {
+inline T* Tensor::mutable_data(platform::Place place, bool is_pinned) {
   static_assert(std::is_pod<T>::value, "T must be POD");
-  return reinterpret_cast<T*>(mutable_data(place, typeid(T), use_pinned));
+  return reinterpret_cast<T*>(mutable_data(place, typeid(T), is_pinned));
 }

 inline void* Tensor::mutable_data(platform::Place place, std::type_index type,
-                                  bool use_pinned) {
+                                  bool is_pinned) {
   if (holder_ != nullptr) {
     holder_->set_type(type);
   }
@@ -129,27 +129,27 @@ inline void* Tensor::mutable_data(platform::Place place, std::type_index type,
       holder_->size() < size + offset_) {
     if (platform::is_cpu_place(place)) {
       holder_.reset(new PlaceholderImpl<platform::CPUPlace>(
-          boost::get<platform::CPUPlace>(place), size, type, use_pinned));
+          boost::get<platform::CPUPlace>(place), size, type, is_pinned));
     } else if (platform::is_gpu_place(place)) {
 #ifndef PADDLE_WITH_CUDA
       PADDLE_THROW("'CUDAPlace' is not supported in CPU only device.");
     }
 #else
       holder_.reset(new PlaceholderImpl<platform::CUDAPlace>(
-          boost::get<platform::CUDAPlace>(place), size, type, use_pinned));
+          boost::get<platform::CUDAPlace>(place), size, type, is_pinned));
     }
 #endif
     offset_ = 0;
-    use_pinned_ = use_pinned;
+    is_pinned_ = is_pinned;
   }
   return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(holder_->ptr()) +
                                  offset_);
 }

-inline void* Tensor::mutable_data(platform::Place place, bool use_pinned) {
+inline void* Tensor::mutable_data(platform::Place place, bool is_pinned) {
   PADDLE_ENFORCE(this->holder_ != nullptr,
                  "Cannot invoke mutable data if current hold nothing");
-  return mutable_data(place, holder_->type(), use_pinned);
+  return mutable_data(place, holder_->type(), is_pinned);
 }

 inline Tensor& Tensor::ShareDataWith(const Tensor& src) {
@@ -191,7 +191,7 @@ inline const DDim& Tensor::dims() const { return dims_; }
 inline int64_t Tensor::numel() const { return product(dims_); }

-inline bool Tensor::isPinned() const { return use_pinned_; }
+inline bool Tensor::isPinned() const { return is_pinned_; }

 inline Tensor ReshapeToMatrix(const Tensor& src, int num_col_dims) {
   Tensor res;
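The hunks above mostly thread the renamed flag down the mutable_data overload chain and record it on the tensor whenever the holder is re-allocated. As background, here is a self-contained toy sketch of that forwarding pattern; none of these names are Paddle's, and plain malloc/free stand in for the pinned and unpinned allocators.

#include <cstddef>
#include <cstdlib>
#include <memory>
#include <typeindex>
#include <typeinfo>

// Toy stand-in for the Tensor overload chain in tensor_impl.h: every
// overload forwards `is_pinned` until the allocation actually happens.
class ToyTensor {
 public:
  template <typename T>
  T* mutable_data(std::size_t count, bool is_pinned = false) {
    return static_cast<T*>(
        mutable_data(count * sizeof(T), typeid(T), is_pinned));
  }

  void* mutable_data(std::size_t bytes, std::type_index type, bool is_pinned) {
    if (bytes > size_) {
      // Re-allocate and remember how the buffer was obtained, mirroring
      // `is_pinned_ = is_pinned;` in the diff above.
      holder_.reset(static_cast<unsigned char*>(std::malloc(bytes)));
      size_ = bytes;
      type_ = type;
      is_pinned_ = is_pinned;
    }
    return holder_.get();
  }

  bool isPinned() const { return is_pinned_; }

 private:
  struct FreeDeleter {
    void operator()(unsigned char* p) const { std::free(p); }
  };
  std::unique_ptr<unsigned char, FreeDeleter> holder_;
  std::size_t size_ = 0;
  std::type_index type_ = typeid(void);
  bool is_pinned_ = false;
};

int main() {
  ToyTensor t;
  float* p = t.mutable_data<float>(8, /*is_pinned=*/true);
  p[0] = 1.0f;
  return t.isPinned() ? 0 : 1;
}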
paddle/fluid/memory/detail/system_allocator.cc
@@ -123,8 +123,9 @@ void* CUDAPinnedAllocator::Alloc(size_t& index, size_t size) {
   if (size <= 0) return nullptr;
   void* p;

   // NOTE: here, we use GpuMaxAllocSize() as the maximum memory size
-  // of host fallback allocation. Allocates too much would reduce
+  // of host pinned allocation. Allocates too much would reduce
   // the amount of memory available to the underlying system for paging.
+  // Because the memory is in CPU side, other device can access it too.

   size_t usable = paddle::platform::GpuMaxAllocSize() - fallback_alloc_size_;
@@ -149,10 +150,10 @@ void CUDAPinnedAllocator::Free(void* p, size_t size, size_t index) {
   err = cudaFreeHost(p);

   // Purposefully allow cudaErrorCudartUnloading, because
-  // that is returned if you ever call cudaFree after the
+  // that is returned if you ever call cudaFreeHost after the
   // driver has already shutdown. This happens only if the
   // process is terminating, in which case we don't care if
-  // cudaFree succeeds.
+  // cudaFreeHost succeeds.
   if (err != cudaErrorCudartUnloading) {
     PADDLE_ENFORCE(err, "cudaFreeHost failed in GPUPinnedAllocator::Free.");
   }
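The two comment fixes above concern CUDAPinnedAllocator, which hands out page-locked host memory. As background, a standalone CUDA sketch of the allocate/free pair those comments describe; this is not Paddle's allocator, and the size and flag are arbitrary.

#include <cuda_runtime.h>
#include <cstdio>

// Allocate pinned (page-locked) host memory with cudaHostAlloc and release
// it with cudaFreeHost, tolerating cudaErrorCudartUnloading during
// process teardown, as the comment in the diff explains.
int main() {
  void* p = nullptr;
  cudaError_t err = cudaHostAlloc(&p, 1 << 20, cudaHostAllocPortable);
  if (err != cudaSuccess) {
    std::fprintf(stderr, "cudaHostAlloc failed: %s\n", cudaGetErrorString(err));
    return 1;
  }

  // ... use `p` as a staging buffer for fast host<->device copies ...

  err = cudaFreeHost(p);
  if (err != cudaSuccess && err != cudaErrorCudartUnloading) {
    std::fprintf(stderr, "cudaFreeHost failed: %s\n", cudaGetErrorString(err));
    return 1;
  }
  return 0;
}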
paddle/fluid/memory/memory.cc
@@ -39,7 +39,7 @@ BuddyAllocator* GetCPUBuddyAllocator() {
 template <>
 void* Alloc<platform::CPUPlace>(platform::CPUPlace place, size_t size,
-                                bool use_pinned) {
+                                bool is_pinned) {
   VLOG(10) << "Allocate " << size << " bytes on " << platform::Place(place);
   void* p = GetCPUBuddyAllocator()->Alloc(size);
   VLOG(10) << "  pointer=" << p;
@@ -48,7 +48,7 @@ void* Alloc<platform::CPUPlace>(platform::CPUPlace place, size_t size,
 template <>
 void Free<platform::CPUPlace>(platform::CPUPlace place, void* p,
-                              bool use_pinned) {
+                              bool is_pinned) {
   VLOG(10) << "Free pointer=" << p << " on " << platform::Place(place);
   GetCPUBuddyAllocator()->Free(p);
 }
@@ -115,9 +115,9 @@ size_t Used<platform::CUDAPlace>(platform::CUDAPlace place) {
 template <>
 void* Alloc<platform::CUDAPlace>(platform::CUDAPlace place, size_t size,
-                                 bool use_pinned) {
+                                 bool is_pinned) {
   void* ptr;
-  if (use_pinned) {
+  if (is_pinned) {
     auto* buddy_allocator = GetCUDAPinnedBuddyAllocator(place.device);
     ptr = buddy_allocator->Alloc(size);
   } else {
@@ -143,8 +143,8 @@ void* Alloc<platform::CUDAPlace>(platform::CUDAPlace place, size_t size,
 template <>
 void Free<platform::CUDAPlace>(platform::CUDAPlace place, void* p,
-                               bool use_pinned) {
-  if (use_pinned) {
+                               bool is_pinned) {
+  if (is_pinned) {
     GetCUDAPinnedBuddyAllocator(place.device)->Free(p);
   } else {
     GetGPUBuddyAllocator(place.device)->Free(p);
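The CUDAPlace specializations above branch on the renamed flag to pick between the pinned-host buddy allocator and the GPU buddy allocator, so a caller has to pass the same value to Alloc and Free. A minimal caller sketch follows; the Alloc/Free signatures come from this diff, while the place.h include path and the CUDAPlace(device) constructor are assumptions.

#include "paddle/fluid/memory/memory.h"
#include "paddle/fluid/platform/place.h"  // assumed path for CUDAPlace

// Illustrative caller of the renamed flag: an allocation made with
// is_pinned = true must also be freed with is_pinned = true, otherwise
// Free<CUDAPlace> would hand the pointer to the wrong buddy allocator.
void PinnedAllocSketch() {
  namespace memory = paddle::memory;
  namespace platform = paddle::platform;

  platform::CUDAPlace gpu(0);
  void* staging = memory::Alloc(gpu, 1 << 20, /*is_pinned=*/true);

  // ... copy host data through the pinned staging buffer ...

  memory::Free(gpu, staging, /*is_pinned=*/true);
}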
paddle/fluid/memory/memory.h
@@ -33,7 +33,7 @@ namespace memory {
  * address is valid or not.
  */
 template <typename Place>
-void* Alloc(Place place, size_t size, bool use_pinned = false);
+void* Alloc(Place place, size_t size, bool is_pinned = false);

 /**
  * \brief Free memory block in one place.
@@ -43,7 +43,7 @@ void* Alloc(Place place, size_t size, bool use_pinned = false);
  *
  */
 template <typename Place>
-void Free(Place place, void* ptr, bool use_pinned = false);
+void Free(Place place, void* ptr, bool is_pinned = false);

 /**
  * \brief Total size of used memory in one place.
@@ -74,15 +74,13 @@ class PODDeleter {
   static_assert(std::is_pod<T>::value, "T must be POD");

  public:
-  explicit PODDeleter(Place place, bool use_pinned = false)
-      : place_(place), use_pinned_(use_pinned) {}
-  void operator()(T* ptr) {
-    Free(place_, static_cast<void*>(ptr), use_pinned_);
-  }
+  explicit PODDeleter(Place place, bool is_pinned = false)
+      : place_(place), is_pinned_(is_pinned) {}
+  void operator()(T* ptr) { Free(place_, static_cast<void*>(ptr), is_pinned_); }

  private:
   Place place_;
-  bool use_pinned_;
+  bool is_pinned_;
 };

 /**
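PODDeleter now carries is_pinned_ so that the matching Free overload runs on destruction. Here is a caller-side sketch that pairs Alloc with PODDeleter the same way PlaceholderImpl in tensor.h wires its holder; the place.h include path and the CUDAPlace constructor are assumed, everything else is taken from this diff.

#include <cstdint>
#include <memory>

#include "paddle/fluid/memory/memory.h"
#include "paddle/fluid/platform/place.h"  // assumed path for CUDAPlace

// Illustrative sketch: allocate with is_pinned = true and hand the same
// flag to PODDeleter, so ~unique_ptr calls Free(place, ptr, true).
void PinnedBufferSketch() {
  namespace memory = paddle::memory;
  namespace platform = paddle::platform;

  platform::CUDAPlace gpu(0);
  constexpr bool is_pinned = true;

  using Deleter = memory::PODDeleter<std::uint8_t, platform::CUDAPlace>;
  std::unique_ptr<std::uint8_t, Deleter> buffer(
      static_cast<std::uint8_t*>(memory::Alloc(gpu, 4096, is_pinned)),
      Deleter(gpu, is_pinned));

  // `buffer` releases the block through the pinned-aware Free overload
  // when it goes out of scope.
}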