Commit c48fc4d8
Authored Jul 14, 2017 by fengjiayi; committed via GitHub on Jul 14, 2017
Merge pull request #2825 from Canpio/dev_add_tensor_copy
Add Tensor::CopyFrom and Tensor::mutable_data(Place place)
Parents: 3f5e650d 57a22db3
Showing 2 changed files with 101 additions and 41 deletions (+101, -41)
paddle/framework/tensor.h: +65, -30
paddle/framework/tensor_test.cc: +36, -11
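Before the file-by-file diff, a minimal usage sketch of the API this PR adds, pieced together from the new tensor_test.cc cases below. The include path and the make_ddim/CPUPlace helpers are taken from that test file; treat the snippet as an illustration, not code from the commit.

// Illustrative only: assembled from the new tests in
// paddle/framework/tensor_test.cc; not part of this commit.
#include "paddle/framework/tensor.h"

void TensorCopyExample() {
  using namespace paddle::framework;
  using namespace paddle::platform;

  // New overload: set the dims and allocate in one call.
  Tensor src;
  int* src_ptr = src.mutable_data<int>(make_ddim({3, 3}), CPUPlace());
  for (int i = 0; i < 9; ++i) {
    src_ptr[i] = i;
  }

  // New CopyFrom: a deep, CPU-to-CPU copy. dst gets its own buffer,
  // unlike ShareDataFrom, which aliases the source holder.
  Tensor dst;
  dst.CopyFrom<int>(src, CPUPlace());
  const int* dst_ptr = dst.data<int>();  // distinct pointer from src_ptr
  (void)dst_ptr;
}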
paddle/framework/tensor.h
@@ -15,8 +15,8 @@ limitations under the License. */
 #pragma once
 #include <cstdint>
+#include <cstring>
 #include <memory>
-#include <type_traits>
 #include "paddle/framework/ddim.h"
 #include "paddle/framework/enforce.h"
 #include "paddle/memory/memory.h"
@@ -27,45 +27,63 @@ namespace framework {
 class Tensor {
  public:
-  Tensor() : offset_(0) {}
+  Tensor() : numel_(0), offset_(0) {}
 
   explicit Tensor(const DDim& dims) : dims_(dims), offset_(0) {}
 
   Tensor& operator=(const Tensor& src) = delete;
 
   template <typename T>
   const T* data() const {
-    PADDLE_ENFORCE(
-        holder_ != nullptr,
-        "Tenosr has not been initialized. Call Tensor::mutable_data first.");
+    CheckDims<T>();
     return reinterpret_cast<const T*>(
-        reinterpret_cast<uintptr_t>(holder_->Ptr()) + offset_);
+        reinterpret_cast<uintptr_t>(holder_->ptr()) + offset_);
   }
 
-  template <typename T,  // must be POD types
-            typename std::enable_if<std::is_pod<T>::value>::type* = nullptr>
+  template <typename T>
   T* mutable_data(DDim dims, paddle::platform::Place place) {
-    dims_ = dims;
+    set_dims(dims);
     return mutable_data<T>(place);
   }
 
   template <typename T>
   T* mutable_data(paddle::platform::Place place) {
+    PADDLE_ENFORCE(numel_ > 0,
+                   "Tensor::numel_ must be larger than zero to call "
+                   "Tensor::mutable_data. Call Tensor::set_dim first.");
     if (holder_ == nullptr ||
-        !(holder_->Place() ==
+        !(holder_->place() ==
           place) /* some versions of boost::variant don't have operator!= */
-        || holder_->Size() < product(dims) * sizeof(T) + offset_) {
-      holder_.reset(new PlaceholderImpl<T>(place, product(dims) * sizeof(T)));
+        || holder_->size() < numel_ * sizeof(T) + offset_) {
+      holder_.reset(new PlaceholderImpl<T>(place, numel_ * sizeof(T)));
       offset_ = 0;
     }
-    return reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(holder_->Ptr()) +
+    return reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(holder_->ptr()) +
                                 offset_);
   }
 
   template <typename T>
   void ShareDataFrom(const Tensor& src) {
-    PADDLE_ENFORCE(src.holder_ != nullptr,
-                   "Can not share data from an uninitialized tensor.");
+    src.CheckDims<T>();
     holder_ = src.holder_;
-    dims_ = src.dims_;
+    set_dims(src.dims());
     offset_ = src.offset_;
   }
 
+  template <typename T>
+  void CopyFrom(const Tensor& src, paddle::platform::Place dst_place) {
+    PADDLE_ENFORCE(platform::is_cpu_place(src.holder_->place()) &&
+                       platform::is_cpu_place(dst_place),
+                   "Tensor::CopyFrom only support CPU now.");
+    src.CheckDims<T>();
+    size_t size = src.numel_ * sizeof(T);
+    set_dims(src.dims());
+    const void* src_ptr = static_cast<const void*>(src.data<T>());
+    void* dst_ptr = static_cast<void*>(mutable_data<T>(dst_place));
+    memcpy(dst_ptr, src_ptr, size);
+  }
+
   template <typename T>
   Tensor Slice(const int& begin_idx, const int& end_idx) const {
-    PADDLE_ENFORCE(holder_ != nullptr,
-                   "The sliced tenosr has not been initialized.");
+    CheckDims<T>();
     PADDLE_ENFORCE(begin_idx >= 0 && end_idx <= dims_[0],
                    "Slice index is less than zero or out of bound.");
     PADDLE_ENFORCE(begin_idx < end_idx,
@@ -78,12 +96,21 @@ class Tensor {
     }
     Tensor dst;
     dst.holder_ = holder_;
-    dst.dims_ = dims_;
-    dst.dims_[0] = end_idx - begin_idx;
-    dst.offset_ = offset_ + begin_idx * base * holder_->TypeSize();
+    DDim dst_dims = dims_;
+    dst_dims[0] = end_idx - begin_idx;
+    dst.set_dims(dst_dims);
+    dst.offset_ = offset_ + begin_idx * base * sizeof(T);
     return dst;
   }
 
+  void set_dims(const DDim& dims) {
+    if (dims == dims_) {
+      return;
+    }
+    dims_ = dims;
+    numel_ = product(dims_);
+  }
+
   DDim dims() const { return dims_; }
 
  private:
@@ -91,10 +118,9 @@ class Tensor {
   // parameter of Variable.
   struct Placeholder {
     virtual ~Placeholder() {}
-    virtual void* Ptr() const = 0;
-    virtual paddle::platform::Place Place() const = 0;
-    virtual size_t Size() const = 0;
-    virtual size_t TypeSize() const = 0;
+    virtual void* ptr() const = 0;
+    virtual paddle::platform::Place place() const = 0;
+    virtual size_t size() const = 0;
   };
 
   template <typename T>
@@ -118,18 +144,27 @@ class Tensor {
           place_(place),
           size_(size) {}
 
-    virtual void* Ptr() const { return static_cast<void*>(ptr_.get()); }
-    virtual size_t Size() const { return size_; }
-    virtual paddle::platform::Place Place() const { return place_; }
-    virtual size_t TypeSize() const { return sizeof(T); }
+    virtual void* ptr() const { return static_cast<void*>(ptr_.get()); }
+    virtual size_t size() const { return size_; }
+    virtual paddle::platform::Place place() const { return place_; }
 
     std::unique_ptr<T, Deleter> ptr_;
     paddle::platform::Place place_;  // record the place of ptr_.
     size_t size_;                    // size of the memory block.
   };
 
+  template <typename T>
+  inline void CheckDims() const {
+    PADDLE_ENFORCE(holder_ != nullptr,
+                   "Tenosr holds no memory. Call Tensor::mutable_data first.");
+    PADDLE_ENFORCE(holder_->size() >= numel_ * sizeof(T) + offset_,
+                   "Tensor's dims_ is out of bound. Call Tensor::mutable_data "
+                   "first to re-allocate memory.");
+  }
+
   std::shared_ptr<Placeholder> holder_;  // holds the memory block if allocated.
   DDim dims_;
+  size_t numel_;   // cache of `product(dims_)`
   size_t offset_;  // marks the begin of tensor data area.
 };
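A side note on the tensor.h changes above: dims_ is now always set through set_dims(), which keeps the numel_ cache in sync, and every accessor (data, ShareDataFrom, Slice, CopyFrom) goes through the new CheckDims<T>() guard. A hypothetical sketch of the resulting call-order contract, using only names that appear in the diff above (the snippet itself is not in the commit):

// Hypothetical sketch of the call order the new checks enforce.
#include "paddle/framework/tensor.h"

void DimsBeforeDataExample() {
  using namespace paddle::framework;
  using namespace paddle::platform;

  Tensor t;
  t.set_dims(make_ddim({2, 3, 4}));                 // updates dims_, caches numel_ = 24
  float* buf = t.mutable_data<float>(CPUPlace());   // requires numel_ > 0
  buf[0] = 1.0f;
  const float* ro = t.data<float>();                // guarded by CheckDims<float>()
  (void)ro;
}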
paddle/framework/tensor_test.cc
@@ -18,7 +18,8 @@
 TEST(Tensor, Dims) {
   using namespace paddle::framework;
   using namespace paddle::platform;
-  Tensor tt(make_ddim({2, 3, 4}));
+  Tensor tt;
+  tt.set_dims(make_ddim({2, 3, 4}));
   DDim dims = tt.dims();
   ASSERT_EQ(arity(dims), 3);
   for (int i = 0; i < 3; ++i) {
@@ -35,7 +36,7 @@ TEST(Tensor, DataAssert) {
   } catch (paddle::framework::EnforceNotMet err) {
     caught = true;
-    std::string msg =
-        "Tenosr has not been initialized. Call Tensor::mutable_data first.";
+    std::string msg =
+        "Tenosr holds no memory. Call Tensor::mutable_data first.";
     const char* what = err.what();
     for (size_t i = 0; i < msg.length(); ++i) {
       ASSERT_EQ(what[i], msg[i]);
@@ -104,19 +105,18 @@ TEST(Tensor, ShareDataFrom) {
     // Try to share data form uninitialized tensor
     bool caught = false;
     try {
-      dst_tensor.ShareDataFrom(src_tensor);
+      dst_tensor.ShareDataFrom<float>(src_tensor);
     } catch (EnforceNotMet err) {
       caught = true;
-      std::string msg = "Can not share data from an uninitialized tensor.";
+      std::string msg =
+          "Tenosr holds no memory. Call Tensor::mutable_data first.";
       const char* what = err.what();
       for (size_t i = 0; i < msg.length(); ++i) {
         ASSERT_EQ(what[i], msg[i]);
       }
     }
     ASSERT_TRUE(caught);
 
     src_tensor.mutable_data<int>(make_ddim({2, 3, 4}), CPUPlace());
-    dst_tensor.ShareDataFrom(src_tensor);
+    dst_tensor.ShareDataFrom<int>(src_tensor);
     ASSERT_EQ(src_tensor.data<int>(), dst_tensor.data<int>());
   }
@@ -124,7 +124,7 @@ TEST(Tensor, ShareDataFrom) {
     Tensor src_tensor;
     Tensor dst_tensor;
     src_tensor.mutable_data<int>(make_ddim({2, 3, 4}), GPUPlace());
-    dst_tensor.ShareDataFrom(src_tensor);
+    dst_tensor.ShareDataFrom<int>(src_tensor);
     ASSERT_EQ(src_tensor.data<int>(), dst_tensor.data<int>());
   }
 }
@@ -135,7 +135,7 @@ TEST(Tensor, Slice) {
   {
     Tensor src_tensor;
     src_tensor.mutable_data<int>(make_ddim({5, 3, 4}), CPUPlace());
-    Tensor slice_tensor = src_tensor.Slice(1, 3);
+    Tensor slice_tensor = src_tensor.Slice<int>(1, 3);
     DDim slice_dims = slice_tensor.dims();
     ASSERT_EQ(arity(slice_dims), 3);
     EXPECT_EQ(slice_dims[0], 2);
@@ -158,7 +158,7 @@ TEST(Tensor, Slice) {
   {
     Tensor src_tensor;
     src_tensor.mutable_data<double>(make_ddim({6, 9}), GPUPlace());
-    Tensor slice_tensor = src_tensor.Slice(2, 6);
+    Tensor slice_tensor = src_tensor.Slice<double>(2, 6);
     DDim slice_dims = slice_tensor.dims();
     ASSERT_EQ(arity(slice_dims), 2);
     EXPECT_EQ(slice_dims[0], 4);
@@ -178,4 +178,29 @@ TEST(Tensor, Slice) {
   }
 }
 
+TEST(Tensor, CopyFrom) {
+  using namespace paddle::framework;
+  using namespace paddle::platform;
+
+  Tensor src_tensor;
+  int* src_ptr = src_tensor.mutable_data<int>(make_ddim({3, 3}), CPUPlace());
+  int arr[9] = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+  memcpy(src_ptr, arr, 9 * sizeof(int));
+
+  Tensor dst_tensor;
+  dst_tensor.CopyFrom<int>(src_tensor, CPUPlace());
+
+  const int* dst_ptr = dst_tensor.data<int>();
+  ASSERT_NE(src_ptr, dst_ptr);
+  for (size_t i = 0; i < 9; ++i) {
+    EXPECT_EQ(src_ptr[i], dst_ptr[i]);
+  }
+
+  Tensor slice_tensor = src_tensor.Slice<int>(1, 2);
+  dst_tensor.CopyFrom<int>(slice_tensor, CPUPlace());
+  const int* slice_ptr = slice_tensor.data<int>();
+  dst_ptr = dst_tensor.data<int>();
+  ASSERT_NE(dst_ptr, slice_ptr);
+  for (size_t i = 0; i < 3; ++i) {
+    EXPECT_EQ(dst_ptr[i], slice_ptr[i]);
+  }
+}
 */
\ No newline at end of file