机器未来 / Paddle (forked from PaddlePaddle / Paddle)
Commit 76b4dd62
Authored Jul 12, 2017 by fengjiayi; committed by GitHub on Jul 12, 2017
Merge pull request #2809 from Canpio/dev_add_tensor_interface
add tensor interfaces
Parents: 2749b71f, 2dccab87
Showing 2 changed files with 166 additions and 31 deletions (+166, -31)
paddle/framework/tensor.h        +48, -9
paddle/framework/tensor_test.cc  +118, -22
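Before the diffs, a minimal usage sketch of the interfaces this commit adds (default construction, mutable_data, data, ShareDataFrom, Slice, dims). The calls mirror those in tensor_test.cc below; the function name example() is mine, and, per the comment in that test file, Memory::Alloc() and Memory::Free() are not ready yet, so treat this as illustrative rather than buildable.

// Illustrative sketch only; mirrors tensor_test.cc. make_ddim() and
// CPUPlace() are assumed visible via paddle::framework / paddle::platform,
// exactly as the tests use them.
#include "paddle/framework/tensor.h"

void example() {
  using namespace paddle::framework;
  using namespace paddle::platform;

  Tensor t;
  // Allocates (or re-uses) a block large enough for 5 * 3 * 4 ints.
  int* p = t.mutable_data<int>(make_ddim({5, 3, 4}), CPUPlace());

  // Read-only access; valid only after mutable_data() has been called.
  const int* q = t.data<int>();

  // Alias the same underlying holder_ block from a second tensor.
  Tensor shared;
  shared.ShareDataFrom(t);

  // Rows [1, 3) along dimension 0: dims() becomes {2, 3, 4}; no copy is
  // made, only offset_ advances within the shared block.
  Tensor s = t.Slice(1, 3);

  (void)p; (void)q; (void)s;
}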
paddle/framework/tensor.h

@@ -14,6 +14,7 @@ limitations under the License. */
 #pragma once
 #include <cstdint>
 #include <memory>
+#include <type_traits>
 #include "paddle/framework/ddim.h"
@@ -26,31 +27,65 @@ namespace framework {
 class Tensor {
  public:
+  Tensor() : offset_(0) {}
+
+  explicit Tensor(const DDim& dims) : dims_(dims), offset_(0) {}
+
   template <typename T>
   const T* data() const {
-    PADDLE_ENFORCE(holder_ != nullptr,
-                   "Tensor::data must be called after Tensor::mutable_data.");
-    return static_cast<const T*>(holder_->Ptr());
+    PADDLE_ENFORCE(
+        holder_ != nullptr,
+        "Tenosr has not been initialized. Call Tensor::mutable_data first.");
+    return reinterpret_cast<const T*>(
+        reinterpret_cast<uintptr_t>(holder_->Ptr()) + offset_);
   }
 
   template <typename T,  // must be POD types
             typename std::enable_if<std::is_pod<T>::value>::type* = nullptr>
   T* mutable_data(DDim dims, paddle::platform::Place place) {
+    dims_ = dims;
     if (holder_ == nullptr ||
         !(holder_->Place() ==
           place) /* some versions of boost::variant don't have operator!= */
-        || holder_->Size() < product(dims) * sizeof(T)) {
+        || holder_->Size() < product(dims) * sizeof(T) + offset_) {
       holder_.reset(new PlaceholderImpl<T>(place, product(dims) * sizeof(T)));
+      offset_ = 0;
     }
-    return static_cast<T*>(holder_->Ptr());
+    return reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(holder_->Ptr()) +
+                                offset_);
   }
 
-  template <typename T,  // must be POD types
-            typename std::enable_if<std::is_pod<T>::value>::type* = nullptr>
-  T* mutable_data(DDim dims) {
-    return mutable_data<T>(dims, paddle::platform::get_place());
+  void ShareDataFrom(const Tensor& src) {
+    PADDLE_ENFORCE(src.holder_ != nullptr,
+                   "Can not share data from an uninitialized tensor.");
+    holder_ = src.holder_;
+    dims_ = src.dims_;
+    offset_ = src.offset_;
+  }
+
+  Tensor Slice(const int& begin_idx, const int& end_idx) const {
+    PADDLE_ENFORCE(holder_ != nullptr,
+                   "The sliced tenosr has not been initialized.");
+    PADDLE_ENFORCE(begin_idx >= 0 && end_idx <= dims_[0],
+                   "Slice index is less than zero or out of bound.");
+    PADDLE_ENFORCE(begin_idx < end_idx,
+                   "Begin index must be less than end index.");
+    PADDLE_ENFORCE(dims_[0] != 1, "Can not slice a tensor with dims_[0] = 1.");
+    std::vector<int> d = vectorize(dims_);
+    int base = 1;
+    for (size_t i = 1; i < d.size(); ++i) {
+      base *= d[i];
+    }
+    Tensor dst;
+    dst.holder_ = holder_;
+    dst.dims_ = dims_;
+    dst.dims_[0] = end_idx - begin_idx;
+    dst.offset_ = offset_ + begin_idx * base * holder_->TypeSize();
+    return dst;
   }
 
+  DDim dims() const { return dims_; }
+
  private:
   // Placeholder hides type T, so it doesn't appear as a template
   // parameter of Variable.
@@ -59,6 +94,7 @@ class Tensor {
     virtual void* Ptr() const = 0;
     virtual paddle::platform::Place Place() const = 0;
     virtual size_t Size() const = 0;
+    virtual size_t TypeSize() const = 0;
   };
 
   template <typename T>
@@ -85,6 +121,7 @@ class Tensor {
     virtual void* Ptr() const { return static_cast<void*>(ptr_.get()); }
     virtual size_t Size() const { return size_; }
     virtual paddle::platform::Place Place() const { return place_; }
+    virtual size_t TypeSize() const { return sizeof(T); }
 
     std::unique_ptr<T, Deleter> ptr_;
     paddle::platform::Place place_;  // record the place of ptr_.
@@ -92,6 +129,8 @@ class Tensor {
   };
 
   std::shared_ptr<Placeholder> holder_;  // holds the memory block if allocated.
+  DDim dims_;
+  size_t offset_;  // marks the begin of tensor data area.
 };
 
 }  // namespace framework
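To make the Slice() arithmetic above concrete: base is the product of every dimension after the first, and the slice begins begin_idx * base * TypeSize() bytes into the shared block. A standalone sketch (my own illustration, not part of the commit) for the {5, 3, 4} int case that tensor_test.cc exercises with src_tensor.Slice(1, 3):

// Standalone check of Slice's offset math; compiles and runs on its own.
#include <cassert>
#include <cstddef>
#include <vector>

int main() {
  std::vector<int> dims = {5, 3, 4};
  int base = 1;
  for (size_t i = 1; i < dims.size(); ++i) {
    base *= dims[i];  // base = 3 * 4 = 12 elements per step of dim 0
  }
  int begin_idx = 1;  // Slice(1, 3)
  size_t offset = begin_idx * base * sizeof(int);  // 48 bytes
  // Matches the test's expectation:
  // EXPECT_EQ(src_data_address + 3 * 4 * 1 * sizeof(int), slice_data_address)
  assert(offset == 3 * 4 * 1 * sizeof(int));
  return 0;
}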
paddle/framework/tensor_test.cc

@@ -15,15 +15,27 @@
 #include <gtest/gtest.h>
 #include <string>
 
-TEST(Tensor, ASSERT) {
-  paddle::framework::Tensor cpu_tensor;
+TEST(Tensor, Dims) {
+  using namespace paddle::framework;
+  using namespace paddle::platform;
+  Tensor tt(make_ddim({2, 3, 4}));
+  DDim dims = tt.dims();
+  ASSERT_EQ(arity(dims), 3);
+  for (int i = 0; i < 3; ++i) {
+    EXPECT_EQ(i + 2, dims[i]);
+  }
+}
+
+TEST(Tensor, DataAssert) {
+  paddle::framework::Tensor src_tensor;
 
   bool caught = false;
   try {
-    const double* p __attribute__((unused)) = cpu_tensor.data<double>();
+    src_tensor.data<double>();
   } catch (paddle::framework::EnforceNotMet err) {
     caught = true;
-    std::string msg = "Tensor::data must be called after Tensor::mutable_data.";
+    std::string msg =
+        "Tenosr has not been initialized. Call Tensor::mutable_data first.";
     const char* what = err.what();
     for (size_t i = 0; i < msg.length(); ++i) {
       ASSERT_EQ(what[i], msg[i]);
@@ -32,54 +44,138 @@ TEST(Tensor, ASSERT) {
   ASSERT_TRUE(caught);
 }
 
-/* mutable_data() is not tested at present
+/* following tests are not available at present
    because Memory::Alloc() and Memory::Free() have not been ready.
 
 TEST(Tensor, MutableData) {
   using namespace paddle::framework;
   using namespace paddle::platform;
   {
-    Tensor cpu_tensor;
+    Tensor src_tensor;
     float* p1 = nullptr;
     float* p2 = nullptr;
     // initialization
-    p1 = cpu_tensor.mutable_data<float>(make_ddim({1, 2, 3}), CPUPlace());
+    p1 = src_tensor.mutable_data<float>(make_ddim({1, 2, 3}), CPUPlace());
     EXPECT_NE(p1, nullptr);
-    // set cpu_tensor a new dim with large size
+    // set src_tensor a new dim with large size
     // momery is supposed to be re-allocated
-    p2 = cpu_tensor.mutable_data<float>(make_ddim({3, 4}));
+    p2 = src_tensor.mutable_data<float>(make_ddim({3, 4}), CPUPlace());
     EXPECT_NE(p2, nullptr);
     EXPECT_NE(p1, p2);
-    // set cpu_tensor a new dim with same size
+    // set src_tensor a new dim with same size
     // momery block is supposed to be unchanged
-    p1 = cpu_tensor.mutable_data<float>(make_ddim({2, 2, 3}));
+    p1 = src_tensor.mutable_data<float>(make_ddim({2, 2, 3}), CPUPlace());
     EXPECT_EQ(p1, p2);
-    // set cpu_tensor a new dim with smaller size
+    // set src_tensor a new dim with smaller size
     // momery block is supposed to be unchanged
-    p2 = cpu_tensor.mutable_data<float>(make_ddim({2, 2}));
+    p2 = src_tensor.mutable_data<float>(make_ddim({2, 2}), CPUPlace());
     EXPECT_EQ(p1, p2);
   }
   {
-    Tensor gpu_tensor;
+    Tensor src_tensor;
     float* p1 = nullptr;
     float* p2 = nullptr;
     // initialization
-    p1 = gpu_tensor.mutable_data<float>(make_ddim({1, 2, 3}), GPUPlace());
+    p1 = src_tensor.mutable_data<float>(make_ddim({1, 2, 3}), GPUPlace());
     EXPECT_NE(p1, nullptr);
-    // set gpu_tensor a new dim with large size
+    // set src_tensor a new dim with large size
     // momery is supposed to be re-allocated
-    p2 = gpu_tensor.mutable_data<float>(make_ddim({3, 4}));
+    p2 = src_tensor.mutable_data<float>(make_ddim({3, 4}), GPUPlace());
     EXPECT_NE(p2, nullptr);
     EXPECT_NE(p1, p2);
-    // set gpu_tensor a new dim with same size
+    // set src_tensor a new dim with same size
     // momery block is supposed to be unchanged
-    p1 = gpu_tensor.mutable_data<float>(make_ddim({2, 2, 3}));
+    p1 = src_tensor.mutable_data<float>(make_ddim({2, 2, 3}), GPUPlace());
     EXPECT_EQ(p1, p2);
-    // set gpu_tensor a new dim with smaller size
+    // set src_tensor a new dim with smaller size
     // momery block is supposed to be unchanged
-    p2 = gpu_tensor.mutable_data<float>(make_ddim({2, 2}));
+    p2 = src_tensor.mutable_data<float>(make_ddim({2, 2}), GPUPlace());
     EXPECT_EQ(p1, p2);
   }
 }
-*/
+
+TEST(Tensor, ShareDataFrom) {
+  using namespace paddle::framework;
+  using namespace paddle::platform;
+  {
+    Tensor src_tensor;
+    Tensor dst_tensor;
+    // Try to share data form uninitialized tensor
+    bool caught = false;
+    try {
+      dst_tensor.ShareDataFrom(src_tensor);
+    } catch (EnforceNotMet err) {
+      caught = true;
+      std::string msg = "Can not share data from an uninitialized tensor.";
+      const char* what = err.what();
+      for (size_t i = 0; i < msg.length(); ++i) {
+        ASSERT_EQ(what[i], msg[i]);
+      }
+    }
+    ASSERT_TRUE(caught);
+
+    src_tensor.mutable_data<int>(make_ddim({2, 3, 4}), CPUPlace());
+    dst_tensor.ShareDataFrom(src_tensor);
+    ASSERT_EQ(src_tensor.data<int>(), dst_tensor.data<int>());
+  }
+  {
+    Tensor src_tensor;
+    Tensor dst_tensor;
+    src_tensor.mutable_data<int>(make_ddim({2, 3, 4}), GPUPlace());
+    dst_tensor.ShareDataFrom(src_tensor);
+    ASSERT_EQ(src_tensor.data<int>(), dst_tensor.data<int>());
+  }
+}
+
+TEST(Tensor, Slice) {
+  using namespace paddle::framework;
+  using namespace paddle::platform;
+  {
+    Tensor src_tensor;
+    src_tensor.mutable_data<int>(make_ddim({5, 3, 4}), CPUPlace());
+    Tensor slice_tensor = src_tensor.Slice(1, 3);
+    DDim slice_dims = slice_tensor.dims();
+    ASSERT_EQ(arity(slice_dims), 3);
+    EXPECT_EQ(slice_dims[0], 2);
+    EXPECT_EQ(slice_dims[1], 3);
+    EXPECT_EQ(slice_dims[2], 4);
+
+    uintptr_t src_data_address =
+        reinterpret_cast<uintptr_t>(src_tensor.data<int>());
+    uintptr_t src_mutable_data_address = reinterpret_cast<uintptr_t>(
+        src_tensor.mutable_data<int>(src_tensor.dims(), CPUPlace()));
+    uintptr_t slice_data_address =
+        reinterpret_cast<uintptr_t>(slice_tensor.data<int>());
+    uintptr_t slice_mutable_data_address = reinterpret_cast<uintptr_t>(
+        slice_tensor.mutable_data<int>(slice_tensor.dims(), CPUPlace()));
+    EXPECT_EQ(src_data_address, src_mutable_data_address);
+    EXPECT_EQ(slice_data_address, slice_mutable_data_address);
+    EXPECT_EQ(src_data_address + 3 * 4 * 1 * sizeof(int), slice_data_address);
+  }
+  {
+    Tensor src_tensor;
+    src_tensor.mutable_data<double>(make_ddim({6, 9}), GPUPlace());
+    Tensor slice_tensor = src_tensor.Slice(2, 6);
+    DDim slice_dims = slice_tensor.dims();
+    ASSERT_EQ(arity(slice_dims), 2);
+    EXPECT_EQ(slice_dims[0], 4);
+    EXPECT_EQ(slice_dims[1], 9);
+
+    uintptr_t src_data_address =
+        reinterpret_cast<uintptr_t>(src_tensor.data<double>());
+    uintptr_t src_mutable_data_address = reinterpret_cast<uintptr_t>(
+        src_tensor.mutable_data<double>(src_tensor.dims(), GPUPlace()));
+    uintptr_t slice_data_address =
+        reinterpret_cast<uintptr_t>(slice_tensor.data<double>());
+    uintptr_t slice_mutable_data_address = reinterpret_cast<uintptr_t>(
+        slice_tensor.mutable_data<double>(slice_tensor.dims(), GPUPlace()));
+    EXPECT_EQ(src_data_address, src_mutable_data_address);
+    EXPECT_EQ(slice_data_address, slice_mutable_data_address);
+    EXPECT_EQ(src_data_address + 9 * 2 * sizeof(double), slice_data_address);
+  }
+}
+*/
\ No newline at end of file
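A closing note on the reallocation rule that TEST(Tensor, MutableData) encodes: mutable_data() replaces the holder only when it lives on a different place or is smaller than product(dims) * sizeof(T) plus offset_ (zero throughout this test). A standalone sketch of just the size bookkeeping (my own illustration, plain arithmetic with no Paddle types):

// Size bookkeeping behind the p1/p2 pointer (in)equalities in
// TEST(Tensor, MutableData); prints 1 for re-allocate, 0 for re-use.
#include <cstddef>
#include <initializer_list>
#include <iostream>

static size_t bytes(std::initializer_list<int> dims) {
  size_t p = sizeof(float);
  for (int d : dims) p *= d;
  return p;
}

int main() {
  size_t held = bytes({1, 2, 3});                  // first block: 24 bytes
  std::cout << (bytes({3, 4}) > held) << "\n";     // 48 > 24: re-allocated,
                                                   // hence EXPECT_NE(p1, p2)
  held = bytes({3, 4});
  std::cout << (bytes({2, 2, 3}) > held) << "\n";  // 48 <= 48: block re-used,
                                                   // hence EXPECT_EQ(p1, p2)
  std::cout << (bytes({2, 2}) > held) << "\n";     // 16 <= 48: re-used again
  return 0;
}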