Commit ffdcfe08
Authored Sep 02, 2017 by Yi Wang; committed via GitHub on Sep 02, 2017

Merge pull request #3784 from QiJune/refine_LODTensor

Refine LODTensor using composition instead of derivation

Parents: fc8a1afa, 835572af
Showing 3 changed files with 89 additions and 125 deletions (+89 -125):

  paddle/framework/lod_tensor.cc       +29 -12
  paddle/framework/lod_tensor.h        +33 -75
  paddle/framework/lod_tensor_test.cc  +27 -38
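The commit title describes the change in one line: the old header made LODTensor derive from Tensor and nest its own LOD class, while the new one treats LOD as a free type alias and has LODTensor hold a LOD plus a non-owned Tensor*. Below is a minimal, self-contained sketch of the new shape, distilled from the diffs that follow; the stand-in Tensor type is illustrative only and this is not the actual Paddle header.

#include <cstddef>
#include <vector>

struct Tensor {};  // stand-in for paddle::framework::Tensor

using LOD = std::vector<std::vector<size_t>>;  // one offset vector per level

// Composition instead of derivation: LODTensor *has* a LOD and a Tensor,
// rather than *being* a Tensor with a nested LOD class.
class LODTensor {
 public:
  LODTensor() = default;
  LODTensor(const LOD& lod, Tensor* t) : lod_(lod), tensor_(t) {}

  void set_lod(const LOD& lod) { lod_ = lod; }
  void set_tensor(Tensor* tensor) { tensor_ = tensor; }
  Tensor& tensor() { return *tensor_; }
  size_t NumLevels() const { return lod_.size(); }

 private:
  LOD lod_;
  Tensor* tensor_ = nullptr;  // not owned
};

int main() {
  Tensor t;
  LODTensor lt(LOD{{0, 10, 20}}, &t);
  return lt.NumLevels() == 1 ? 0 : 1;
}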
paddle/framework/lod_tensor.cc  (view file @ ffdcfe08)
...
@@ -19,25 +19,24 @@
 namespace paddle {
 namespace framework {
 
-LODTensor::LOD LODTensor::LOD::SliceLevels(size_t level_begin,
-                                           size_t level_end) const {
+LOD SliceLevels(const LOD& in, size_t level_begin, size_t level_end) {
   LOD new_lod;
   new_lod.reserve(level_end - level_begin);
   for (size_t i = level_begin; i < level_end; i++) {
-    new_lod.emplace_back(at(i));
+    new_lod.emplace_back(in.at(i));
   }
   return new_lod;
 }
 
-LODTensor::LOD LODTensor::LOD::SliceInLevel(size_t level, size_t elem_begin,
-                                            size_t elem_end) const {
+LOD SliceInLevel(const LOD& in, size_t level, size_t elem_begin,
+                 size_t elem_end) {
   // slice the lod.
   LOD new_lod;
-  new_lod.reserve(size() - level);
-  auto start = this->at(level)[elem_begin];
-  auto end = this->at(level)[elem_end];
+  new_lod.reserve(in.size() - level);
+  auto start = in.at(level)[elem_begin];
+  auto end = in.at(level)[elem_end];
 
-  for (auto it = this->begin() + level; it != this->end(); it++) {
+  for (auto it = in.begin() + level; it != in.end(); it++) {
     auto it_begin = std::find(it->begin(), it->end(), start);
     auto it_end = std::find(it_begin, it->end(), end);
     PADDLE_ENFORCE(it_begin != it->end(), "error in parsing lod info");
...
@@ -49,11 +48,11 @@ LODTensor::LOD LODTensor::LOD::SliceInLevel(size_t level, size_t elem_begin,
                    [start](int v) { return v - start; });
     PADDLE_ENFORCE_EQ(new_lod.back().front(), 0, "error in slice LOD");
   }
-  PADDLE_ENFORCE_LE(new_lod.size(), this->size());
+  PADDLE_ENFORCE_LE(new_lod.size(), in.size());
   return new_lod;
 }
 
-bool operator==(const LODTensor::LOD& a, const LODTensor::LOD& b) {
+bool operator==(const LOD& a, const LOD& b) {
   if (a.size() != b.size()) {
     return false;
   }
...
@@ -70,9 +69,27 @@ bool operator==(const LODTensor::LOD& a, const LODTensor::LOD& b) {
     }
   }
   return true;
 }
 
+void LODTensor::SliceLevels(size_t level_begin, size_t level_end) {
+  auto new_lod = framework::SliceLevels(lod_, level_begin, level_end);
+  lod_ = new_lod;
+}
+
+void LODTensor::SliceInLevel(size_t level, size_t elem_begin, size_t elem_end) {
+  PADDLE_ENFORCE(level < NumLevels(), "level [%d] out of range [%d]", level,
+                 NumLevels());
+  PADDLE_ENFORCE(elem_begin < NumElements(level),
+                 "element begin [%d] out of range [%d]", elem_begin,
+                 NumElements(level));
+  PADDLE_ENFORCE(elem_end < NumElements(level) + 1,
+                 "element end [%d] out of range [%d]", elem_end,
+                 NumElements(level));
+  auto new_lod = framework::SliceInLevel(lod_, level, elem_begin, elem_end);
+  lod_ = new_lod;
+}
+
 }  // namespace framework
 }  // namespace paddle
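The .cc changes above move the slicing logic off the nested LODTensor::LOD class and into free functions that take the LOD by const reference, while the LODTensor member functions now update lod_ in place. As a standalone sketch, the free-function form of SliceLevels can be exercised on its own (plain std::vector stands in for Vector<T>; this is an illustration, not the Paddle build):

#include <cassert>
#include <cstddef>
#include <vector>

using LOD = std::vector<std::vector<size_t>>;

// Keep only levels [level_begin, level_end), as framework::SliceLevels does.
LOD SliceLevels(const LOD& in, size_t level_begin, size_t level_end) {
  LOD new_lod;
  new_lod.reserve(level_end - level_begin);
  for (size_t i = level_begin; i < level_end; i++) {
    new_lod.emplace_back(in.at(i));
  }
  return new_lod;
}

int main() {
  LOD lod{{0, 10, 20}, {0, 5, 10, 15, 20}};
  LOD sliced = SliceLevels(lod, 1, 2);
  assert(sliced.size() == 1 && sliced[0].size() == 5);
  return 0;
}

Because the function only copies offset vectors, slicing levels never touches the underlying tensor data; the member LODTensor::SliceLevels simply replaces lod_ with the result.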
paddle/framework/lod_tensor.h  (view file @ ffdcfe08)
...
@@ -15,7 +15,7 @@
 #pragma once
 #include <memory>
-#if !defined(PADDLE_ONLY_CPU)
+#ifndef PADDLE_ONLY_CPU
 #include <thrust/device_vector.h>
 #include <thrust/host_vector.h>
 #endif
...
@@ -27,33 +27,39 @@
 namespace paddle {
 namespace framework {
 
+#ifdef PADDLE_ONLY_CPU
+template <typename T>
+using Vector = std::vector<T>;
+#else
+template <typename T>
+using Vector = thrust::host_vector<T>;
+#endif
+
+using LOD = std::vector<Vector<size_t>>;
+
+LOD SliceLevels(const LOD& in, size_t level_begin, size_t level_end);
+
+LOD SliceInLevel(const LOD& in, size_t level, size_t elem_begin,
+                 size_t elem_end);
+
+bool operator==(const LOD& a, const LOD& b);
+
 /*
  * LODTensor (Level of details Tensor)
  * see https://en.wikipedia.org/wiki/Level_of_details for reference.
  */
-class LODTensor : public Tensor {
+class LODTensor {
  public:
-  // Level save offsets of each unit.
-#ifdef PADDLE_ONLY_CPU
-  template <typename T>
-  using Vector = std::vector<T>;
-#else
-  template <typename T>
-  using Vector = thrust::host_vector<T>;
-#endif
-
-  // LoD stores offsets of each level of units, the largest units level first,
-  // then the smaller units level. Each Level stores the offsets of units in
-  // Tesor.
-  class LOD : public std::vector<Vector<size_t>> {
-   public:
-    LOD SliceLevels(size_t level_begin, size_t level_end) const;
-    LOD SliceInLevel(size_t level, size_t elem_begin, size_t elem_end) const;
-  };
-
   LODTensor() {}
-  explicit LODTensor(const LOD& lod) : lod_(lod) {}
+  LODTensor(const LOD& lod, Tensor* t) : lod_(lod), tensor_(t) {}
+
+  void set_lod(const LOD& lod) { lod_ = lod; }
 
-  virtual Tensor* Clone() const { return new LODTensor(lod_); }
+  void set_tensor(Tensor* tensor) { tensor_ = tensor; }
+
+  Tensor& tensor() { return *tensor_; }
+
+  LOD lod() { return lod_; }
 
   /*
    * Get a element from LOD.
...
@@ -79,71 +85,23 @@ class LODTensor : public Tensor {
     PADDLE_ENFORCE(level < NumLevels(), "level [%d] out of range [%d]", level,
                    NumLevels());
     // the last offset is the end of last element
-    return lod_[level].size() - 1;
+    return (lod_)[level].size() - 1;
   }
 
   /*
-   * Slice of levels[level_begin:level_end], with tensor shared.
+   * Slice of levels[level_begin:level_end]
    */
-  template <typename T>
-  LODTensor SliceLevels(size_t level_begin, size_t level_end) const;
+  void SliceLevels(size_t level_begin, size_t level_end);
 
   /*
-   * Slice of elements of a level, [elem_begin: elem_end], with tensor shared.
+   * Slice of elements of a level, [elem_begin: elem_end]
    * @note: low performance in slice lod_.
    */
-  template <typename T>
-  LODTensor SliceInLevel(size_t level, size_t elem_begin, size_t elem_end) const;
-
-  /*
-   * Copy other's lod_'s content, free to mutate.
-   */
-  void CopyLOD(const LODTensor& other) { lod_ = other.lod_; }
-
-  /*
-   * Determine whether LODTensor has a valid LOD info.
-   */
-  const LOD& lod() const { return lod_; }
-  LOD* mutable_lod() { return &lod_; }
-
-  virtual ~LODTensor() {}
+  void SliceInLevel(size_t level, size_t elem_begin, size_t elem_end);
 
  private:
   LOD lod_;
+  Tensor* tensor_;  // not owned
 };
 
-bool operator==(const LODTensor::LOD& a, const LODTensor::LOD& b);
-
-template <typename T>
-LODTensor LODTensor::SliceLevels(size_t level_begin, size_t level_end) const {
-  auto new_lod = lod_.SliceLevels(level_begin, level_end);
-  // slice levels just need to update LOD info, each level will contains the
-  // whole tensor_, so no need to modify tensor_.
-  LODTensor new_tensor(new_lod);
-  new_tensor.ShareDataWith<T>(*this);
-  return new_tensor;
-}
-
-template <typename T>
-LODTensor LODTensor::SliceInLevel(size_t level, size_t elem_begin,
-                                  size_t elem_end) const {
-  PADDLE_ENFORCE(level < NumLevels(), "level [%d] out of range [%d]", level,
-                 NumLevels());
-  PADDLE_ENFORCE(elem_begin < NumElements(level),
-                 "element begin [%d] out of range [%d]", elem_begin,
-                 NumElements(level));
-  PADDLE_ENFORCE(elem_end < NumElements(level) + 1,
-                 "element end [%d] out of range [%d]", elem_end,
-                 NumElements(level));
-  auto new_lod = lod_.SliceInLevel(level, elem_begin, elem_end);
-  // slice elements just need to update LOD info, because offsets are not
-  // changed, so the original tensor_ can be reused.
-  LODTensor new_tensor(new_lod);
-  new_tensor.ShareDataWith<T>(*this);
-  return new_tensor;
-}
-
 }  // namespace framework
 }  // namespace paddle
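The header's old comment describes LOD as one offset vector per level, coarsest level first; each level therefore stores one more offset than it has elements, which is why NumElements(level) returns lod_[level].size() - 1. A small standalone check of that invariant, using the same three-level LOD the test fixture below builds (plain std::vector in place of Vector<T>; illustrative only):

#include <cassert>
#include <cstddef>
#include <vector>

using LOD = std::vector<std::vector<size_t>>;

// Offsets bracket the units, so a level with N units stores N + 1 offsets.
size_t NumElements(const LOD& lod, size_t level) {
  return lod.at(level).size() - 1;
}

int main() {
  // Same three-level LOD as in lod_tensor_test.cc: 2, 4, and 8 units.
  LOD lod{{0, 10, 20},
          {0, 5, 10, 15, 20},
          {0, 2, 5, 7, 10, 12, 15, 17, 20}};
  assert(NumElements(lod, 0) == 2);
  assert(NumElements(lod, 1) == 4);
  assert(NumElements(lod, 2) == 8);
  return 0;
}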
paddle/framework/lod_tensor_test.cc  (view file @ ffdcfe08)
...
@@ -24,13 +24,12 @@ namespace framework {
 class LODTensorTester : public ::testing::Test {
  public:
   virtual void SetUp() override {
-    lod_tensor.reset(new LODTensor);
     // tensor's batch_size: 30
     // 3 levels
     // 0 10 20
     // 0 5 10 15 20
     // 0 2 5 7 10 12 15 20
-    LODTensor::LOD lod;
+    LOD lod;
     lod.push_back(std::vector<size_t>{0, 10, 20});
     lod.push_back(std::vector<size_t>{0, 5, 10, 15, 20});
     lod.push_back(std::vector<size_t>{0, 2, 5, 7, 10, 12, 15, 17, 20});
...
@@ -41,75 +40,65 @@ class LODTensorTester : public ::testing::Test {
     // malloc memory
     tensor.mutable_data<float>(place);
 
-    lod_tensor.reset(new LODTensor(lod));
-    lod_tensor->Resize({20 /*batch size*/, 128 /*dim*/});
-
-    lod_tensor->ShareDataWith<float>(tensor);
-    // lod_tensor->ShareDataWith<Tensor>(tensor);
+    lod_tensor.set_lod(lod);
+    lod_tensor.set_tensor(&tensor);
   }
 
  protected:
-  std::unique_ptr<LODTensor> lod_tensor;
   platform::CPUPlace place;
   Tensor tensor;
+  LODTensor lod_tensor;
 };
 
-TEST_F(LODTensorTester, NumLevels) { ASSERT_EQ(lod_tensor->NumLevels(), 3UL); }
+TEST_F(LODTensorTester, NumLevels) { ASSERT_EQ(lod_tensor.NumLevels(), 3UL); }
 
 TEST_F(LODTensorTester, NumElements) {
-  ASSERT_EQ(lod_tensor->NumElements(0), 2UL);
-  ASSERT_EQ(lod_tensor->NumElements(1), 4UL);
-  ASSERT_EQ(lod_tensor->NumElements(2), 8UL);
+  ASSERT_EQ(lod_tensor.NumElements(0), 2UL);
+  ASSERT_EQ(lod_tensor.NumElements(1), 4UL);
+  ASSERT_EQ(lod_tensor.NumElements(2), 8UL);
 }
 
 TEST_F(LODTensorTester, SliceLevels) {
   // slice 1 level
   for (size_t level = 0; level < 3UL; ++level) {
-    auto new_lod_tensor = lod_tensor->SliceLevels<float>(level, level + 1);
+    LODTensor new_lod_tensor = lod_tensor;
+    new_lod_tensor.SliceLevels(level, level + 1);
     ASSERT_EQ(new_lod_tensor.NumLevels(), 1UL);
-    ASSERT_EQ(new_lod_tensor.NumElements(0UL), lod_tensor->NumElements(level));
-    // ASSERT_EQ(new_lod_tensor, *lod_tensor);
+    ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor.NumElements(level));
+    ASSERT_EQ(new_lod_tensor.tensor().data<float>(),
+              lod_tensor.tensor().data<float>());
   }
   // slice 2 level
   for (size_t level = 0; level < 2UL; ++level) {
-    auto new_lod_tensor = lod_tensor->SliceLevels<float>(level, level + 2);
+    LODTensor new_lod_tensor = lod_tensor;
+    new_lod_tensor.SliceLevels(level, level + 2);
     ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL);
-    ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor->NumElements(level));
-    ASSERT_EQ(new_lod_tensor.NumElements(1), lod_tensor->NumElements(level + 1));
-    ASSERT_EQ(new_lod_tensor.data<float>(), lod_tensor->data<float>());
+    ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor.NumElements(level));
+    ASSERT_EQ(new_lod_tensor.NumElements(1),
+              lod_tensor.NumElements(level + 1));
+    ASSERT_EQ(new_lod_tensor.tensor().data<float>(),
+              lod_tensor.tensor().data<float>());
   }
 }
 
 TEST_F(LODTensorTester, SliceInLevel) {
   size_t level = 0;
-  auto new_lod_tensor = lod_tensor->SliceInLevel<float>(level, 0, 2);
+  LODTensor new_lod_tensor = lod_tensor;
+  new_lod_tensor.SliceInLevel(level, 0, 2);
   EXPECT_EQ(new_lod_tensor.NumLevels(), 3UL);
   EXPECT_EQ(new_lod_tensor.NumElements(0), 2UL);
   EXPECT_EQ(new_lod_tensor.NumElements(1), 4UL);
   EXPECT_EQ(new_lod_tensor.NumElements(2), 8UL);
-  ASSERT_EQ(new_lod_tensor.data<float>(), lod_tensor->data<float>());
+  ASSERT_EQ(new_lod_tensor.tensor().data<float>(),
+            lod_tensor.tensor().data<float>());
 
   level = 1;
-  new_lod_tensor = lod_tensor->SliceInLevel<float>(level, 0, 2);
+  new_lod_tensor = lod_tensor;
+  new_lod_tensor.SliceInLevel(level, 0, 2);
   ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL);
   ASSERT_EQ(new_lod_tensor.NumElements(0), 2UL);
   ASSERT_EQ(new_lod_tensor.NumElements(1), 4UL);
-  ASSERT_EQ(new_lod_tensor.data<float>(), lod_tensor->data<float>());
-}
-
-TEST_F(LODTensorTester, ShareLOD) {
-  LODTensor new_lod_tensor;
-  new_lod_tensor.CopyLOD(*lod_tensor);
-  ASSERT_EQ(new_lod_tensor.lod(), lod_tensor->lod());
-}
-
-TEST_F(LODTensorTester, CopyLOD) {
-  LODTensor new_lod_tensor;
-  new_lod_tensor.CopyLOD(*lod_tensor);
-  bool equals = std::equal(lod_tensor->lod().begin(), lod_tensor->lod().end(),
-                           new_lod_tensor.lod().begin());
-  ASSERT_TRUE(equals);
+  ASSERT_EQ(new_lod_tensor.tensor().data<float>(),
+            lod_tensor.tensor().data<float>());
 }
 
 }  // namespace framework
...
...