机器未来 / Paddle (forked from PaddlePaddle / Paddle)
Unverified commit 47622d7f, authored Jan 19, 2018 by Yu Yang, committed by GitHub on Jan 19, 2018.

Merge pull request #7624 from tonyyang-svail/7450

Make merge and split support lodtensor

Parents: 259fcc30, d002f60a

Showing 2 changed files with 111 additions and 22 deletions:

paddle/framework/lod_tensor.cc       +46  -22
paddle/framework/lod_tensor_test.cc  +65  -0

In short: SplitLoDTensor previously enforced an empty LoD and split along raw rows; it now splits along top-level LoD sequences and rebases each slice's LoD to start at zero. MergeLoDTensor previously ignored LoD; it now concatenates the inputs' LoDs, shifting each appended level by the running offset.
paddle/framework/lod_tensor.cc

@@ -286,18 +286,18 @@ void DeserializeFromStream(std::istream &is, LoDTensor *tensor,
   DeserializeFromStream(is, static_cast<Tensor *>(tensor), dev_ctx);
 }
 
-// TODO(tonyyang-svail): make this function support LoD
 std::vector<LoDTensor> LoDTensor::SplitLoDTensor(
     const std::vector<platform::Place> places) const {
   check_memory_size();
-  PADDLE_ENFORCE(lod().empty(), "Disable parallel lod for now");
-  size_t result_size = std::min(static_cast<size_t>(dims()[0]), places.size());
-  size_t remainder = dims()[0] % places.size();
+  int batch_size =
+      lod().empty() ? dims()[0] : static_cast<int>(lod()[0].size()) - 1;
+  size_t result_size = std::min(static_cast<size_t>(batch_size), places.size());
+  size_t remainder = batch_size % places.size();
 
   std::vector<LoDTensor> results;
   results.reserve(result_size);
 
-  int step_width = static_cast<int>(dims()[0] / result_size);
+  int step_width = static_cast<int>(batch_size / result_size);
   for (size_t i = 0; i < result_size; ++i) {
     int begin = static_cast<int>(i * step_width);
     int end = static_cast<int>((i + 1) * step_width);
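For the two-level LoD used in the tests below, lod()[0] is {0, 2, 4, 5, 6}, so batch_size becomes 5 - 1 = 4 top-level sequences rather than dims()[0] = 20 rows; with two places this gives step_width = 2, i.e. two sequences per place.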
@@ -305,13 +305,28 @@ std::vector<LoDTensor> LoDTensor::SplitLoDTensor(
       end += remainder;
     }
 
-    auto src = Slice(begin, end);
-    auto &dst_place = places[i];
     LoDTensor dst;
-    if (!(dst_place == place())) {
+    if (lod().empty()) {
+      auto src = Slice(begin, end);
+      auto &dst_place = places[i];
       framework::Copy(src, dst_place, &dst);
-    } else {  // It is no need to copy if src_place and dst_place are same.
-      dst.ShareDataWith(src);
+    } else {
+      auto lod_and_offset = GetSubLoDAndAbsoluteOffset(lod(), begin, end, 0);
+
+      auto &offset = lod_and_offset.second;
+      auto src = Slice(offset.first, offset.second);
+      auto &dst_place = places[i];
+      framework::Copy(src, dst_place, &dst);
+
+      LoD my_lod;
+      for (auto &l : lod_and_offset.first) {
+        std::vector<size_t> v{0};
+        for (auto &ll : l) {
+          v.push_back(ll + v.back());
+        }
+        my_lod.emplace_back(v);
+      }
+      dst.set_lod(my_lod);
     }
     results.emplace_back(dst);
   }
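The rebasing loop above implies that lod_and_offset.first holds per-level sequence lengths for the slice, which the prefix sum turns back into zero-based offset vectors. Below is a minimal standalone sketch of that step (plain C++, not Paddle code; the lengths {1, 5, 2, 5} are taken from the first slice's level-1 sequences in the test further down):

#include <cstdio>
#include <vector>

int main() {
  // Level-1 sequence lengths of the first slice: 1, 5, 2, 5.
  std::vector<std::size_t> lengths{1, 5, 2, 5};

  // Prefix-sum into a zero-based offset vector, as in the patch.
  std::vector<std::size_t> offsets{0};
  for (auto len : lengths) offsets.push_back(len + offsets.back());

  for (auto o : offsets) std::printf("%zu ", o);  // prints: 0 1 6 8 13
  std::printf("\n");
}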
@@ -319,29 +334,38 @@ std::vector<LoDTensor> LoDTensor::SplitLoDTensor(
   return results;
 }
 
-// TODO(tonyyang-svail): make this function support LoD
 void LoDTensor::MergeLoDTensor(
     const std::vector<const LoDTensor *> &lod_tensors,
     platform::Place dst_place) {
   PADDLE_ENFORCE(!lod_tensors.empty());
+
   framework::DDim new_dim = lod_tensors[0]->dims();
   std::type_index new_type = lod_tensors[0]->type();
-  auto new_layout = lod_tensors[0]->layout();
-  int64_t new_height = 0;
-  for (auto *lod : lod_tensors) {
-    new_height += lod->dims()[0];
-    for (int i = 1; i < new_dim.size(); ++i) {
-      PADDLE_ENFORCE_EQ(new_dim[i], lod->dims()[i]);
-    }
+  framework::DataLayout new_layout = lod_tensors[0]->layout();
+  LoD new_lod = lod_tensors[0]->lod();
+  for (size_t i = 1; i < lod_tensors.size(); ++i) {
+    auto *t = lod_tensors[i];
+    PADDLE_ENFORCE_EQ(new_type.hash_code(), t->type().hash_code());
+    PADDLE_ENFORCE_EQ(new_layout, t->layout());
 
-    PADDLE_ENFORCE_EQ(new_type, lod->type());
-    PADDLE_ENFORCE_EQ(new_layout, lod->layout());
+    PADDLE_ENFORCE_EQ(framework::product(new_dim) / new_dim[0],
+                      framework::product(t->dims()) / t->dims()[0]);
+    new_dim[0] += t->dims()[0];
+
+    auto &lod = t->lod();
+    for (size_t j = 0; j < lod.size(); ++j) {
+      auto &sub_lod = new_lod[j];
+      auto &offset = sub_lod.back();
+      for (size_t k = 1; k < lod[j].size(); ++k) {
+        sub_lod.push_back(lod[j][k] + offset);
+      }
+    }
   }
-  new_dim[0] = new_height;
   Resize(new_dim);
   set_layout(new_layout);
+  set_lod(new_lod);
   mutable_data(dst_place, new_type);
 
   int begin = 0;
   for (auto *src : lod_tensors) {
     int end = begin + src->dims()[0];
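The inner loops append each subsequent tensor's LoD level onto the accumulated one, skipping the leading 0 and shifting every entry by the last offset already present. A standalone sketch of that concatenation (plain C++, not Paddle code; the offset vectors come from the two tensors in the MergeLoDTensor test below):

#include <cstdio>
#include <vector>

int main() {
  // Level-1 offsets of lod_tensor0 and lod_tensor1 from the test.
  std::vector<std::size_t> merged{0, 1, 6, 8, 13};
  std::vector<std::size_t> next{0, 2, 7};

  // Shift by the last accumulated offset; skip next[0] == 0.
  std::size_t offset = merged.back();
  for (std::size_t k = 1; k < next.size(); ++k)
    merged.push_back(next[k] + offset);

  for (auto o : merged) std::printf("%zu ", o);  // prints: 0 1 6 8 13 15 20
  std::printf("\n");
}

Note the sketch copies the offset by value before appending; the patch instead captures a reference to sub_lod.back(), which relies on push_back not reallocating the vector while entries are added.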
paddle/framework/lod_tensor_test.cc

@@ -100,6 +100,71 @@ TEST(LoD, ToAbsOffset) {
   EXPECT_EQ(abs_lod, expected);
 }
 
+TEST(LoD, SplitLoDTensor) {
+  LoD lod;
+  lod.push_back(std::vector<size_t>({0, 2, 4, 5, 6}));
+  lod.push_back(std::vector<size_t>({0, 1, 6, 8, 13, 15, 20}));
+
+  platform::CPUPlace place;
+  LoDTensor lod_tensor;
+  lod_tensor.Resize({20, 1});
+  float *dst_ptr = lod_tensor.mutable_data<float>(place);
+  for (int i = 0; i < lod_tensor.numel(); ++i) {
+    dst_ptr[i] = i;
+  }
+  lod_tensor.set_lod(lod);
+
+  std::vector<platform::Place> places{platform::CPUPlace(),
+                                      platform::CPUPlace()};
+  LoD lod0;
+  lod0.push_back(std::vector<size_t>({0, 2, 4}));
+  lod0.push_back(std::vector<size_t>({0, 1, 6, 8, 13}));
+  LoD lod1;
+  lod1.push_back(std::vector<size_t>({0, 1, 2}));
+  lod1.push_back(std::vector<size_t>({0, 2, 7}));
+
+  auto lods = lod_tensor.SplitLoDTensor(places);
+  EXPECT_EQ(lods[0].lod(), lod0);
+  EXPECT_EQ(lods[1].lod(), lod1);
+}
+
+TEST(LoD, MergeLoDTensor) {
+  LoD lod;
+  lod.push_back(std::vector<size_t>({0, 2, 4, 5, 6}));
+  lod.push_back(std::vector<size_t>({0, 1, 6, 8, 13, 15, 20}));
+
+  platform::CPUPlace place;
+
+  LoDTensor lod_tensor0;
+  LoD lod0;
+  lod0.push_back(std::vector<size_t>({0, 2, 4}));
+  lod0.push_back(std::vector<size_t>({0, 1, 6, 8, 13}));
+  lod_tensor0.set_lod(lod0);
+
+  lod_tensor0.Resize({13, 1});
+  float *dst_ptr = lod_tensor0.mutable_data<float>(place);
+  for (int i = 0; i < lod_tensor0.numel(); ++i) {
+    dst_ptr[i] = i;
+  }
+
+  LoDTensor lod_tensor1;
+  LoD lod1;
+  lod1.push_back(std::vector<size_t>({0, 1, 2}));
+  lod1.push_back(std::vector<size_t>({0, 2, 7}));
+  lod_tensor1.set_lod(lod1);
+  lod_tensor1.Resize({7, 1});
+  dst_ptr = lod_tensor1.mutable_data<float>(place);
+  for (int i = 0; i < lod_tensor1.numel(); ++i) {
+    dst_ptr[i] = i;
+  }
+
+  std::vector<const LoDTensor *> lods{&lod_tensor0, &lod_tensor1};
+
+  LoDTensor lod_tensor;
+  lod_tensor.MergeLoDTensor(lods, place);
+  EXPECT_EQ(lod_tensor.lod(), lod);
+}
+
 TEST(LoD, CheckLoD) {
   LoD relative_lod;
   relative_lod.push_back(std::vector<size_t>({0, 2}));
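Where lod0 and lod1 come from: the top level of lod has four sequences ({0, 2, 4, 5, 6}), so each of the two CPU places receives two of them. The first slice keeps top-level offsets {0, 2, 4} and level-1 offsets {0, 1, 6, 8, 13} (rows 0 through 13); the second slice's raw offsets {4, 5, 6} and {13, 15, 20} rebase to {0, 1, 2} and {0, 2, 7} (rows 13 through 20). MergeLoDTensor then reassembles exactly the original lod, which is what the final EXPECT_EQ checks.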