BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit d697b6a3
Authored on Oct 23, 2017 by wanghaoshuang
Modified code using LoDTensor
Parent: 00ad7512

Showing 6 changed files with 65 additions and 46 deletions (+65, -46)
paddle/framework/lod_tensor.cc                        +4   -10
paddle/framework/lod_tensor.h                         +1   -1
paddle/operators/seq_expand_op.cc                     +5   -5
paddle/operators/seq_expand_op.h                      +28  -17
python/paddle/v2/framework/tests/op_test.py           +2   -0
python/paddle/v2/framework/tests/test_seq_expand.py   +25  -13
paddle/framework/lod_tensor.cc
@@ -103,25 +103,19 @@ void LoDTensor::ShrinkInLevel(size_t level, size_t elem_begin,
   lod_ = new_lod;
 }
 
-Vector<size_t> expand_lod(Vector<size_t> level, Vector<size_t> starts,
+Vector<size_t> expand_lod(Vector<size_t> level, Vector<size_t> indexes,
                           Vector<size_t> scales, bool repeat) {
   Vector<size_t> result;
   result.push_back(level[0]);
-  size_t p = 0, start = 0, end = 0;
+  size_t start = 0, end = 0;
   if (!repeat) {
     for (size_t i = 0; i < scales.size(); ++i) {
       result.push_back(result.back() + scales[i] * (level[i + 1] - level[i]));
     }
   } else {
     for (size_t i = 0; i < scales.size(); ++i) {
-      while (starts[i] != level[p] && p < level.size()) {
-        ++p;
-      }
-      start = p;
-      while (starts[i + 1] != level[p] && p < level.size()) {
-        ++p;
-      }
-      end = p + 1;
+      start = indexes[i];
+      end = indexes[i + 1];
       for (size_t j = 0; j < scales[i]; ++j) {
         for (size_t index = start; index < end - 1; ++index) {
           result.push_back(result.back() + level[index + 1] - level[index]);
 ...
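
For illustration only, here is a rough Python transcription of the visible part of the rewritten expand_lod (the hunk above is truncated; this sketch is not part of the commit):

    def expand_lod(level, indexes, scales, repeat):
        # Mirrors the visible logic of the C++ expand_lod above (illustration only).
        result = [level[0]]
        if not repeat:
            # Stretch each span of `level` by its per-sequence scale factor.
            for i in range(len(scales)):
                result.append(result[-1] + scales[i] * (level[i + 1] - level[i]))
        else:
            # Re-emit the spans of `level` delimited by indexes[i]..indexes[i+1],
            # scales[i] times each.
            for i in range(len(scales)):
                start, end = indexes[i], indexes[i + 1]
                for _ in range(scales[i]):
                    for idx in range(start, end - 1):
                        result.append(result[-1] + level[idx + 1] - level[idx])
        return result

    # e.g. the top level of Case 1 in seq_expand_op.cc: [0, 3, 4] scaled by 2 -> [0, 6, 8]
    assert expand_lod([0, 3, 4], [0, 1, 2], [2, 2], False) == [0, 6, 8]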
paddle/framework/lod_tensor.h
@@ -123,7 +123,7 @@ class LoDTensor : public Tensor {
   LoD lod_;
 };
 
-Vector<size_t> expand_lod(Vector<size_t> level, Vector<size_t> starts,
+Vector<size_t> expand_lod(Vector<size_t> level, Vector<size_t> indexes,
                           Vector<size_t> scales, bool repeat);
 }  // namespace framework
 ...
paddle/operators/seq_expand_op.cc
@@ -77,15 +77,15 @@ by lod of input(Y) or 'repeat' attribute.
 Case 1:
 
 Given a 2-level LoDTensor X:
-    X.data = [1, 2 , 3, 4]
+    X.data = [a, b , c, d]
     X.lod = [[0, 3, 4], [0, 1, 3, 4]]
 and
     repeat = 2
 then we get 3-level LoDTensor
-    Out.data = [1, 2, 3, 1, 2, 3, 4, 4]
-    Out.lod = [[0, 6, 8],
-               [0, 3, 6, 7, 8],
-               [0, 1, 3, 4, 6, 7, 8]]
+    Out.lod = [[0, 6, 8],
+               [0, 3, 6, 7, 8],
+               [0, 1, 3, 4, 6, 7, 8]]
+    Out.data = [a, b, c, a, b, c, d, d]
 
 Case 2:
 ...
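
A quick sanity check of Case 1 above (not part of the commit): with repeat = 2, each top-level sequence of X, delimited by X.lod[0] = [0, 3, 4] read as absolute element offsets, is emitted twice:

    x_data = ['a', 'b', 'c', 'd']
    x_lod0 = [0, 3, 4]
    repeat = 2

    out_data = []
    for i in range(len(x_lod0) - 1):
        out_data.extend(x_data[x_lod0[i]:x_lod0[i + 1]] * repeat)

    assert out_data == ['a', 'b', 'c', 'a', 'b', 'c', 'd', 'd']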
paddle/operators/seq_expand_op.h
@@ -33,15 +33,12 @@ class SeqExpandKernel : public framework::OpKernel<T> {
     auto x_dims = x->dims();
     auto x_lod = x->lod();
-    if (x_lod.size() == 0) {
-      framework::Vector<size_t> level;
-      for (int i = 0; i < x->dims()[0] + 1; ++i) {
-        level.push_back(i);
-      }
-      x_lod.push_back(level);
-    } else {
-      x_lod.insert(x_lod.begin(), x_lod[0]);
-    }
+    framework::Vector<size_t> level;
+    size_t num = (x_lod.size() == 0) ? (x->dims()[0] + 1) : x_lod[0].size();
+    for (int i = 0; i < num; ++i) {
+      level.push_back(i);
+    }
+    x_lod.push_back(level);
     size_t repeat = static_cast<size_t>(context.Attr<int>("repeat"));
     framework::Vector<size_t> scales;
 ...
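
A minimal Python sketch of the new level construction above (the helper name and example values are illustrative, not from the commit): when X carries no LoD, an identity level with one entry per row plus one is appended; otherwise the appended level has one entry per offset in x_lod[0].

    def append_level(x_rows, x_lod):
        # Mirrors: num = (x_lod.size() == 0) ? (x->dims()[0] + 1) : x_lod[0].size()
        num = x_rows + 1 if len(x_lod) == 0 else len(x_lod[0])
        return x_lod + [list(range(num))]

    assert append_level(4, []) == [[0, 1, 2, 3, 4]]
    assert append_level(7, [[0, 2, 3], [0, 2, 5, 7]]) == [[0, 2, 3], [0, 2, 5, 7], [0, 1, 2]]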
@@ -56,19 +53,27 @@ class SeqExpandKernel : public framework::OpKernel<T> {
     } else {
       auto* y = context.Input<LoDTensor>("Y");
       auto y_lod = y->lod();
-      for (int i = 0; i < y_lod[0].size() - 1; ++i) {
-        scales.push_back((y_lod[0][i + 1] - y_lod[0][i]) /
-                         (x_lod[0][i + 1] - x_lod[0][i]));
+      auto y_abs_lod = y_lod.ToAbsOffset();
+      auto x_abs_lod = x_lod.ToAbsOffset();
+      for (int i = 0; i < y_abs_lod[0].size() - 1; ++i) {
+        scales.push_back((y_abs_lod[0][i + 1] - y_abs_lod[0][i]) /
+                         (x_abs_lod[0][i + 1] - x_abs_lod[0][i]));
       }
       out->Resize(y->dims());
     }
 
+    framework::Vector<size_t> indexes;
+    for (int size_t i = 0; i < x_lod[0]; ++i) {
+      indexes[i] = x_lod[0];
+    }
     framework::LoD out_lod;
-    auto level0 = framework::expand_lod(x_lod[0], x_lod[0], scales, false);
+    auto level0 = framework::expand_lod(indexes, x_lod[0], scales, false);
     out_lod.push_back(level0);
     for (int i = 1; i < x_lod.size(); ++i) {
-      out_lod.push_back(framework::expand_lod(x_lod[i], x_lod[0], scales, true));
+      for (int j = 0; j < indexes.size(); ++j) {
+        indexes[j] = x_lod[i - 1][indexes[j]];
+      }
+      out_lod.push_back(framework::expand_lod(x_lod[i], indexes, scales, true));
     }
 
     size_t element_len = framework::product(x_dims) / x_dims[0];
 ...
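
Worked example of the scales computation above (not part of the commit), using the relative LoDs that TestSeqExpandCase4 later in this diff feeds the operator; integer division mirrors the size_t arithmetic:

    x_abs_lod0 = [0, 2, 5]    # x_lod = [[0, 2, 5]] is already in absolute offsets
    y_abs_lod0 = [0, 4, 13]   # ToAbsOffset of y_lod = [[0, 2, 5], [0, 2, 4, 7, 10, 13]]
    scales = [(y_abs_lod0[i + 1] - y_abs_lod0[i]) // (x_abs_lod0[i + 1] - x_abs_lod0[i])
              for i in range(len(y_abs_lod0) - 1)]
    assert scales == [2, 3]   # the two X sequences are expanded 2x and 3x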
@@ -80,7 +85,7 @@ class SeqExpandKernel : public framework::OpKernel<T> {
     if (platform::is_cpu_place(place)) {
       auto& cpu_place = boost::get<platform::CPUPlace>(place);
       for (size_t i = 0; i < scales.size(); ++i) {
-        count = element_len * (x_lod[0][i + 1] - x_lod[0][i]);
+        count = element_len * (x_abs_lod[0][i + 1] - x_abs_lod[0][i]);
         for (size_t j = 0; j < scales[i]; ++j) {
           memory::Copy(cpu_place, out_data, cpu_place, x_data,
                        sizeof(T) * count);
 ...
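
Illustrative arithmetic for the count above (values drawn from Case 1 and the base test, not from this hunk): element_len = product(x_dims) / x_dims[0] is the number of scalars per row, so each memory::Copy moves one whole source sequence.

    element_len = 1               # e.g. x_dims = [4, 1]
    x_abs_lod0 = [0, 3, 4]        # two sequences: 3 rows and 1 row
    counts = [element_len * (x_abs_lod0[i + 1] - x_abs_lod0[i])
              for i in range(len(x_abs_lod0) - 1)]
    assert counts == [3, 1]       # scalars copied per memcpy for each sequence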
@@ -95,7 +100,7 @@ class SeqExpandKernel : public framework::OpKernel<T> {
               context.device_context())
               .stream();
       for (size_t i = 0; i < scales.size(); ++i) {
-        count = element_len * (x_lod[0][i + 1] - x_lod[0][i]);
+        count = element_len * (x_abs_lod[0][i + 1] - x_abs_lod[0][i]);
         for (size_t j = 0; j < scales[i]; ++j) {
           memory::Copy(gpu_place, out_data, gpu_place, x_data,
                        sizeof(T) * count, stream);
 ...
@@ -109,6 +114,11 @@ class SeqExpandKernel : public framework::OpKernel<T> {
     }
     out->set_lod(out_lod);
+    for (size_t i = 0; i < lod.size; i++) {
+      for (size_t j = 0; j < lod[i].size(); j++) {
+        LOG(INFO) << "lod[" << i << "][" << j "] = " << lod[i][j];
+      }
+    }
   }
 };
 ...
@@ -121,13 +131,14 @@ class SeqExpandGradKernel : public framework::OpKernel<T> {
     auto* out = context.Input<LoDTensor>("Out");
     auto* d_x = context.Output<LoDTensor>(framework::GradVarName("X"));
     auto out_lod = out->lod();
+    auto out_abs_lod = out_lod.ToAbsOffset();
     d_x->set_lod(x->lod());
     const T* d_out_data = d_out->data<T>();
     auto d_out_dims = d_out->dims();
     T* d_x_data = d_x->mutable_data<T>(context.GetPlace());
     size_t element_len = framework::product(d_out_dims) / d_out_dims[0];
     for (size_t i = 0; i < out->NumElements(); ++i) {
-      size_t ele_count = out_lod[0][i + 1] - out_lod[0][i];
+      size_t ele_count = out_abs_lod[0][i + 1] - out_abs_lod[0][i];
       size_t repeat = out->NumElements(0, i);
       Eigen::TensorMap<Eigen::Tensor<const T, 2>> d_out_t(
           d_out_data, static_cast<int>(repeat),
 ...
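
The gradient hunk above is truncated; presumably each input row's gradient accumulates over its repeated copies (that is an assumption, not shown in this diff). A NumPy sketch under that assumption, with hypothetical shapes:

    import numpy as np

    element_len = 3                  # hypothetical: scalars per row
    repeat = 2                       # the sequence was expanded twice
    d_out_seq = np.ones((repeat * 2, element_len))   # gradient of 2 copies of a 2-row sequence
    d_x_seq = d_out_seq.reshape(repeat, -1, element_len).sum(axis=0)
    assert d_x_seq.shape == (2, element_len)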
python/paddle/v2/framework/tests/op_test.py
@@ -246,6 +246,8 @@ class OpTest(unittest.TestCase):
         else:
             actual = np.array(self.scope.find_var(out_name).get_tensor())
             expect = self.outputs[out_name]
+            print "actual= %s" % actual
+            print "expect = %s" % expect
             self.assertTrue(
                 np.allclose(
                     actual, expect, atol=atol),
 ...
python/paddle/v2/framework/tests/test_seq_expand.py
@@ -27,7 +27,15 @@ def repeat_array(array, starts, times):
     return newlist
 
 
+def toAbsOffset(lod):
+    for i in range(len(lod) - 2, -1, -1):
+        for j in range(len(lod[i])):
+            lod[i][j] = lod[i + 1][lod[i][j]]
+    return lod
+
+
 class TestSeqExpand(OpTest):
     #class TestSeqExpand():
     def set_data(self):
         x_data = np.random.uniform(0.1, 1, [4, 1]).astype('float32')
         self.inputs = {'X': x_data}
 ...
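
What toAbsOffset does, shown on the relative LoD that TestSeqExpandCase1 below now uses (a self-contained re-statement; not part of the commit): each upper level is rewritten in terms of absolute element offsets, which is exactly the LoD the old test hard-coded.

    def to_abs_offset(lod):
        # Same logic as the toAbsOffset helper added above, repeated here so the
        # example runs on its own.
        for i in range(len(lod) - 2, -1, -1):
            for j in range(len(lod[i])):
                lod[i][j] = lod[i + 1][lod[i][j]]
        return lod

    assert to_abs_offset([[0, 2, 3], [0, 2, 5, 7]]) == [[0, 5, 7], [0, 2, 5, 7]]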
@@ -35,23 +43,26 @@ class TestSeqExpand(OpTest):
     def compute(self):
         x = self.inputs['X']
+        print "x= %s" % x
         x_data, x_lod = x if type(x) == tuple else (x, None)
-        if not x_lod:
-            x_lod = [[i for i in range(1 + x_data.shape[0])]]
-        else:
-            x_lod = [x_lod[0]] + x_lod
+        n = 1 + x_data.shape[0] if not x_lod else len(x_lod[0])
+        x_lod = [[i for i in range(n)]] + x_lod
+        x_abs_lod = toAbsOffset(x_lod)
         if self.repeat:
+            print "repeat= %s" % self.repeat
             self.attrs = {'repeat': self.repeat}
             repeats = (len(x_lod[0]) - 1) * [self.repeat]
         else:
             y_data, y_lod = self.inputs['Y']
-            repeats = [((y_lod[0][i + 1] - y_lod[0][i]) /
-                        (x_lod[0][i + 1] - x_lod[0][i]))
-                       for i in range(len(y_lod[0]) - 1)]
-        out_lod = [repeat(x_lod[0], x_lod[0], repeats, True)] + [
-            repeat(lod, x_lod[0], repeats, False) for lod in x_lod[1:]
-        ]
-        out = repeat_array(x_data.tolist(), x_lod[0], repeats)
+            print "y_lod: %s" % y_lod
+            y_abs_lod = toAbsOffset(y_lod)
+            repeats = [((y_abs_lod[0][i + 1] - y_abs_lod[0][i]) /
+                        (x_abs_lod[0][i + 1] - x_abs_lod[0][i]))
+                       for i in range(len(y_abs_lod[0]) - 1)]
+        #out_lod = [repeat(x_lod[0], x_lod[0], repeats, True)] + [
+        #    repeat(lod, x_lod[0], repeats, False) for lod in x_lod[1:]
+        #]
+        out = repeat_array(x_data.tolist(), x_abs_lod[0], repeats)
         self.outputs = {'Out': out}
 
     def setUp(self):
 ...
@@ -69,7 +80,7 @@ class TestSeqExpand(OpTest):
 class TestSeqExpandCase1(TestSeqExpand):
     def set_data(self):
         x_data = np.random.uniform(0.1, 1, [7, 1]).astype('float32')
-        x_lod = [[0, 5, 7], [0, 2, 5, 7]]
+        x_lod = [[0, 2, 3], [0, 2, 5, 7]]
         self.inputs = {'X': (x_data, x_lod)}
         self.repeat = 2
 ...
@@ -95,10 +106,11 @@ class TestSeqExpandCase4(TestSeqExpand):
         x_data = np.random.uniform(0.1, 1, [5, 1]).astype('float32')
         x_lod = [[0, 2, 5]]
         y_data = np.random.uniform(0.1, 1, [13, 1]).astype('float32')
-        y_lod = [[0, 4, 13], [0, 2, 4, 7, 10, 13]]
+        y_lod = [[0, 2, 5], [0, 2, 4, 7, 10, 13]]
         self.inputs = {'X': (x_data, x_lod), 'Y': (y_data, y_lod)}
         self.repeat = None
 
 
 if __name__ == '__main__':
     unittest.main()
+#   TestSeqExpandCase4().setUp()
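
A final consistency check for TestSeqExpandCase4 above (not part of the commit): expanding X's two sequences of 2 and 3 rows by the derived factors [2, 3] yields 2*2 + 3*3 = 13 rows, matching Y's first dimension of 13.

    seq_lens = [2, 3]
    scales = [2, 3]
    assert sum(l * s for l, s in zip(seq_lens, scales)) == 13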