Crayon鑫 / Paddle (forked from PaddlePaddle / Paddle)
Commit 20a6ae7f
Authored on Oct 06, 2017 by Yan Chunwei
Committed by GitHub on Oct 06, 2017

Feature/tensor array add python binding (#4616)

Parent: 4c96008a
Showing 4 changed files with 161 additions and 1 deletion (+161, -1):

paddle/framework/tensor_array.h                          +3    -0
paddle/pybind/CMakeLists.txt                             +1    -1
paddle/pybind/pybind.cc                                  +51   -0
python/paddle/v2/framework/tests/test_tensor_array.py    +106  -0
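The commit exposes framework::TensorArray to Python as core.TensorArray, with read, write, write_shared, size, stack/unstack, unstack_shared, and pack/unpack methods. The sketch below is not part of the commit; it is a minimal usage illustration that assumes the compiled extension imports as paddle.v2.framework.core (as in the new test file) and uses a hypothetical variable name "src".

# Minimal usage sketch (not part of the commit): assumes the extension is
# importable as paddle.v2.framework.core; "src" is a hypothetical name.
import numpy as np
import paddle.v2.framework.core as core

place = core.CPUPlace()
scope = core.Scope()

# Build a [4, 2] float LoDTensor the same way the new test does.
tensor = scope.new_var("src").get_tensor()
tensor.set_dims([4, 2])
tensor.alloc_float(place)
array = np.array(tensor)
for i in range(4):
    array[i, 0] = i
lod_tensor = core.LoDTensor([[0, 2, 4]])
lod_tensor.set(array, place)

# Exercise the newly bound methods.
ta = core.TensorArray()
ta.unstack(lod_tensor)   # split along axis 0 into one tensor per row
step = ta.read(0)        # a [1, 2] LoDTensor
ta.write(2, step)        # copy-write it back at index 2
print(ta.size())         # -> 4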
paddle/framework/tensor_array.h
@@ -26,6 +26,9 @@ namespace framework {
  * in original lod-tensor.
  */
 struct DySeqMeta {
+  DySeqMeta(size_t begin, size_t end, size_t ori_idx)
+      : begin(begin), end(end), ori_idx(ori_idx) {}
+
   size_t begin;
   size_t end;  // not included
   size_t ori_idx;
 ...
paddle/pybind/CMakeLists.txt
 if(WITH_PYTHON)
   cc_library(paddle_pybind SHARED
     SRCS pybind.cc exception.cc protobuf.cc
-    DEPS pybind python backward proto_desc
+    DEPS pybind python backward proto_desc tensor_array
     ${GLOB_OP_LIB})
 endif(WITH_PYTHON)
paddle/pybind/pybind.cc
@@ -16,6 +16,7 @@ limitations under the License. */
 #include "paddle/framework/backward.h"
 #include "paddle/framework/lod_tensor.h"
+#include "paddle/framework/tensor_array.h"
 #include "paddle/operators/cond_op.h"
 #include "paddle/operators/net_op.h"
 #include "paddle/operators/recurrent_op.h"
 ...
@@ -286,6 +287,56 @@ All parameter, weight, gradient are variables in Paddle.
         self->CompleteAddOp();
       });

+  py::class_<framework::TensorArray>(m, "TensorArray")
+      .def("__init__",
+           [](TensorArray &instance) { new (&instance) TensorArray(); })
+      .def("read",
+           [](TensorArray &self, size_t index) { return self.Read(index); })
+      .def("write", [](TensorArray &self, size_t index, LoDTensor &value) {
+        self.Write(index, value);
+      })
+      .def("write_shared",
+           [](TensorArray &self, size_t index, const LoDTensor &value) {
+             self.WriteShared(index, value);
+           })
+      .def("size", [](TensorArray &self) { return self.size(); })
+      .def("pack",
+           [](TensorArray &self, size_t level,
+              const std::vector<std::vector<size_t>> &meta_info,
+              const std::vector<std::vector<size_t>> &lod) {
+             std::vector<DySeqMeta> meta;
+             for (auto &info : meta_info) {
+               PADDLE_ENFORCE_EQ(info.size(), 3UL);
+               meta.emplace_back(info[0], info[1], info[2]);
+             }
+#ifndef PADDLE_WITH_CUDA
+             return self.Pack(level, meta, lod);
+#else
+             LoD new_lod;
+             new_lod.reserve(lod.size());
+             std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod));
+             return self.Pack(level, meta, new_lod);
+#endif
+           })
+      .def("unpack",
+           [](TensorArray &self, const LoDTensor &source, int level,
+              bool length_descend) {
+             auto metas = self.Unpack(source, level, length_descend);
+             std::vector<std::vector<size_t>> meta_info;
+             for (auto meta : metas) {
+               meta_info.emplace_back(
+                   std::vector<size_t>({meta.begin, meta.end, meta.ori_idx}));
+             }
+             return meta_info;
+           })
+      .def("stack", [](TensorArray &self) { return self.Stack(); })
+      .def("unstack",
+           [](TensorArray &self, const LoDTensor &source) {
+             return self.Unstack(source);
+           })
+      .def("unstack_shared", [](TensorArray &self, const LoDTensor &source) {
+        return self.UnstackShared(source);
+      });
+
   // recurrent_op
   py::class_<operators::RecurrentOp, OperatorBase>(m, "RecurrentOp")
       .def_static(
 ...
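Note the meta_info convention shared by the pack and unpack bindings above: each entry is a three-element [begin, end, ori_idx] list mirroring DySeqMeta (the lambda enforces info.size() == 3), with end exclusive. The round-trip sketch below is not part of the commit; it borrows the lod_py = [[0, 2, 5, 10]] layout from the new test file, assumes the module imports as paddle.v2.framework.core, and uses a hypothetical variable name "seq".

# Round-trip sketch for pack/unpack (not part of the commit).
import numpy as np
import paddle.v2.framework.core as core

place = core.CPUPlace()
scope = core.Scope()

tensor = scope.new_var("seq").get_tensor()    # "seq" is a hypothetical name
tensor.set_dims([10, 2])
tensor.alloc_float(place)
data = np.array(tensor)
lod_tensor = core.LoDTensor([[0, 2, 5, 10]])  # three sequences: lengths 2, 3, 5
lod_tensor.set(data, place)

ta = core.TensorArray()
# unpack returns one [begin, end, ori_idx] triple per sequence, longest first
# when length_descend=True; the test expects [[5, 10, 2], [2, 5, 1], [0, 2, 0]].
meta = ta.unpack(lod_tensor, 0, True)
# pack takes the same triples back, together with the original LoD.
packed = ta.pack(0, meta, lod_tensor.lod())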
python/paddle/v2/framework/tests/test_tensor_array.py
0 → 100644
import logging
import paddle.v2.framework.core as core
import unittest
import numpy as np


class TestTensorArray(unittest.TestCase):
    def setUp(self):
        self.ta = core.TensorArray()

        self.batch_size = 10
        self.dim = 2

        # create a LoDTensor
        self.scope = core.Scope()
        var = self.scope.new_var("test_tensor")
        self.place = core.CPUPlace()
        tensor = var.get_tensor()
        tensor.set_dims([self.batch_size, self.dim])
        tensor.alloc_float(self.place)
        tensor_array = np.array(tensor)
        tensor_array[0, 0] = 0
        tensor_array[1, 0] = 1
        tensor_array[2, 0] = 2
        tensor_array[3, 0] = 3
        tensor_array[4, 0] = 4
        tensor_array[5, 0] = 5
        tensor_array[6, 0] = 6
        tensor_array[7, 0] = 7
        tensor_array[8, 0] = 8
        tensor_array[9, 0] = 9

        lod_py = [[0, 2, 5, 10]]
        lod_tensor = core.LoDTensor(lod_py)
        lod_tensor.set(tensor_array, self.place)

        self.py_seq_meta = [[5, 10, 2], [2, 5, 1], [0, 2, 0]]

        self.tensor = lod_tensor

    def test_unstack(self):
        self.ta.unstack(self.tensor)
        self.assertEqual(self.tensor.get_dims()[0], self.ta.size())

    def test_read(self):
        self.ta.unstack(self.tensor)
        for i in range(self.batch_size):
            tensor = self.ta.read(i)

    def test_write(self):
        self.ta.unstack(self.tensor)

        # create a tensor with shape of [1, self.dim]
        var = self.scope.new_var("hell")
        tensor = var.get_tensor()
        tensor.set_dims([1, self.dim])
        tensor.alloc_float(self.place)
        tensor_array = np.array(tensor)
        for i in range(self.dim):
            tensor_array[0, i] = i
        tensor.set(tensor_array, self.place)

        self.ta.write(2, tensor)

        ta_tensor = self.ta.read(2)
        ta_tensor_array = np.array(ta_tensor)
        self.assertEqual(ta_tensor.get_dims(), [1, self.dim])
        self.assertTrue((tensor_array == ta_tensor_array).all())

    def test_write_shared(self):
        self.ta.unstack(self.tensor)

        # create a tensor with shape of [1, self.dim]
        var = self.scope.new_var("hell")
        tensor = var.get_tensor()
        tensor.set_dims([1, self.dim])
        tensor.alloc_float(self.place)
        tensor_array = np.array(tensor)
        for i in range(self.dim):
            tensor_array[0, i] = i
        tensor.set(tensor_array, self.place)

        self.ta.write_shared(2, tensor)

        ta_tensor = self.ta.read(2)
        ta_tensor_array = np.array(ta_tensor)
        self.assertEqual(ta_tensor.get_dims(), [1, self.dim])
        self.assertTrue((tensor_array == ta_tensor_array).all())

    def test_unpack(self):
        meta = self.ta.unpack(self.tensor, 0, True)
        self.assertEqual(self.ta.size(), 5)
        self.assertEqual(meta, self.py_seq_meta)

    def test_pack(self):
        meta = self.ta.unpack(self.tensor, 0, True)
        print "meta", meta
        tensor = self.ta.pack(0, meta, self.tensor.lod())
        print np.array(self.tensor)
        print np.array(tensor)
        self.assertTrue((np.array(self.tensor) == np.array(tensor)).all())
        self.assertTrue(tensor.lod(), self.tensor.lod())


if __name__ == '__main__':
    unittest.main()