PaddlePaddle / PaddleDetection
Commit 1c1f73b4

Authored by Yan Chunwei on Oct 13, 2017
Committed by GitHub on Oct 13, 2017

Feature/dynamic recurrent op forward test (#4729)

Parent: 6316b40a

Showing 9 changed files with 323 additions and 69 deletions (+323 -69)
paddle/framework/tensor_array.cc                                 +17  -11
paddle/framework/tensor_array.h                                   +9   -3
paddle/operators/dynamic_recurrent_op.cc                        +110  -52
paddle/operators/dynamic_recurrent_op.h                          +25   -1
paddle/operators/dynamic_recurrent_op_test.cc                     +0   -1
paddle/operators/sum_op.cc                                        +1   -1
paddle/pybind/pybind.cc                                          +28   -0
python/paddle/v2/framework/op.py                                 +22   -0
python/paddle/v2/framework/tests/test_dynamic_recurrent_op.py   +111   -0
paddle/framework/tensor_array.cc
@@ -76,6 +76,17 @@ LoDTensor PackDynamicBatch(const std::vector<LoDTensor>& source,
                            const std::vector<DySeqMeta>& meta, const LoD& lod,
                            size_t level);
 
+std::vector<size_t> GenDyBatchIndice(const DySeqMetaBatch& meta, int batch_id) {
+  // collect indice need to copy to the batch
+  std::vector<size_t> indice;
+  for (const auto& seq : meta) {
+    size_t id = seq.begin + batch_id;
+    if (id >= seq.end) break;
+    indice.push_back(id);
+  }
+  return indice;
+}
+
 }  // namespace detail
 
 const LoDTensor& TensorArray::Read(size_t index) const {

@@ -113,8 +124,8 @@ LoDTensor TensorArray::Pack(size_t level, const std::vector<DySeqMeta>& meta,
   return detail::PackDynamicBatch(values_, meta, lod, level);
 }
 
-std::vector<DySeqMeta> TensorArray::Unpack(const LoDTensor& source, int level,
-                                           bool length_desend) {
+DySeqMetaBatch TensorArray::Unpack(const LoDTensor& source, int level,
+                                   bool length_desend) {
   detail::DynamicBatchUnpacker unpacker(source, level,
                                         length_desend /*descend*/);

@@ -129,6 +140,7 @@ std::vector<DySeqMeta> TensorArray::Unpack(const LoDTensor& source, int level,
     Write(batch_id, unpacker.GetBatch(batch_id));
   }
+  PADDLE_ENFORCE(!unpacker.meta.empty());
   return unpacker.meta;
 }

@@ -218,13 +230,7 @@ LoDTensor DynamicBatchUnpacker::GetBatch(size_t index) {
   PADDLE_ENFORCE(!meta.empty(), "should build meta first");
   LoDTensor result;
 
-  // collect indice need to copy to the batch
-  std::vector<size_t> indice;
-  for (const auto& seq : meta) {
-    size_t id = seq.begin + index;
-    if (id >= seq.end) break;
-    indice.push_back(id);
-  }
+  auto indice = detail::GenDyBatchIndice(meta, index);
   PADDLE_ENFORCE(!indice.empty(), "invalid batch at %d", index);
 
   // copy the indice of records in LoDTensor

@@ -237,9 +243,9 @@ LoDTensor DynamicBatchUnpacker::GetBatch(size_t index) {
   for (size_t i = 0; i < indice.size(); i++) {
     auto index = indice[i];
     auto target = result.Slice<value_type>(i, i + 1);
-    auto source_ = source->Slice<value_type>(index, index + 1);
+    auto slice = source->Slice<value_type>(index, index + 1);
 
-    target.CopyFrom<value_type>(source_, platform::CPUPlace(),
+    target.CopyFrom<value_type>(slice, platform::CPUPlace(),
                                 platform::CPUDeviceContext());
   }
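The helper factored out above, GenDyBatchIndice, is the core of the dynamic batching scheme: time-step batch batch_id gathers the batch_id-th element of every sequence that is still long enough, and the early break relies on the sequences in meta being sorted by descending length. A minimal Python sketch of the same index computation, with a hypothetical DySeqMeta stand-in carrying the begin/end/ori_idx fields declared in tensor_array.h:

from collections import namedtuple

# Hypothetical stand-in for framework::DySeqMeta (fields from tensor_array.h).
DySeqMeta = namedtuple("DySeqMeta", ["begin", "end", "ori_idx"])


def gen_dy_batch_indice(meta, batch_id):
    """Mirror of detail::GenDyBatchIndice: for one time step, collect the flat
    indices of all sequences that still have an element at that step."""
    indice = []
    for seq in meta:  # assumed sorted by descending sequence length
        idx = seq.begin + batch_id
        if idx >= seq.end:
            break  # every following sequence is shorter, so it stops here too
        indice.append(idx)
    return indice


if __name__ == "__main__":
    # Three sequences of length 3, 2, 1 laid out back to back: [0..2][3..4][5].
    meta = [DySeqMeta(0, 3, 0), DySeqMeta(3, 5, 1), DySeqMeta(5, 6, 2)]
    print(gen_dy_batch_indice(meta, 0))  # [0, 3, 5]
    print(gen_dy_batch_indice(meta, 1))  # [1, 4]
    print(gen_dy_batch_indice(meta, 2))  # [2]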
paddle/framework/tensor_array.h
@@ -34,6 +34,13 @@ struct DySeqMeta {
   size_t ori_idx;
 };
 
+using DySeqMetaBatch = std::vector<DySeqMeta>;
+
+/*
+ * Extract the indices of instances.
+ */
+std::vector<size_t> GenDyBatchIndice(const DySeqMetaBatch& metas, int batch_id);
+
 /*
  * TensorArray is a C-array-like array of tensors, it is meant to be used with
  * dynamic iteration primitives such as while_loop. It is used to segment inputs

@@ -69,7 +76,7 @@ class TensorArray {
    * Recover the original LoD-arranged LoDTensor with the `values`, `level` and
    * `indice_map`.
    */
-  LoDTensor Pack(size_t level, const std::vector<DySeqMeta>& meta,
+  LoDTensor Pack(size_t level, const DySeqMetaBatch& meta,
                  const LoD& lod) const;
 
   /*

@@ -77,8 +84,7 @@ class TensorArray {
    * `values`, if set `desend`, will sort by length in descending order else in
    * ascending order.
    */
-  std::vector<DySeqMeta> Unpack(const LoDTensor& source, int level,
-                                bool length_desend);
+  DySeqMetaBatch Unpack(const LoDTensor& source, int level, bool length_desend);
 
   /*
    * Pack the values into a tensor with rank one higher than each tensor in
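The length_desend flag documented here is what makes ori_idx necessary: Unpack sorts sequences by length in descending order, and each DySeqMeta remembers the position the sequence came from. A rough Python sketch of how such metadata could be derived from a one-level LoD; build_dy_seq_meta is a hypothetical helper for illustration, not the real implementation:

def build_dy_seq_meta(lod_level, length_descend=True):
    """Sketch: derive (begin, end, ori_idx) triples from a one-level LoD such
    as [0, 4, 7, 9, 10], optionally sorted by descending sequence length."""
    seqs = []
    for ori_idx in range(len(lod_level) - 1):
        begin, end = lod_level[ori_idx], lod_level[ori_idx + 1]
        seqs.append((begin, end, ori_idx))
    if length_descend:
        seqs.sort(key=lambda s: s[1] - s[0], reverse=True)
    return seqs


if __name__ == "__main__":
    # The LoD used by the new Python test below: 4 sentences of length 4, 3, 2, 1.
    print(build_dy_seq_meta([0, 4, 7, 9, 10]))
    # [(0, 4, 0), (4, 7, 1), (7, 9, 2), (9, 10, 3)] -- already length-descending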
paddle/operators/dynamic_recurrent_op.cc
@@ -23,6 +23,7 @@ using framework::Scope;
 using framework::TensorArray;
 using framework::LoDTensor;
 using framework::Variable;
+using framework::DySeqMetaBatch;
 
 namespace detail {

@@ -33,6 +34,29 @@ inline void CreateVariables(Scope& scope,
   }
 }
 
+/*
+ * The inputs with sequence should be reordered when they are split, so the
+ * boot_states should be reordered in the same order.
+ *
+ * NOTE This may require that the `pre_state` of the first time step should just
+ * copy the `boot_state` rather than reference it, for that the content should
+ * be reordered, but the RNN op should not change the `boot_state` as an input
+ * variable's content.
+ */
+template <typename T>
+inline void ReorderBootState(const DySeqMetaBatch& metas,
+                             const LoDTensor& boot_state, LoDTensor* tensor,
+                             const platform::Place& dst_place) {
+  for (size_t seq_id = 0; seq_id < metas.size(); seq_id++) {
+    auto slice = tensor->Slice<T>(seq_id, seq_id + 1);
+    auto boot_slice =
+        boot_state.Slice<T>(metas[seq_id].ori_idx, metas[seq_id].ori_idx + 1);
+    // TODO(superjom) pass in device context as an argument
+    slice.template CopyFrom<T>(boot_slice, dst_place,
+                               platform::CPUDeviceContext());
+  }
+}
+
 }  // namespace detail
 
 class DynamicRecurrentOpProtoAndCheckerMaker

@@ -69,6 +93,7 @@ void DynamicRecurrentOp::Run(const Scope& scope,
   CreateScopes();
   WriteStepInputs();
   InitStates();
+  WriteStepOutputs();
 
   // call stepnet in all the time steps
   for (size_t step = 0; step < cache_.num_steps; step++) {

@@ -76,7 +101,6 @@ void DynamicRecurrentOp::Run(const Scope& scope,
     stepnet_->Run(step_scope, dev_ctx);
   }
 
-  WriteStepOutputs();
   ConcatOutputs();
 }

@@ -84,11 +108,11 @@ void DynamicRecurrentOp::SplitInputs() const {
   // TODO(superjom) make level a config
   // TODO(superjom) check all the inputs has the same LoD
   int level = 0;
-  const auto& inlinks = cache_.inlinks;
-  for (const auto& item : inlinks) {
+  for (const auto& item : cache_.inlinks) {
     const auto& var = item.second;
     const auto& tensor = var->Get<LoDTensor>();
     TensorArray& ta = step_inputs_[item.first];
+
     dy_seq_metas_[item.first] =
         ta.Unpack(tensor, level, true /*length_descend*/);

@@ -120,17 +144,11 @@ void DynamicRecurrentOp::WriteStepInputs() const {
 }
 
 void DynamicRecurrentOp::WriteStepOutputs() const {
-  for (size_t step = 0; step < cache_.scopes->size(); step++) {
-    auto& scope = cache_.GetScope(step);
-    for (auto& item : step_outputs_) {
-      auto* var = scope.FindVar(item.first);
-      if (var == nullptr) {
-        var = scope.NewVar(item.first);
-      }
-      auto* tensor = var->GetMutable<LoDTensor>();
-      item.second.WriteShared(step, *tensor);
-    }
+  // initialize step outputs
+  for (const auto& item : cache_.outlinks) {
+    step_outputs_.emplace(item.first, TensorArray());
   }
+  PADDLE_ENFORCE_GT(step_outputs_.size(), 0UL);
 }
 
 void DynamicRecurrentOp::CreateScopes() const {

@@ -145,12 +163,18 @@ void DynamicRecurrentOp::CreateScopes() const {
   PADDLE_ENFORCE_NOT_NULL(stepnet_, "stepnet should be set first");
   std::vector<std::string> memories;
   std::vector<std::string> pre_memories;
+  std::vector<std::string> stepnet_outputs;
   std::transform(arg_.memories.begin(), arg_.memories.end(),
                  std::back_inserter(memories),
                  [](const rnn::MemoryAttr& m) { return m.var; });
   std::transform(arg_.memories.begin(), arg_.memories.end(),
                  std::back_inserter(pre_memories),
                  [](const rnn::MemoryAttr& m) { return m.pre_var; });
+  for (const auto& item : stepnet_->Outputs()) {
+    for (const auto& var : item.second) {
+      stepnet_outputs.push_back(var);
+    }
+  }
 
   for (size_t step = 0; step < cache_.num_steps; step++) {
     auto& scope = cache_.GetScope(step);

@@ -158,60 +182,88 @@ void DynamicRecurrentOp::CreateScopes() const {
     detail::CreateVariables(scope, arg_.outlinks);
     detail::CreateVariables(scope, memories);
     detail::CreateVariables(scope, pre_memories);
+    detail::CreateVariables(scope, stepnet_outputs);
   }
 }
 
 void DynamicRecurrentOp::ConcatOutputs() const {
   // TODO(superjom) transform this to a config
   int level = 0;
-  // TODO(superjom) pass in some lod
-  // just a placeholder
-  framework::LoD lod;
 
+  for (size_t step = 0; step < cache_.num_steps; step++) {
+    auto& scope = cache_.GetScope(step);
+    for (auto& item : step_outputs_) {
+      auto* var = scope.FindVar(item.first);
+      PADDLE_ENFORCE_NOT_NULL(var);
+      auto* tensor = var->GetMutable<LoDTensor>();
+      tensor->mutable_data<value_type>(platform::CPUPlace());
+      item.second.WriteShared(step, *tensor);
+    }
+  }
+  // the inlinks' lods should be the same, so randomly get one lod.
+  const auto& some_lod =
+      cache_.scope->FindVar(arg_.inlinks.front())->Get<LoDTensor>().lod();
+  const auto& some_meta = dy_seq_metas_[arg_.inlinks.front()];
   for (auto& item : step_outputs_) {
-    auto tensor = item.second.Pack(level, dy_seq_metas_[item.first], lod);
-    auto& output = cache_.outlinks[item.first]->Get<LoDTensor>();
-    const_cast<LoDTensor*>(&output)->ShareDataWith<value_type>(tensor);
+    auto tensor = item.second.Pack(level, some_meta, some_lod);
+    auto* output = cache_.outlinks[item.first]->GetMutable<LoDTensor>();
+    const_cast<LoDTensor*>(output)->ShareDataWith<value_type>(tensor);
   }
 }
 
 void DynamicRecurrentOp::InitStates() const {
-  // init the first state
-  // TODO(superjom) parepare the scenerio that boot state not exists
-  for (auto memory : arg_.memories) {
-    auto* boot_state_var = cache_.scope->FindVar(memory.boot_var);
-    PADDLE_ENFORCE_NOT_NULL(boot_state_var);
-    auto& boot_state = boot_state_var->Get<LoDTensor>();
-    const auto& dims = boot_state.dims();
-
-    for (size_t step = 0; step < cache_.num_steps; step++) {
-      auto& cur_scope = cache_.GetScope(step);
-      // link pre-state to boot_state
-      // init state and pre-state
-      auto* pre_state = cur_scope.FindVar(memory.pre_var);
-      PADDLE_ENFORCE_NOT_NULL(pre_state);
-      pre_state->GetMutable<LoDTensor>();
-
-      auto* state = cur_scope.FindVar(memory.var);
-      PADDLE_ENFORCE_NOT_NULL(state);
-      state->GetMutable<LoDTensor>()->Resize(dims);
-      state->GetMutable<LoDTensor>()->mutable_data<value_type>(
-          platform::CPUPlace());
-
-      if (step == 0) {
-        auto* pre_state_tensor = pre_state->GetMutable<LoDTensor>();
-        pre_state_tensor->Resize(boot_state.dims());
-        pre_state_tensor->ShareDataWith<value_type>(boot_state);
-      } else {
-        auto& pre_scope = cache_.GetScope(step - 1);
-        auto* state_pre = pre_scope.FindVar(memory.var);
-        PADDLE_ENFORCE_NOT_NULL(state_pre);
-        pre_state->GetMutable<LoDTensor>()->ShareDataWith<value_type>(
-            *state_pre->GetMutable<LoDTensor>());
-      }
+  for (size_t step = 0; step < cache_.num_steps; step++) {
+    for (const auto& memory : arg_.memories) {
+      CreateState(memory, step);
+      LinkState(memory, step);
     }
   }
 }
 
+void DynamicRecurrentOp::CreateState(const rnn::MemoryAttr& memory,
+                                     size_t step) const {
+  auto& scope = cache_.GetScope(step);
+  auto& state = *cache_.GetTensor(scope, memory.var);
+  auto& boot_state = *cache_.GetTensor(*cache_.scope, memory.boot_var);
+
+  size_t num_instances =
+      step_inputs_[arg_.inlinks.front()].Read(step).dims()[0];
+  auto dims = boot_state.dims();
+  dims[0] = num_instances;
+
+  state.Resize(dims);
+  state.mutable_data<value_type>(platform::CPUPlace());
+  states_[memory.var].WriteShared(step, state);
+}
+
+void DynamicRecurrentOp::LinkState(const rnn::MemoryAttr& memory,
+                                   size_t step) const {
+  auto& scope = cache_.GetScope(step);
+  auto& state_pre = *cache_.GetTensor(scope, memory.pre_var);
+
+  // all the step_inputs' metas should be the same, just randomly select one
+  // and get the dyseq meta.
+  const auto& some_meta = dy_seq_metas_[arg_.inlinks.front()];
+  size_t num_instances =
+      step_inputs_[arg_.inlinks.front()].Read(step).dims()[0];
+
+  LoDTensor* pre_state{nullptr};
+  if (step == 0) {
+    pre_state = cache_.GetTensor(*cache_.scope, memory.boot_var);
+    pre_state->mutable_data<float>(platform::CPUPlace());
+    // allocate memory
+    state_pre.Resize(pre_state->dims());
+    state_pre.mutable_data<value_type>(platform::CPUPlace());
+    detail::ReorderBootState<value_type>(some_meta, *pre_state, &state_pre,
+                                         pre_state->place());
+  } else {
+    pre_state = cache_.GetTensor(cache_.GetScope(step - 1), memory.var);
+  }
+
+  // shink and share from previous state
+  auto shrinked_pre_state = pre_state->Slice<value_type>(0, num_instances);
+  state_pre.ShareDataWith<value_type>(shrinked_pre_state);
+}
+
 void DynamicRecurrentOp::ArgCache::Init(const rnn::ArgumentName& name,
                                         const paddle::framework::OperatorBase& op,
                                         const paddle::framework::Scope& scope,
                                         rnn::Argument* arg) {

@@ -261,6 +313,12 @@ Variable* DynamicRecurrentOp::ArgCache::GetVariable(const Scope& scope,
   return var;
 }
 
+LoDTensor* DynamicRecurrentOp::ArgCache::GetTensor(
+    const framework::Scope& scope, const std::string& name) {
+  auto* var = GetVariable(scope, name);
+  return var->GetMutable<LoDTensor>();
+}
+
 const rnn::ArgumentName DynamicRecurrentOp::kArgName{
     "step_net", "step_scopes",  "inlinks",      "outlinks",
     "memories", "pre_memories", "boot_memories"};
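The comment above ReorderBootState captures the subtle point of this change: once Unpack has sorted sequences by descending length, row seq_id of a step batch no longer lines up with row seq_id of the user-supplied boot state, so LinkState copies the boot state into the new order (keyed by ori_idx) instead of sharing it. A small numpy sketch of that reordering under the same assumption, with reorder_boot_state as a hypothetical illustration of what the C++ template does row by row:

import numpy as np


def reorder_boot_state(metas, boot_state):
    """Sketch of detail::ReorderBootState: row seq_id of the result holds the
    boot-state row of the sequence that landed at position seq_id after the
    length-descending sort (metas[seq_id] carries its original index)."""
    reordered = np.empty_like(boot_state)
    for seq_id, (_, _, ori_idx) in enumerate(metas):
        reordered[seq_id] = boot_state[ori_idx]
    return reordered


if __name__ == "__main__":
    # Two sequences; suppose the sort swapped them, so ori_idx reads [1, 0].
    metas = [(0, 3, 1), (3, 5, 0)]
    boot = np.array([[0.0, 0.0], [1.0, 1.0]])
    print(reorder_boot_state(metas, boot))  # [[1. 1.] [0. 0.]]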
paddle/operators/dynamic_recurrent_op.h
@@ -77,6 +77,17 @@ class DynamicRecurrentOp : public framework::OperatorBase {
    */
   void InitStates() const;
 
+  /*
+   * Create state variables for each time step.
+   */
+  void CreateState(const rnn::MemoryAttr& memory, size_t step) const;
+
+  /*
+   * Link pre-state variable in current scope to the state variable in the
+   * previous time step (scope).
+   */
+  void LinkState(const rnn::MemoryAttr& memory, size_t step) const;
+
   /*
    * Concatenate outputs in each time step and generate a LoDTensor.
    */

@@ -91,6 +102,16 @@ class DynamicRecurrentOp : public framework::OperatorBase {
   }
 
   const OperatorBase& GetStepNet() const { return *stepnet_; }
 
+  const framework::TensorArray& state(const std::string& name) const {
+    return states_[name];
+  }
+  const framework::TensorArray& step_input(const std::string& name) const {
+    return step_inputs_[name];
+  }
+  const framework::TensorArray& step_output(const std::string& name) const {
+    return step_outputs_[name];
+  }
+
  protected:
   struct ArgCache {
     framework::Scope const* scope;

@@ -108,6 +129,9 @@ class DynamicRecurrentOp : public framework::OperatorBase {
       return *scopes->at(index);
     }
 
+    framework::LoDTensor* GetTensor(const framework::Scope& scope,
+                                    const std::string& name);
+
    private:
     void InitArgument(const rnn::ArgumentName& name, const OperatorBase& op,
                       rnn::Argument* arg);

@@ -122,7 +146,7 @@ class DynamicRecurrentOp : public framework::OperatorBase {
  private:
   std::unique_ptr<OperatorBase> stepnet_;
-  mutable framework::TensorArray states_;
+  mutable std::map<std::string, framework::TensorArray> states_;
   mutable std::map<std::string, framework::TensorArray> step_inputs_;
   mutable std::map<std::string, framework::TensorArray> step_outputs_;
   mutable std::map<std::string, std::vector<framework::DySeqMeta>>
paddle/operators/dynamic_recurrent_op_test.cc
@@ -87,7 +87,6 @@ class DynamicRecurrentOpTestHelper : public ::testing::Test {
     platform::CPUPlace place;
     scope.NewVar("step_scopes");
     CreateVar(scope, "boot_mem", framework::make_ddim({10, 20}), place);
-    // auto* out0 =
     CreateVar(scope, "out0", framework::make_ddim({10, 20}), place);
     auto* in0 = CreateVar(scope, "in0", framework::make_ddim({10, 8}), place);
     // 10 instanes with 4 sentences, length is 4, 3, 2, 1 respectively.
paddle/operators/sum_op.cc
@@ -34,7 +34,7 @@ class SumOp : public framework::OperatorWithKernel {
     auto in_dim = x_dims[0];
     for (size_t i = 1; i < N; i++) {
       auto dim = x_dims[i];
-      PADDLE_ENFORCE(in_dim == dim, "Input tensors must have same shape");
+      PADDLE_ENFORCE_EQ(in_dim, dim, "Input tensors must have same shape");
     }
     ctx->SetOutputDim("Out", in_dim);
     ctx->ShareLoD("X", /*->*/ "Out");
paddle/pybind/pybind.cc
@@ -18,6 +18,7 @@ limitations under the License. */
 #include "paddle/framework/lod_tensor.h"
 #include "paddle/framework/tensor_array.h"
 #include "paddle/operators/cond_op.h"
+#include "paddle/operators/dynamic_recurrent_op.h"
 #include "paddle/operators/net_op.h"
 #include "paddle/operators/recurrent_op.h"
 #include "paddle/platform/enforce.h"

@@ -341,6 +342,33 @@ All parameter, weight, gradient are variables in Paddle.
             self.set_stepnet(net.Clone());
           });
 
+  py::class_<operators::DynamicRecurrentOp, OperatorBase>(m,
+                                                          "DynamicRecurrentOp")
+      .def_static("create",
+                  [](py::bytes protobin) -> operators::DynamicRecurrentOp* {
+                    OpDesc desc;
+                    PADDLE_ENFORCE(desc.ParsePartialFromString(protobin),
+                                   "Cannot parse user input to OpDesc");
+                    PADDLE_ENFORCE(desc.IsInitialized(),
+                                   "User OpDesc is not initialized, reason %s",
+                                   desc.InitializationErrorString());
+                    auto rnn_op = OpRegistry::CreateOp(desc);
+                    return static_cast<operators::DynamicRecurrentOp*>(
+                        rnn_op.release());
+                  })
+      .def("set_stepnet",
+           [](operators::DynamicRecurrentOp& self, const operators::NetOp& net)
+               -> void { self.SetStepNet(net.Clone()); })
+      .def("get_state",
+           [](operators::DynamicRecurrentOp& self, const std::string& name)
+               -> const TensorArray& { return self.state(name); })
+      .def("get_step_input",
+           [](operators::DynamicRecurrentOp& self, const std::string& name)
+               -> const TensorArray& { return self.step_input(name); })
+      .def("get_step_output",
+           [](operators::DynamicRecurrentOp& self, const std::string& name)
+               -> const TensorArray& { return self.step_output(name); });
+
   // cond_op
   py::class_<operators::CondOp, OperatorBase>(m, "CondOp")
       .def_static("create",
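These bindings are what the new Python test below drives: create builds the operator from a serialized OpDesc (wrapped by the DynamicRecurrentOp factory added to op.py), set_stepnet attaches a NetOp, and the three getters expose the per-step TensorArrays. A condensed usage sketch of that flow; the tensor setup for "x", "W", "U" and "h_boot" is elided here and shown in full in create_global_variables() of the test:

# Condensed sketch (mirrors the test below). Filling the scope with tensors
# for "x" (with LoD), "W", "U" and "h_boot" is elided.
import paddle.v2.framework.core as core
from paddle.v2.framework.op import Operator, DynamicRecurrentOp

scope = core.Scope()
# ... create "x", "W", "U", "h_boot", "step_scopes" and "h@mem" in the scope ...

rnnop = DynamicRecurrentOp(
    inlinks=["x"], boot_memories=["h_boot"], step_net="stepnet",
    outlinks=["h@mem"], step_scopes="step_scopes",
    pre_memories=["h@pre"], memories=["h@mem"])

stepnet = core.Net.create()
for op in [Operator("mul", X="x", Y="W", Out="Wx"),
           Operator("mul", X="h@pre", Y="U", Out="Uh"),
           Operator("sum", X=["Wx", "Uh"], Out="sum"),
           Operator("sigmoid", X="sum", Y="h@mem")]:
    stepnet.append_op(op)
stepnet.complete_add_op(True)
rnnop.set_stepnet(stepnet)

rnnop.run(scope, core.DeviceContext.create(core.CPUPlace()))
print(rnnop.get_state("h@mem").size())        # TensorArray, one entry per step
print(rnnop.get_step_input("x").size())
print(rnnop.get_step_output("h@mem").size())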
python/paddle/v2/framework/op.py
@@ -219,6 +219,27 @@ class __RecurrentOp__(object):
         return core.RecurrentOp.create(proto.SerializeToString())
 
 
+class __DynamicRecurrentOp__(object):
+    __proto__ = None
+    type = "dynamic_recurrent"
+
+    def __init__(self):
+        # cache recurrent_op's proto
+        if self.__proto__ is None:
+            for op_proto in get_all_op_protos():
+                if op_proto.type == self.type:
+                    self.__proto__ = op_proto
+
+    def __call__(self, *args, **kwargs):
+        if self.type not in args and "type" not in kwargs:
+            kwargs["type"] = self.type
+        # create proto
+        create_method = OpDescCreationMethod(self.__proto__)
+        proto = create_method(*args, **kwargs)
+        # create rnnop
+        return core.DynamicRecurrentOp.create(proto.SerializeToString())
+
+
 class __CondOp__(object):
     __proto__ = None
     type = "cond"

@@ -242,4 +263,5 @@ class __CondOp__(object):
 Operator = OperatorFactory()  # The default global factory
 RecurrentOp = __RecurrentOp__()
+DynamicRecurrentOp = __DynamicRecurrentOp__()
 CondOp = __CondOp__()
python/paddle/v2/framework/tests/test_dynamic_recurrent_op.py
new file mode 100644

import logging
import paddle.v2.framework.core as core
import unittest
from paddle.v2.framework.op import Operator, DynamicRecurrentOp
import numpy as np


def create_tensor(scope, name, shape, np_data):
    tensor = scope.new_var(name).get_tensor()
    tensor.set_dims(shape)
    tensor.set(np_data, core.CPUPlace())
    return tensor


class DynamicRecurrentOpTest(unittest.TestCase):
    '''
    Test RNNOp

    equation:
        h_t = \sigma (W x_t + U h_{t-1})
    weights:
        - W
        - U
    vars:
        - x
    memories:
        - h
    outputs:
        - h
    '''

    # for siplicity, just one level LoD
    lod_py = [[0, 4, 7, 9, 10]]
    input_dim = 30
    num_sents = len(lod_py[0]) - 1
    weight_dim = 15

    def forward(self):
        self.scope = core.Scope()
        self.create_global_variables()
        self.create_rnn_op()
        self.create_step_net()
        ctx = core.DeviceContext.create(core.CPUPlace())
        self.rnnop.run(self.scope, ctx)
        state = self.rnnop.get_state("h@mem")
        print 'state size: ', state.size()

        step_inputs = self.rnnop.get_step_input("x")
        print "x size ", step_inputs.size()
        for i in range(step_inputs.size()):
            print "x %d" % i, np.array(step_inputs.read(i).get_dims())
        step_outputs = self.rnnop.get_step_output('h@mem')
        print 'step_outputs.size ', step_outputs.size()
        output = self.scope.find_var("h@mem").get_tensor()
        print 'output', np.array(output).shape

    def create_global_variables(self):
        x = np.random.normal(size=(self.lod_py[0][-1],
                                   self.input_dim)).astype("float32")
        W = np.random.normal(size=(self.input_dim,
                                   self.input_dim)).astype("float32")
        U = np.random.normal(size=(self.input_dim,
                                   self.input_dim)).astype("float32")
        h_boot = np.random.normal(size=(self.num_sents,
                                        self.input_dim)).astype("float32")
        # create inlink
        x_tensor = create_tensor(self.scope, "x",
                                 [self.num_sents, self.input_dim], x)
        x_tensor.set_lod(self.lod_py)
        create_tensor(self.scope, "W", [self.input_dim, self.input_dim], W)
        create_tensor(self.scope, "U", [self.input_dim, self.input_dim], U)
        create_tensor(self.scope, "h_boot", [self.num_sents, self.input_dim],
                      h_boot)
        self.scope.new_var("step_scopes")
        self.scope.new_var("h@mem")

    def create_rnn_op(self):
        # create RNNOp
        self.rnnop = DynamicRecurrentOp(
            # inputs
            inlinks=["x"],
            boot_memories=["h_boot"],
            step_net="stepnet",
            # outputs
            outlinks=["h@mem"],
            step_scopes="step_scopes",
            # attributes
            pre_memories=["h@pre"],
            memories=["h@mem"])

    def create_step_net(self):
        stepnet = core.Net.create()
        x_fc_op = Operator("mul", X="x", Y="W", Out="Wx")
        h_fc_op = Operator("mul", X="h@pre", Y="U", Out="Uh")
        sum_op = Operator("sum", X=["Wx", "Uh"], Out="sum")
        sig_op = Operator("sigmoid", X="sum", Y="h@mem")

        for op in [x_fc_op, h_fc_op, sum_op, sig_op]:
            stepnet.append_op(op)
        stepnet.complete_add_op(True)
        self.rnnop.set_stepnet(stepnet)

    def test_forward(self):
        print 'test recurrent op forward'
        pd_output = self.forward()
        print 'pd_output', pd_output


if __name__ == '__main__':
    unittest.main()
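The test currently only prints sizes and shapes; it does not yet compare against a reference implementation of the docstring's equation h_t = \sigma(W x_t + U h_{t-1}). A plain-numpy sketch of such a reference, walking each sequence independently in its original order (the operator itself runs length-descending dynamic batches, but because sequences are independent the per-sequence results are expected to be equivalent):

import numpy as np


def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))


def reference_forward(x, lod, W, U, h_boot):
    """Numpy sketch of h_t = sigmoid(x_t W + h_{t-1} U), matching the stepnet
    built from two "mul" ops, a "sum" and a "sigmoid"; the output keeps the
    original LoD layout of x."""
    h = np.zeros_like(x)
    offsets = lod[0]
    for seq in range(len(offsets) - 1):
        begin, end = offsets[seq], offsets[seq + 1]
        prev = h_boot[seq]
        for t in range(begin, end):
            prev = sigmoid(np.dot(x[t], W) + np.dot(prev, U))
            h[t] = prev
    return h


if __name__ == "__main__":
    lod = [[0, 4, 7, 9, 10]]
    input_dim = 30
    x = np.random.normal(size=(lod[0][-1], input_dim)).astype("float32")
    W = np.random.normal(size=(input_dim, input_dim)).astype("float32")
    U = np.random.normal(size=(input_dim, input_dim)).astype("float32")
    h_boot = np.random.normal(
        size=(len(lod[0]) - 1, input_dim)).astype("float32")
    print(reference_forward(x, lod, W, U, h_boot).shape)  # (10, 30)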