Crayon鑫 / Paddle (forked from PaddlePaddle / Paddle)
Commit 73af1942

add the implementation of rnn by yuyang

Author: qiaolongfei
Committed on: Mar 01, 2017
Parent: e9cd3867

Showing 2 changed files with 97 additions and 56 deletions (+97 -56)
python/paddle/trainer_config_helpers/layers.py  (+1 -1)
python/paddle/v2/layer.py  (+96 -55)
python/paddle/trainer_config_helpers/layers.py

@@ -822,7 +822,7 @@ def data_layer(name, size, height=None, width=None, layer_attr=None):
     return LayerOutput(name, LayerType.DATA, size=size)
 
 
-@wrap_name_default("embedding")
+@wrap_name_default("embedding_layer")
 @wrap_param_attr_default()
 @layer_support(ERROR_CLIPPING)
 def embedding_layer(input, size, name=None, param_attr=None, layer_attr=None):
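The only change to layers.py is the default name prefix handed to wrap_name_default, so it affects nothing but the names auto-assigned to embedding layers created without an explicit name. As a rough, hypothetical illustration of how such a name-defaulting decorator behaves (this is only the general pattern, not the Paddle implementation):

import functools


def wrap_name_default(prefix):
    # Hypothetical stand-in for Paddle's decorator: fill in `name` as
    # '<prefix>_<counter>' when the caller does not pass one.
    counter = [0]

    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            if kwargs.get('name') is None:
                kwargs['name'] = '%s_%d' % (prefix, counter[0])
                counter[0] += 1
            return func(*args, **kwargs)

        return wrapper

    return decorator


@wrap_name_default("embedding_layer")
def embedding_layer(input, size, name=None):
    return name  # auto-assigned, e.g. 'embedding_layer_0' in this sketch

In other words, unnamed embedding layers pick up generated names based on that prefix (in the sketch above, 'embedding_layer_0', 'embedding_layer_1', and so on).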
python/paddle/v2/layer.py

@@ -76,6 +76,9 @@ from paddle.trainer_config_helpers.default_decorators import \
     wrap_bias_attr_default
 from paddle.trainer_config_helpers.default_decorators import wrap_name_default
 from paddle.trainer_config_helpers.layers import layer_support
+from paddle.trainer.config_parser import \
+    RecurrentLayerGroupWithoutOutLinksBegin, RecurrentLayerGroupSetOutLink, \
+    RecurrentLayerGroupEnd, model_type
 
 import activation
 import data_type
@@ -126,21 +129,28 @@ class Layer(object):
                                self.__parent_layers__[layer_name])
             kwargs[layer_name] = v1_layer
 
-        if self.name is None:
+        if self.context_name() is None:
             return self.to_proto_impl(**kwargs)
         elif isinstance(self, MemoryV2):
             name = self.name + "#__memory__"
             if name not in context:
                 context[name] = self.to_proto_impl(**kwargs)
             return context[name]
-
-        if self.name not in context:
-            context[self.name] = self.to_proto_impl(**kwargs)
+        elif self.context_name() not in context:
+            context[self.context_name()] = self.to_proto_impl(**kwargs)
         return context[self.name]
 
     def to_proto_impl(self, **kwargs):
         raise NotImplementedError()
 
+    def context_name(self):
+        """
+        Context name means the context which stores `to_proto_impl` result.
+        If multiple layer share same context_name, the `to_proto_impl` of them
+        will be invoked only once.
+        """
+        return self.name
+
 
 def __convert_to_v2__(method_name, parent_names, is_default_name=True):
     if is_default_name:
@@ -231,6 +241,9 @@ class MemoryV2(Layer):
         return conf_helps.memory(name=self.name, size=self.size, **args)
 
+    def context_name(self):
+        return self.name + "#memory"
+
 
 class LayerOutputV2(Layer):
     """
@@ -249,60 +262,20 @@ class LayerOutputV2(Layer):
 class StaticInputV2(Layer):
-    def __init__(self, **kwargs):
-        self.__parent_names__ = ['input']
-        other_kwargs = dict()
-        parent_layers = dict()
-        for pname in self.__parent_names__:
-            if kwargs.has_key(pname):
-                parent_layers[pname] = kwargs[pname]
-        for key in kwargs.keys():
-            if key not in self.__parent_names__:
-                other_kwargs[key] = kwargs[key]
-        self.__kwargs__ = other_kwargs
-        super(StaticInputV2, self).__init__(parent_layers=parent_layers)
-
-    def to_proto_impl(self, **kwargs):
-        args = dict()
-        for each in kwargs:
-            args[each] = kwargs[each]
-        for each in self.__kwargs__:
-            args[each] = self.__kwargs__[each]
-        return conf_helps.StaticInput(**args)
-
-
-class RecurrentGroupV2(Layer):
-    def __init__(self, name, **kwargs):
-        self.__parent_names__ = ['input', 'boot_layer']
-        other_kwargs = dict()
-        parent_layers = dict()
-        for pname in self.__parent_names__:
-            if kwargs.has_key(pname):
-                parent_layers[pname] = kwargs[pname]
-        for key in kwargs.keys():
-            if key not in self.__parent_names__:
-                other_kwargs[key] = kwargs[key]
-        self.__kwargs__ = other_kwargs
-        super(RecurrentGroupV2, self).__init__(
-            name=name, parent_layers=parent_layers)
-
-    wrapper = wrap_name_default(name_prefix='recurrent_group')
-
-    __init__ = wrapper(__init__)
-
-    def to_proto_impl(self, **kwargs):
-        def in_args_converter(*in_args):
-            if not isinstance(in_args, collections.Sequence):
-                in_args = [in_args]
-            return [LayerOutputV2(input) for input in in_args]
-
-        args = dict()
-        for each in kwargs:
-            args[each] = kwargs[each]
-        for each in self.__kwargs__:
-            args[each] = self.__kwargs__[each]
-        return conf_helps.recurrent_group(
-            name=self.name, in_args_converter=in_args_converter, **args)
+    def __init__(self, input=None, **kwargs):
+        assert input is not None
+        self.__kwargs__ = kwargs
+        super(StaticInputV2, self).__init__(
+            name=input.name, parent_layers={'input': input})
+
+    def context_name(self):
+        return self.name + "#static_input"
+
+    def to_proto_impl(self, **kwargs):
+        args = dict()
+        args.update(kwargs)
+        args.update(self.__kwargs__)
+        return conf_helps.StaticInput(**args)
 
 
 class MixedLayerV2(Layer):
@@ -377,11 +350,79 @@ def mixed(size=0,
     return MixedLayerV2(size, input, name, act, bias_attr, layer_attr)
 
 
+class RecurrentLayerInput(Layer):
+    def __init__(self, recurrent_name, index, parent_layers):
+        assert len(parent_layers) == 1
+        self.__parents__ = parent_layers.values()[0]
+        print self.__parents__, parent_layers
+        super(RecurrentLayerInput, self).__init__(
+            name=self.__parents__[index].name, parent_layers=parent_layers)
+        self.__recurrent_name__ = recurrent_name
+
+    def context_name(self):
+        return self.__recurrent_name__ + ".begin"
+
+    def to_proto_impl(self, **kwargs):
+        model_type('recurrent_nn')
+        RecurrentLayerGroupWithoutOutLinksBegin(
+            name=self.__recurrent_name__,
+            in_links=map(lambda x: x.name, self.__parents__))
+        return self
+
+
+class RecurrentLayerOutput(Layer):
+    def __init__(self, recurrent_name, index, parent_layers):
+        assert len(parent_layers) == 1
+        self.__parents__ = parent_layers.values()[0]
+        super(RecurrentLayerOutput, self).__init__(
+            name=self.__parents__[index].name, parent_layers=parent_layers)
+        self.__recurrent_name__ = recurrent_name
+
+    def context_name(self):
+        return self.__recurrent_name__ + ".end"
+
+    def to_proto_impl(self, **kwargs):
+        for l in self.__parents__:
+            RecurrentLayerGroupSetOutLink(l.name)
+        RecurrentLayerGroupEnd(name=self.__recurrent_name__)
+
+
+@wrap_name_default()
+def recurrent_group(step, input, name=None):
+    if not isinstance(input, collections.Sequence):
+        input = [input]
+
+    actual_input = [
+        RecurrentLayerInput(
+            recurrent_name=name,
+            index=i,
+            parent_layers={'recurrent_inputs': input})
+        for i in xrange(len(input))
+    ]
+
+    actual_output = step(*actual_input)
+
+    if not isinstance(actual_output, collections.Sequence):
+        actual_output = [actual_output]
+
+    retv = [
+        RecurrentLayerOutput(
+            recurrent_name=name,
+            index=i,
+            parent_layers={'recurrent_outputs': actual_output})
+        for i in xrange(len(actual_output))
+    ]
+    if len(retv) == 1:
+        return retv[0]
+    else:
+        return retv
+
+
 LayerV2 = Layer
 data = DataLayerV2
 AggregateLevel = conf_helps.layers.AggregateLevel
 ExpandLevel = conf_helps.layers.ExpandLevel
-recurrent_group = RecurrentGroupV2
+recurrent_group = recurrent_group
 memory = MemoryV2
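Taken together, the new classes translate a v2-style recurrent_group call into the v1 protobuf calls: RecurrentLayerInput opens the recurrent layer group and registers the in-links, the user-supplied step function builds the loop body (with memory supplying the state from the previous time step), and RecurrentLayerOutput sets the out-links and closes the group, while context_name keeps each group from being emitted more than once. A minimal usage sketch of the new API follows; it assumes the v2 package also exposes data, embedding, fc, activation, and data_type as in later v2 examples, and every name and size in it is illustrative only, not taken from this commit:

# Hypothetical usage of the recurrent_group added in this commit (not part of
# the diff). Assumes paddle.v2.layer also exposes data/embedding/fc and that
# paddle.v2.activation / paddle.v2.data_type are importable as in later v2 code.
import paddle.v2.layer as layer
import paddle.v2.activation as activation
import paddle.v2.data_type as data_type

# A sequence of word ids; the dictionary size 1000 is illustrative.
word = layer.data(name='word', type=data_type.integer_value_sequence(1000))
emb = layer.embedding(input=word, size=64)


def step(y):
    # `memory` is this layer's own output from the previous time step; the
    # recurrence is established by giving the memory and the output the same
    # name ('rnn_state').
    mem = layer.memory(name='rnn_state', size=128)
    out = layer.fc(input=[y, mem],
                   size=128,
                   act=activation.Tanh(),
                   name='rnn_state')
    return out


# recurrent_group wraps `emb` in RecurrentLayerInput, runs `step` once to build
# the loop body, and wraps its result in RecurrentLayerOutput.
rnn_out = layer.recurrent_group(step=step, input=emb, name='simple_rnn')

Nothing is written to the protobuf configuration at this point; the v1 calls (model_type, RecurrentLayerGroupWithoutOutLinksBegin, RecurrentLayerGroupSetOutLink, RecurrentLayerGroupEnd) only run when to_proto is eventually invoked on the resulting layers, and the context_name keys ('simple_rnn.begin', 'simple_rnn.end') ensure that happens exactly once per group.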