Commit 5d97c0d8
Authored on Feb 21, 2017 by Tao Luo; committed via GitHub on Feb 21, 2017
Merge pull request #1379 from luotao1/seqconcat
add SequenceConcatLayer in trainer_config_helpers
Parent commits: 037e0a06, da8106ef
Showing 6 changed files with 115 additions and 3 deletions (+115 -3)
doc/api/trainer_config_helpers/layers.rst  +6 -0
paddle/gserver/layers/SequenceConcatLayer.cpp  +4 -2
python/paddle/trainer_config_helpers/layers.py  +55 -0
python/paddle/trainer_config_helpers/tests/configs/file_list.sh  +2 -1
python/paddle/trainer_config_helpers/tests/configs/protostr/test_seq_concat.protostr  +39 -0
python/paddle/trainer_config_helpers/tests/configs/test_seq_concat.py  +9 -0
doc/api/trainer_config_helpers/layers.rst

@@ -279,6 +279,12 @@ concat_layer
    :members: concat_layer
    :noindex:

seq_concat_layer
----------------
.. automodule:: paddle.trainer_config_helpers.layers
    :members: seq_concat_layer
    :noindex:

Reshaping Layers
================
paddle/gserver/layers/SequenceConcatLayer.cpp

@@ -21,9 +21,11 @@ namespace paddle {
 /**
  * A layer for concatenating the first sequence with the second sequence
- * following the first
- * Input: two sequences each containing some instances
+ * Input: two sequences each containing the same number of instances
+ *        seq1 = [a1, a2, ..., an]
+ *        seq2 = [b1, b2, ..., bn]
  * Output: a concatenated sequence of the two input sequences
+ *         out = [a1, b1, a2, b2, ..., an, bn]
  */
 class SequenceConcatLayer : public Layer {
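The updated comment pins down an interleaving order rather than a tail-to-tail join: both inputs must contain the same number of instances, and the output alternates them one by one. As a minimal sketch of that ordering only (plain Python lists standing in for the two input sequences; this is an illustration, not the C++ implementation):

def interleave_concat(seq1, seq2):
    # seq1 = [a1, ..., an], seq2 = [b1, ..., bn]; lengths must match,
    # mirroring the layer's same-number-of-instances requirement.
    assert len(seq1) == len(seq2)
    out = []
    for a, b in zip(seq1, seq2):
        out.extend([a, b])
    return out

# interleave_concat(['a1', 'a2'], ['b1', 'b2']) == ['a1', 'b1', 'a2', 'b2']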
python/paddle/trainer_config_helpers/layers.py

@@ -59,6 +59,7 @@ __all__ = [
    'img_cmrnorm_layer',
    'addto_layer',
    'concat_layer',
    'seq_concat_layer',
    'lstm_step_layer',
    'recurrent_group',
    'memory',

@@ -144,6 +145,7 @@ class LayerType(object):
    CONCAT_LAYER = 'concat'
    CONCAT_PROJ_LAYER = 'concat2'
    SEQUENCE_CONCAT_LAYER = 'seqconcat'
    LSTM_STEP_LAYER = 'lstm_step'
    GRU_STEP_LAYER = 'gru_step'
@@ -2570,6 +2572,59 @@ def concat_layer(input, act=None, name=None, layer_attr=None, bias_attr=None):
        size=sz)


@wrap_name_default("seqconcat")
@wrap_act_default(act=IdentityActivation())
@wrap_bias_attr_default(has_bias=False)
@layer_support()
def seq_concat_layer(a, b, act=None, name=None, layer_attr=None,
                     bias_attr=None):
    """
    Concat sequence a with sequence b.

    Inputs:
      - a = [a1, a2, ..., an]
      - b = [b1, b2, ..., bn]
      - Note that the length of a and b should be the same.

    Output: [a1, b1, a2, b2, ..., an, bn]

    The example usage is:

    .. code-block:: python

       concat = seq_concat_layer(a=layer1, b=layer2)

    :param name: Layer name.
    :type name: basestring
    :param a: input sequence layer
    :type a: LayerOutput
    :param b: input sequence layer
    :type b: LayerOutput
    :param act: Activation type.
    :type act: BaseActivation
    :param layer_attr: Extra Layer Attribute.
    :type layer_attr: ExtraLayerAttribute
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    assert isinstance(a, LayerOutput) and isinstance(b, LayerOutput)
    assert a.size == b.size
    Layer(
        name=name,
        type=LayerType.SEQUENCE_CONCAT_LAYER,
        inputs=[a.name, b.name],
        active_type=act.name,
        bias=ParamAttr.to_bias(bias_attr),
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    return LayerOutput(
        name,
        layer_type=LayerType.SEQUENCE_CONCAT_LAYER,
        parents=[a, b],
        activation=act,
        size=a.size)


def memory(name, size, is_seq=False, ...
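Per the docstring and the returned LayerOutput, the result keeps the per-instance width of its inputs (size=a.size); only the sequence gets longer. A usage sketch building on the docstring's example; the layer names din1/din2 and the downstream fc_layer are illustrative, not part of this change:

from paddle.trainer_config_helpers import *

din1 = data_layer(name='din1', size=128)
din2 = data_layer(name='din2', size=128)

# Interleaved sequence-level concatenation; each output instance stays 128-dim.
merged = seq_concat_layer(a=din1, b=din2)

# A downstream layer consumes the merged sequence like any other sequence input.
pred = fc_layer(input=merged, size=10, act=SoftmaxActivation())
outputs(pred)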
python/paddle/trainer_config_helpers/tests/configs/file_list.sh

@@ -4,6 +4,7 @@ test_sequence_pooling test_lstmemory_layer test_grumemory_layer
 last_first_seq test_expand_layer test_ntm_layers test_hsigmoid
 img_layers img_trans_layers util_layers simple_rnn_layers unused_layers test_cost_layers
 test_rnn_group shared_fc shared_lstm shared_gru test_cost_layers_with_weight
-test_spp_layer test_bilinear_interp test_maxout test_bi_grumemory math_ops)
+test_spp_layer test_bilinear_interp test_maxout test_bi_grumemory math_ops
+test_seq_concat)
 export whole_configs=(test_split_datasource)
python/paddle/trainer_config_helpers/tests/configs/protostr/test_seq_concat.protostr (new file, mode 100644)

type: "nn"
layers {
  name: "data1"
  type: "data"
  size: 30
  active_type: ""
}
layers {
  name: "data2"
  type: "data"
  size: 30
  active_type: ""
}
layers {
  name: "__seqconcat_0__"
  type: "seqconcat"
  size: 30
  active_type: ""
  inputs {
    input_layer_name: "data1"
  }
  inputs {
    input_layer_name: "data2"
  }
}
input_layer_names: "data1"
input_layer_names: "data2"
output_layer_names: "__seqconcat_0__"
sub_models {
  name: "root"
  layer_names: "data1"
  layer_names: "data2"
  layer_names: "__seqconcat_0__"
  input_layer_names: "data1"
  input_layer_names: "data2"
  output_layer_names: "__seqconcat_0__"
  is_recurrent_layer_group: false
}
python/paddle/trainer_config_helpers/tests/configs/test_seq_concat.py (new file, mode 100644)

from paddle.trainer_config_helpers import *

settings(batch_size=1000, learning_rate=1e-5)

din1 = data_layer(name='data1', size=30)
din2 = data_layer(name='data2', size=30)

outputs(seq_concat_layer(a=din1, b=din2))
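In the expected protostr above, the new layer appears as "__seqconcat_0__" because this test config passes no name= and @wrap_name_default("seqconcat") generates one. A small variant, assuming an explicitly named layer (the name 'merged_seq' is illustrative only):

from paddle.trainer_config_helpers import *

settings(batch_size=1000, learning_rate=1e-5)

din1 = data_layer(name='data1', size=30)
din2 = data_layer(name='data2', size=30)

# Passing name= overrides the auto-generated "__seqconcat_0__"-style name.
outputs(seq_concat_layer(a=din1, b=din2, name='merged_seq'))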