Commit 5d97c0d8 authored by Tao Luo, committed by GitHub

Merge pull request #1379 from luotao1/seqconcat

add SequenceConcatLayer in trainer_config_helpers
......@@ -279,6 +279,12 @@ concat_layer
    :members: concat_layer
    :noindex:

seq_concat_layer
----------------
.. automodule:: paddle.trainer_config_helpers.layers
    :members: seq_concat_layer
    :noindex:

Reshaping Layers
================
......
......@@ -21,9 +21,11 @@ namespace paddle {
/**
* A layer for concatenating the first sequence with the second sequence
* following the first
 * Input: two sequences each containing the same number of instances
 *        seq1 = [a1, a2, ..., an]
 *        seq2 = [b1, b2, ..., bn]
 * Output: a concatenated sequence of the two input sequences
 *         out = [a1, b1, a2, b2, ..., an, bn]
*/
class SequenceConcatLayer : public Layer {
......
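To make the output ordering concrete, here is a minimal pure-Python sketch (an illustration only, not part of this patch) of the pairwise interleaving the comment describes:

# Illustration only: the real computation happens inside the C++
# SequenceConcatLayer. Instances of two equal-length sequences are
# interleaved pairwise, giving out = [a1, b1, a2, b2, ..., an, bn].
def interleave(seq1, seq2):
    assert len(seq1) == len(seq2), "sequences must have the same length"
    out = []
    for a, b in zip(seq1, seq2):
        out.append(a)
        out.append(b)
    return out

print(interleave(["a1", "a2", "a3"], ["b1", "b2", "b3"]))
# ['a1', 'b1', 'a2', 'b2', 'a3', 'b3']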
......@@ -59,6 +59,7 @@ __all__ = [
'img_cmrnorm_layer',
'addto_layer',
'concat_layer',
'seq_concat_layer',
'lstm_step_layer',
'recurrent_group',
'memory',
......@@ -144,6 +145,7 @@ class LayerType(object):
CONCAT_LAYER = 'concat'
CONCAT_PROJ_LAYER = 'concat2'
SEQUENCE_CONCAT_LAYER = 'seqconcat'
LSTM_STEP_LAYER = 'lstm_step'
GRU_STEP_LAYER = 'gru_step'
......@@ -2570,6 +2572,59 @@ def concat_layer(input, act=None, name=None, layer_attr=None, bias_attr=None):
size=sz)
@wrap_name_default("seqconcat")
@wrap_act_default(act=IdentityActivation())
@wrap_bias_attr_default(has_bias=False)
@layer_support()
def seq_concat_layer(a, b, act=None, name=None, layer_attr=None,
                     bias_attr=None):
    """
    Concatenate sequence a with sequence b.

    Inputs:
      - a = [a1, a2, ..., an]
      - b = [b1, b2, ..., bn]
      - Note that the length of a and b should be the same.

    Output: [a1, b1, a2, b2, ..., an, bn]

    The example usage is:

    .. code-block:: python

       concat = seq_concat_layer(a=layer1, b=layer2)

    :param name: Layer name.
    :type name: basestring
    :param a: input sequence layer
    :type a: LayerOutput
    :param b: input sequence layer
    :type b: LayerOutput
    :param act: Activation type.
    :type act: BaseActivation
    :param layer_attr: Extra Layer Attribute.
    :type layer_attr: ExtraLayerAttribute
    :param bias_attr: The bias attribute; defaults to False (no bias).
    :type bias_attr: ParameterAttribute|None|bool
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    assert isinstance(a, LayerOutput) and isinstance(b, LayerOutput)
    assert a.size == b.size
    Layer(
        name=name,
        type=LayerType.SEQUENCE_CONCAT_LAYER,
        inputs=[a.name, b.name],
        active_type=act.name,
        bias=ParamAttr.to_bias(bias_attr),
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    return LayerOutput(
        name,
        layer_type=LayerType.SEQUENCE_CONCAT_LAYER,
        parents=[a, b],
        activation=act,
        size=a.size)
def memory(name,
           size,
           is_seq=False,
......
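For context, a hedged usage sketch: the data layers mirror the test config at the end of this diff, while the downstream pooling and fully connected layers are illustrative additions that are not part of this patch.

from paddle.trainer_config_helpers import *

settings(batch_size=1000, learning_rate=1e-5)

# Two equal-sized sequence inputs; seq_concat_layer asserts a.size == b.size,
# and its output keeps that size (30 here) while the sequence doubles in length.
din1 = data_layer(name='data1', size=30)
din2 = data_layer(name='data2', size=30)
concat = seq_concat_layer(a=din1, b=din2)

# Illustrative downstream use: pool over the concatenated sequence, then
# project it with a fully connected layer.
pooled = pooling_layer(input=concat, pooling_type=MaxPooling())
outputs(fc_layer(input=pooled, size=10, act=SoftmaxActivation()))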
......@@ -4,6 +4,7 @@ test_sequence_pooling test_lstmemory_layer test_grumemory_layer
last_first_seq test_expand_layer test_ntm_layers test_hsigmoid
img_layers img_trans_layers util_layers simple_rnn_layers unused_layers test_cost_layers
test_rnn_group shared_fc shared_lstm shared_gru test_cost_layers_with_weight
test_spp_layer test_bilinear_interp test_maxout test_bi_grumemory math_ops
test_seq_concat)
export whole_configs=(test_split_datasource)
type: "nn"
layers {
name: "data1"
type: "data"
size: 30
active_type: ""
}
layers {
name: "data2"
type: "data"
size: 30
active_type: ""
}
layers {
name: "__seqconcat_0__"
type: "seqconcat"
size: 30
active_type: ""
inputs {
input_layer_name: "data1"
}
inputs {
input_layer_name: "data2"
}
}
input_layer_names: "data1"
input_layer_names: "data2"
output_layer_names: "__seqconcat_0__"
sub_models {
name: "root"
layer_names: "data1"
layer_names: "data2"
layer_names: "__seqconcat_0__"
input_layer_names: "data1"
input_layer_names: "data2"
output_layer_names: "__seqconcat_0__"
is_recurrent_layer_group: false
}
from paddle.trainer_config_helpers import *
settings(batch_size=1000, learning_rate=1e-5)
din1 = data_layer(name='data1', size=30)
din2 = data_layer(name='data2', size=30)
outputs(seq_concat_layer(a=din1, b=din2))