Commit 0b673756 authored by Luo Tao

add SequenceReshapeLayer in trainer_config_helpers

Parent 14ee4b80
......@@ -308,6 +308,12 @@ repeat_layer
:members: repeat_layer
:noindex:
seq_reshape_layer
-----------------
.. automodule:: paddle.trainer_config_helpers.layers
:members: seq_reshape_layer
:noindex:
Math Layers
===========
......
......@@ -20,9 +20,12 @@ limitations under the License. */
namespace paddle {
/**
* A layer for reshaping the sequence
* Input: a sequence
* Output: a sequence
* A layer for reshaping the sequence. Suppose the input sequence has
* T instances, each of dimension M, and reshape_dim is N; then the
* output sequence has T*M/N instances, each of dimension N.
*
* Note that T*M must be divisible by N.
*/
class SequenceReshapeLayer : public Layer {
......
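To make the shape arithmetic in the comment above concrete, here is a small sketch (illustrative only, not part of the commit); the values of T, M, and N are arbitrary:

# Illustrative check of the reshape arithmetic (not part of the commit).
T, M, N = 4, 10, 5         # input length, input dimension, reshape_dim
assert (T * M) % N == 0    # T*M must be divisible by N
out_len = T * M // N       # number of instances in the output sequence
print(out_len)             # prints 8; each instance has dimension N = 5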
......@@ -37,6 +37,7 @@ __all__ = [
"dotmul_projection",
"dotmul_operator",
"repeat_layer",
"seq_reshape_layer",
"table_projection",
"mixed_layer",
"data_layer",
......@@ -125,6 +126,7 @@ class LayerType(object):
GRUMEMORY = "gated_recurrent"
SEQUENCE_LAST_INSTANCE = "seqlastins"
SEQUENCE_FIRST_INSTANCE = "seqfirstins"
SEQUENCE_RESHAPE = "seqreshape"
POOLING_MAX = "max"
POOLING_AVG = 'average'
FC_LAYER = "fc"
......@@ -1450,6 +1452,61 @@ def repeat_layer(input, num_repeats, name=None, layer_attr=None):
parents=[input])
@wrap_name_default("seqreshape")
@wrap_act_default(act=IdentityActivation())
@wrap_bias_attr_default(has_bias=False)
@layer_support()
def seq_reshape_layer(input,
reshape_size,
act=None,
name=None,
layer_attr=None,
bias_attr=None):
"""
A layer for reshaping the sequence. Suppose the input sequence has T instances,
each of dimension M, and reshape_size is N; then the output sequence has T*M/N
instances, each of dimension N.
Note that T*M must be divisible by N.
The example usage is:

.. code-block:: python

   reshape = seq_reshape_layer(input=layer, reshape_size=4)

:param input: Input layer.
:type input: LayerOutput
:param reshape_size: the dimension of each instance in the reshaped sequence.
:type reshape_size: int
:param name: Layer name.
:type name: basestring
:param act: Activation type.
:type act: BaseActivation
:param layer_attr: Extra layer attributes.
:type layer_attr: ExtraLayerAttribute
:param bias_attr: The bias attribute. If False or an object that is not a
ParameterAttribute is passed, no bias is used; if None, a default bias is
created.
:type bias_attr: ParameterAttribute or None or bool
:return: LayerOutput object.
:rtype: LayerOutput
"""
Layer(
inputs=[input.name],
name=name,
size=reshape_size,
type=LayerType.SEQUENCE_RESHAPE,
bias=ParamAttr.to_bias(bias_attr),
**ExtraAttr.to_kwargs(layer_attr))
return LayerOutput(
name=name,
size=reshape_size,
layer_type=LayerType.SEQUENCE_RESHAPE,
parents=[input])
@wrap_name_default()
@layer_support()
def interpolation_layer(input, weight, name=None, layer_attr=None):
......@@ -2604,6 +2661,10 @@ def seq_concat_layer(a, b, act=None, name=None, layer_attr=None,
:type act: BaseActivation
:param layer_attr: Extra Layer Attribute.
:type layer_attr: ExtraLayerAttribute
:param bias_attr: The bias attribute. If False or an object that is not a
ParameterAttribute is passed, no bias is used; if None, a default bias is
created.
:type bias_attr: ParameterAttribute or None or bool
:return: LayerOutput object.
:rtype: LayerOutput
"""
......
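For orientation, a minimal, hypothetical config using the new layer (the names din and reshape are illustrative; the size 30 and reshape_size 5 mirror the test config further below):

from paddle.trainer_config_helpers import *

din = data_layer(name='data', size=30)  # each instance has dimension 30
# A sequence of T instances of dimension 30 becomes a sequence of
# T*30/5 = 6*T instances, each of dimension 5.
reshape = seq_reshape_layer(input=din, reshape_size=5)
outputs(reshape)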
......@@ -5,6 +5,6 @@ last_first_seq test_expand_layer test_ntm_layers test_hsigmoid
img_layers img_trans_layers util_layers simple_rnn_layers unused_layers test_cost_layers
test_rnn_group shared_fc shared_lstm shared_gru test_cost_layers_with_weight
test_spp_layer test_bilinear_interp test_maxout test_bi_grumemory math_ops
test_seq_concat)
test_seq_concat_reshape)
export whole_configs=(test_split_datasource)
......@@ -23,17 +23,29 @@ layers {
input_layer_name: "data2"
}
}
layers {
name: "__seqreshape_0__"
type: "seqreshape"
size: 5
active_type: "linear"
inputs {
input_layer_name: "data1"
}
}
input_layer_names: "data1"
input_layer_names: "data2"
output_layer_names: "__seqconcat_0__"
output_layer_names: "__seqreshape_0__"
sub_models {
name: "root"
layer_names: "data1"
layer_names: "data2"
layer_names: "__seqconcat_0__"
layer_names: "__seqreshape_0__"
input_layer_names: "data1"
input_layer_names: "data2"
output_layer_names: "__seqconcat_0__"
output_layer_names: "__seqreshape_0__"
is_recurrent_layer_group: false
}
......@@ -3,7 +3,10 @@ from paddle.trainer_config_helpers import *
settings(batch_size=1000, learning_rate=1e-5)
din1 = data_layer(name='data1', size=30)
din2 = data_layer(name='data2', size=30)
outputs(seq_concat_layer(a=din1, b=din2))
opts = []
opts.append(seq_concat_layer(a=din1, b=din2))
opts.append(seq_reshape_layer(input=din1, reshape_size=5))
outputs(opts)
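As a cross-check against the generated protobuf earlier in the diff: reshape_size=5 surfaces as size: 5, and the decorator defaults above (IdentityActivation, has_bias=False) surface as active_type: "linear" with no bias. An equivalent explicit form of the call, for illustration only:

reshape = seq_reshape_layer(
    input=din1,
    reshape_size=5,            # -> "size: 5" in the protobuf
    act=IdentityActivation(),  # the default -> active_type: "linear"
    bias_attr=False)           # the default; no bias parameter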