Commit 94d83fcd authored by Hu Weiwei, committed by GitHub

Merge pull request #2308 from wwhu/multiplex

add multiplex layer for python API
@@ -119,6 +119,7 @@ __all__ = [
'eos_layer',
'smooth_l1_cost',
'layer_support',
'multiplex_layer',
]
@@ -185,6 +186,7 @@ class LayerType(object):
MAXOUT = "maxout"
SPP_LAYER = "spp"
PAD_LAYER = "pad"
MULTIPLEX_LAYER = "multiplex"
PRINT_LAYER = "print"
PRIORBOX_LAYER = "priorbox"
@@ -5465,3 +5467,54 @@ def smooth_l1_cost(input, label, name=None, coeff=1.0, layer_attr=None):
**ExtraLayerAttribute.to_kwargs(layer_attr))
return LayerOutput(
name, LayerType.SMOOTH_L1, parents=[input, label], size=1)
@wrap_name_default()
def multiplex_layer(input, name=None, layer_attr=None):
    """
    This layer multiplexes multiple input layers according to the indices
    provided by the first input layer.

    inputs[0]: an index vector of size batchSize; index[i] selects the
    candidate layer that supplies row i of the output.
    inputs[1:N]: the candidate output data.
    For each index i from 0 to batchSize - 1, the i-th row of the output is
    the i-th row of the (index[i] + 1)-th input layer.

    For each i-th row of the output:

    .. math::

        y[i][j] = x_{x_{0}[i] + 1}[i][j], j = 0, 1, ..., (x_{1}.width - 1)

    where :math:`y` is the output and :math:`x_{k}` is the k-th input layer,
    with :math:`k = x_{0}[i] + 1`.

    The example usage is:

    .. code-block:: python

       out = multiplex_layer(input=layers)

    :param input: Input layers; the first is the index layer, the rest are
                  the candidate layers.
    :type input: list of LayerOutput
    :param name: Layer name.
    :type name: basestring
    :param layer_attr: Extra layer attributes.
    :type layer_attr: ExtraLayerAttribute
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    assert isinstance(input, collections.Sequence)
    assert len(input) > 2, 'multiplex_layer should have more than 2 inputs'
    # input[0] holds the per-sample indices; input[1:] are the candidate
    # layers, all of which must share the same size.
    for i in range(1, len(input)):
        assert isinstance(input[i], LayerOutput)
        assert input[i].size == input[1].size, \
            "All the input layers except the first one should have the same size"

    l = Layer(
        name=name,
        type='multiplex',
        inputs=[x.name for x in input],
        size=input[1].size,
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    return LayerOutput(
        name=name,
        layer_type=LayerType.MULTIPLEX_LAYER,
        parents=input,
        size=l.config.size)
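To make the row-selection rule concrete, here is a minimal NumPy sketch of what the multiplex layer computes (illustrative only; the function and variable names are hypothetical, not PaddlePaddle APIs):

    import numpy as np

    def multiplex(index, candidates):
        # index: int vector of shape (batch_size,), values in
        #        [0, len(candidates) - 1]
        # candidates: list of arrays, each of shape (batch_size, width);
        #             these play the role of input[1:] above
        out = np.empty_like(candidates[0])
        for i in range(len(index)):
            # row i comes from input layer index[i] + 1
            out[i] = candidates[index[i]][i]
        return out

    index = np.array([1, 0, 2])
    candidates = [np.full((3, 4), k) for k in range(3)]
    print(multiplex(index, candidates))
    # Row 0 is all 1s, row 1 all 0s, row 2 all 2s.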
@@ -5,6 +5,6 @@ last_first_seq test_expand_layer test_ntm_layers test_hsigmoid
img_layers img_trans_layers util_layers simple_rnn_layers unused_layers test_cost_layers
test_rnn_group shared_fc shared_lstm shared_gru test_cost_layers_with_weight
test_spp_layer test_bilinear_interp test_maxout test_bi_grumemory math_ops
test_seq_concat_reshape test_pad test_smooth_l1)
test_seq_concat_reshape test_pad test_smooth_l1 test_multiplex_layer)
export whole_configs=(test_split_datasource)
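For reference, the expected network protobuf for the new test follows; it is the parsed form of the Python config shown after the dump: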
type: "nn"
layers {
name: "index"
type: "data"
size: 1
active_type: ""
}
layers {
name: "data1"
type: "data"
size: 30
active_type: ""
}
layers {
name: "data2"
type: "data"
size: 30
active_type: ""
}
layers {
name: "data3"
type: "data"
size: 30
active_type: ""
}
layers {
name: "__multiplex_layer_0__"
type: "multiplex"
size: 30
active_type: ""
inputs {
input_layer_name: "index"
}
inputs {
input_layer_name: "data1"
}
inputs {
input_layer_name: "data2"
}
inputs {
input_layer_name: "data3"
}
}
input_layer_names: "index"
input_layer_names: "data1"
input_layer_names: "data2"
input_layer_names: "data3"
output_layer_names: "__multiplex_layer_0__"
sub_models {
name: "root"
layer_names: "index"
layer_names: "data1"
layer_names: "data2"
layer_names: "data3"
layer_names: "__multiplex_layer_0__"
input_layer_names: "index"
input_layer_names: "data1"
input_layer_names: "data2"
input_layer_names: "data3"
output_layer_names: "__multiplex_layer_0__"
is_recurrent_layer_group: false
}
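The test_multiplex_layer Python config that parses to the protobuf above: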
from paddle.trainer_config_helpers import *
settings(batch_size=1000, learning_rate=1e-5)
index = data_layer(name='index', size=1)
din1 = data_layer(name='data1', size=30)
din2 = data_layer(name='data2', size=30)
din3 = data_layer(name='data3', size=30)
dout = multiplex_layer([index, din1, din2, din3])
outputs(dout)
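As a concrete reading of this config: for sample i, an index value of 0 selects row i of data1, 1 selects data2, and 2 selects data3, so the multiplex output has width 30, matching the candidate layers.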