Commit 5258bcf3 authored by Luo Tao

implement more layers in v2

Parent f25c9c5f
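For context, a minimal sketch of how the layers implemented in this commit are meant to be used through the v2 API. The `paddle.v2` import paths are an assumption; the layer, activation, and pooling names are taken from the test code in the diff below.

.. code-block:: python

    import paddle.v2.layer as layer
    import paddle.v2.activation as activation
    import paddle.v2.pooling as pooling
    import paddle.v2.data_type as data_type

    # A data layer feeding an 8-channel 4x4 input (8 * 16 = 128 values).
    pixel = layer.data(name='pixel', type=data_type.dense_vector(128))
    # One of the newly wrapped v2 layers: a 1x1 convolution.
    conv = layer.img_conv(input=pixel, filter_size=1, filter_size_y=1,
                          num_channels=8, num_filters=16,
                          act=activation.Linear())
    # Image pooling via the new v2 pooling alias (MaxPooling in v1).
    maxpool = layer.img_pool(input=conv, pool_size=2, num_channels=16,
                             padding=1, pool_type=pooling.Max())
    print layer.parse_network(maxpool)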
@@ -139,24 +139,12 @@ lstmemory
:members: lstmemory
:noindex:
lstm_step_layer
---------------
.. automodule:: paddle.trainer_config_helpers.layers
:members: lstm_step_layer
:noindex:
grumemory
---------
.. automodule:: paddle.trainer_config_helpers.layers
:members: grumemory
:noindex:
gru_step_layer
---------------
.. automodule:: paddle.trainer_config_helpers.layers
:members: gru_step_layer
:noindex:
Recurrent Layer Group
=====================
@@ -172,6 +160,18 @@ recurrent_group
:members: recurrent_group
:noindex:
lstm_step_layer
---------------
.. automodule:: paddle.trainer_config_helpers.layers
:members: lstm_step_layer
:noindex:
gru_step_layer
---------------
.. automodule:: paddle.trainer_config_helpers.layers
:members: gru_step_layer
:noindex:
beam_search
------------
.. automodule:: paddle.trainer_config_helpers.layers
@@ -308,6 +308,12 @@ repeat_layer
:members: repeat_layer
:noindex:
rotate_layer
------------
.. automodule:: paddle.trainer_config_helpers.layers
:members: rotate_layer
:noindex:
seq_reshape_layer
-----------------
.. automodule:: paddle.trainer_config_helpers.layers
@@ -462,6 +468,12 @@ ctc_layer
:members: ctc_layer
:noindex:
warp_ctc_layer
--------------
.. automodule:: paddle.trainer_config_helpers.layers
:members: warp_ctc_layer
:noindex:
nce_layer
-----------
.. automodule:: paddle.trainer_config_helpers.layers
......
@@ -30,88 +30,28 @@ except ImportError:
import copy
__all__ = [
"full_matrix_projection",
"AggregateLevel",
"ExpandLevel",
"identity_projection",
"dotmul_projection",
"dotmul_operator",
"repeat_layer",
"seq_reshape_layer",
"table_projection",
"mixed_layer",
"data_layer",
"embedding_layer",
"fc_layer",
"grumemory",
"pooling_layer",
"lstmemory",
"last_seq",
"first_seq",
"cos_sim",
"hsigmoid",
"conv_projection",
"regression_cost",
'classification_cost',
"LayerOutput",
'img_conv_layer',
'img_pool_layer',
'batch_norm_layer',
'img_cmrnorm_layer',
'addto_layer',
'concat_layer',
'seq_concat_layer',
'lstm_step_layer',
'recurrent_group',
'memory',
'StaticInput',
'expand_layer',
'scaling_layer',
'scaling_projection',
'power_layer',
'interpolation_layer',
'bilinear_interp_layer',
'trans_layer',
'rotate_layer',
'sum_to_one_norm_layer',
'get_output_layer',
'LayerType',
'context_projection',
'beam_search',
'maxid_layer',
'GeneratedInput',
'SubsequenceInput',
'gru_step_layer',
'recurrent_layer',
'BaseGeneratedInput',
'conv_operator',
'conv_shift_layer',
'tensor_layer',
'selective_fc_layer',
'sampling_id_layer',
'slope_intercept_layer',
'trans_full_matrix_projection',
'linear_comb_layer',
'convex_comb_layer',
'ctc_layer',
'warp_ctc_layer',
'crf_layer',
'crf_decoding_layer',
'nce_layer',
'cross_entropy_with_selfnorm',
'cross_entropy',
'multi_binary_label_cross_entropy',
'sum_cost',
'rank_cost',
'lambda_cost',
'huber_cost',
'block_expand_layer',
'maxout_layer',
'out_prod_layer',
'print_layer',
'priorbox_layer',
'spp_layer',
'pad_layer',
"full_matrix_projection", "AggregateLevel", "ExpandLevel",
"identity_projection", "dotmul_projection", "dotmul_operator",
"repeat_layer", "seq_reshape_layer", "table_projection", "mixed_layer",
"data_layer", "embedding_layer", "fc_layer", "grumemory", "pooling_layer",
"lstmemory", "last_seq", "first_seq", "cos_sim", "hsigmoid",
"conv_projection", "regression_cost", 'classification_cost', "LayerOutput",
'img_conv_layer', 'img_pool_layer', 'batch_norm_layer', 'img_cmrnorm_layer',
'addto_layer', 'concat_layer', 'seq_concat_layer', 'lstm_step_layer',
'recurrent_group', 'memory', 'StaticInput', 'expand_layer', 'scaling_layer',
'scaling_projection', 'power_layer', 'interpolation_layer',
'bilinear_interp_layer', 'trans_layer', 'rotate_layer',
'sum_to_one_norm_layer', 'get_output_layer', 'LayerType',
'context_projection', 'beam_search', 'maxid_layer', 'GeneratedInput',
'SubsequenceInput', 'gru_step_layer', 'recurrent_layer',
'BaseGeneratedInput', 'conv_operator', 'conv_shift_layer', 'tensor_layer',
'selective_fc_layer', 'sampling_id_layer', 'slope_intercept_layer',
'trans_full_matrix_projection', 'linear_comb_layer', 'convex_comb_layer',
'ctc_layer', 'warp_ctc_layer', 'crf_layer', 'crf_decoding_layer',
'nce_layer', 'cross_entropy_with_selfnorm', 'cross_entropy',
'multi_binary_label_cross_entropy', 'sum_cost', 'rank_cost', 'lambda_cost',
'huber_cost', 'block_expand_layer', 'maxout_layer', 'out_prod_layer',
'print_layer', 'priorbox_layer', 'spp_layer', 'pad_layer', 'eos_layer'
]
@@ -1287,6 +1227,12 @@ def last_seq(input,
"""
Get the last timestamp activation of a sequence.
The simple usage is:
.. code-block:: python
seq = last_seq(input=layer)
:param agg_level: Aggregated level
:param name: Layer name.
:type name: basestring
@@ -1325,6 +1271,12 @@ def first_seq(input,
"""
Get the first timestamp activation of a sequence.
The simple usage is:
.. code-block:: python
seq = first_seq(input=layer)
:param agg_level: aggregation level
:param name: Layer name.
:type name: basestring
@@ -1425,7 +1377,7 @@ def repeat_layer(input, num_repeats, name=None, layer_attr=None):
.. code-block:: python
expand = repeat_layer(layer, 4)
expand = repeat_layer(input=layer, num_repeats=4)
:param input: Input layer
:type input: LayerOutput
@@ -1797,6 +1749,12 @@ def cos_sim(a, b, scale=1, size=1, name=None, layer_attr=None):
Note that the above computation is for one sample. Multiple samples are
processed in one batch.
The example usage is:
.. code-block:: python
cos = cos_sim(a=layer1, b=layer2, size=3)
:param name: layer name
:type name: basestring
:param a: input layer a
@@ -1958,6 +1916,16 @@ def img_conv_layer(input,
pieces. The first 256/4 = 64 channels will be processed by the first 32 filters. The
remaining channels will be processed by the remaining groups of filters.
The example usage is:
.. code-block:: python
conv = img_conv_layer(input=data, filter_size=1, filter_size_y=1,
num_channels=8,
num_filters=16, stride=1,
bias_attr=False,
act=ReluActivation())
:param name: Layer name.
:type name: basestring
:param input: Layer Input.
@@ -2097,6 +2065,34 @@ def img_pool_layer(input,
.. _pooling: http://ufldl.stanford.edu/tutorial/supervised/Pooling/
- ceil_mode=True:
.. math::
w = 1 + int(ceil((input\_width + 2 * padding - pool\_size) / float(stride)))
h = 1 + int(ceil((input\_height + 2 * padding\_y - pool\_size\_y) / float(stride\_y)))
- ceil_mode=False:
.. math::
w = 1 + int(floor((input\_width + 2 * padding - pool\_size) / float(stride)))
h = 1 + int(floor((input\_height + 2 * padding\_y - pool\_size\_y) / float(stride\_y)))
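For example, with input_width = 7, padding = 0, pool_size = 2 and stride = 2, ceil_mode=True gives w = 1 + ceil(5 / 2) = 4, while ceil_mode=False gives w = 1 + floor(5 / 2) = 3.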
The example usage is:
.. code-block:: python
maxpool = img_pool_layer(input=conv,
pool_size=3,
pool_size_y=5,
num_channels=8,
stride=1,
stride_y=2,
padding=1,
padding_y=2,
pool_type=MaxPooling())
:param padding: pooling padding width.
:type padding: int
:param padding_y: pooling padding height. It's equal to padding by default.
@@ -2123,19 +2119,6 @@ def img_pool_layer(input,
:param ceil_mode: Whether to use ceil mode to calculate output height and width.
Default is True. If set to False, floor mode is used instead.
- ceil_mode=True:
.. math::
w = 1 + int(ceil(input_width + 2 * padding - pool_size) / float(stride))
h = 1 + int(ceil(input_height + 2 * padding_y - pool_size_y) / float(stride_y))
- ceil_mode=False:
.. math::
w = 1 + int(floor(input_width + 2 * padding - pool_size) / float(stride))
h = 1 + int(floor(input_height + 2 * padding_y - pool_size_y) / float(stride_y))
:type ceil_mode: bool
:return: LayerOutput object.
:rtype: LayerOutput
@@ -2197,6 +2180,15 @@ def spp_layer(input,
For details, please refer to
`Kaiming He's paper <https://arxiv.org/abs/1406.4729>`_.
The example usage is:
.. code-block:: python
spp = spp_layer(input=data,
pyramid_height=2,
num_channels=16,
pool_type=MaxPooling())
:param name: layer name.
:type name: basestring
:param input: layer's input.
@@ -2285,6 +2277,12 @@ def img_cmrnorm_layer(input,
For details, please refer to
`Alex's paper <http://www.cs.toronto.edu/~fritz/absps/imagenet.pdf>`_.
The example usage is:
.. code-block:: python
norm = img_cmrnorm_layer(input=net, size=5)
:param name: layer name.
:type name: None|basestring
:param input: layer's input.
@@ -2340,6 +2338,12 @@ def batch_norm_layer(input,
For the details of batch normalization, please refer to this
`paper <http://arxiv.org/abs/1502.03167>`_.
The example usage is:
.. code-block:: python
norm = batch_norm_layer(input=net, act=ReluActivation())
:param name: layer name.
:type name: basestring
:param input: batch normalization input. Better be linear activation.
@@ -3903,13 +3907,13 @@ def conv_shift_layer(a, b, name=None, layer_attr=None):
.. code-block:: python
conv_shift = conv_shift_layer(input=[layer1, layer2])
conv_shift = conv_shift_layer(a=layer1, b=layer2)
:param name: layer name
:type name: basestring
:param a: Input layer a.
:type a: LayerOutput
:param b: input layer b
:param b: input layer b.
:type b: LayerOutput
:param layer_attr: layer's extra attribute.
:type layer_attr: ExtraLayerAttribute
@@ -4001,8 +4005,8 @@ def tensor_layer(a,
@wrap_act_default()
@layer_support()
def selective_fc_layer(input,
select,
size,
select=None,
act=None,
name=None,
pass_generation=False,
@@ -4029,6 +4033,7 @@ def selective_fc_layer(input,
:type input: LayerOutput|list|tuple
:param select: The select layer. The output of the select layer should be a
sparse binary matrix, and is treated as the mask of selective fc.
If None, this layer acts exactly like fc_layer.
:type select: LayerOutput
:param size: The layer dimension.
:type size: int
@@ -4257,7 +4262,7 @@ def block_expand_layer(input,
.. code-block:: python
block_expand = block_expand_layer(input,
block_expand = block_expand_layer(input=layer,
num_channels=128,
stride_x=1,
stride_y=1,
@@ -4594,6 +4599,13 @@ def crf_decoding_layer(input,
this layer will also calculate error. output.value[i] is 1 for incorrect
decoding or 0 for correct decoding.
The simple usage:
.. code-block:: python
crf_decoding = crf_decoding_layer(input=input,
size=label_dim)
:param input: The first input layer.
:type input: LayerOutput
:param size: size of this layer.
......
@@ -19,11 +19,12 @@ import trainer
import event
import data_type
import attr
import pooling
import py_paddle.swig_paddle as api
__all__ = [
'optimizer', 'layer', 'activation', 'parameters', 'init', 'trainer',
'event', 'data_type', 'attr'
'event', 'data_type', 'attr', 'pooling'
]
......
@@ -76,12 +76,20 @@ from paddle.trainer_config_helpers.default_decorators import wrap_name_default
import data_type
import activation
import attr
import pooling
__all__ = [
'parse_network', 'data', 'fc', 'max_id', 'classification_cost',
'cross_entropy_cost', 'cross_entropy_with_selfnorm_cost', 'regression_cost',
'parse_network', 'data', 'fc', 'conv_shift', 'img_conv', 'img_pool', 'spp',
'maxout', 'img_cmrnorm', 'batch_norm', 'sum_to_one_norm', 'recurrent',
'lstmemory', 'grumemory', 'pool', 'last_seq', 'first_seq', 'concat',
'seq_concat', 'block_expand', 'expand', 'repeat', 'seq_reshape', 'addto',
'linear_comb', 'interpolation', 'bilinear_interp', 'power', 'scaling',
'slope_intercept', 'tensor', 'cos_sim', 'trans', 'max_id', 'sampling_id',
'pad', 'classification_cost', 'cross_entropy_cost',
'cross_entropy_with_selfnorm_cost', 'regression_cost',
'multi_binary_label_cross_entropy_cost', 'rank_cost', 'lambda_cost',
'sum_cost', 'huber_cost'
'sum_cost', 'huber_cost', 'crf', 'crf_decoding', 'ctc', 'warp_ctc', 'nce',
'hsigmoid', 'eos'
]
@@ -130,11 +138,8 @@ class Layer(object):
raise NotImplementedError()
def __convert_to_v2__(method_name, name_prefix, parent_names):
if name_prefix is not None:
wrapper = wrap_name_default(name_prefix=name_prefix)
else:
wrapper = None
def __convert_to_v2__(method_name, parent_names):
wrapper = wrap_name_default(name_prefix=method_name)
class V2LayerImpl(Layer):
def __init__(self, name=None, **kwargs):
@@ -192,44 +197,92 @@ class DataLayerV2(Layer):
data = DataLayerV2
fc = __convert_to_v2__('fc_layer', name_prefix='fc', parent_names=['input'])
max_id = __convert_to_v2__(
'maxid_layer', name_prefix='maxid', parent_names=['input'])
classification_cost = __convert_to_v2__(
'classification_cost',
name_prefix='classification_cost',
parent_names=['input', 'label', 'weight'])
regression_cost = __convert_to_v2__(
'regression_cost',
name_prefix='regression_cost',
parent_names=['input', 'label', 'weight'])
cross_entropy_cost = __convert_to_v2__(
'cross_entropy',
name_prefix='cross_entropy',
parent_names=['input', 'label'])
cross_entropy_with_selfnorm_cost = __convert_to_v2__(
'cross_entropy_with_selfnorm',
name_prefix='cross_entropy_with_selfnorm',
parent_names=['input', 'label'])
multi_binary_label_cross_entropy_cost = __convert_to_v2__(
'multi_binary_label_cross_entropy',
name_prefix='multi_binary_label_cross_entropy',
parent_names=['input', 'label'])
rank_cost = __convert_to_v2__(
'rank_cost',
name_prefix='rank_cost',
parent_names=['left', 'right', 'label', 'weight'])
lambda_cost = __convert_to_v2__(
'lambda_cost', name_prefix='lambda_cost', parent_names=['input', 'score'])
sum_cost = __convert_to_v2__(
'sum_cost', name_prefix='sum_cost', parent_names=['input'])
huber_cost = __convert_to_v2__(
'huber_cost', name_prefix='huber_cost', parent_names=['input', 'label'])
AggregateLevel = conf_helps.layers.AggregateLevel
ExpandLevel = conf_helps.layers.ExpandLevel
layer_list = [
# [V2LayerImpl, V1_method_name, parent_names]
# fully connected layers
['fc', 'fc_layer', ['input']],
# conv layers
['conv_shift', 'conv_shift_layer', ['a', 'b']],
['img_conv', 'img_conv_layer', ['input']],
# image pooling layers
['img_pool', 'img_pool_layer', ['input']],
['spp', 'spp_layer', ['input']],
['maxout', 'maxout_layer', ['input']],
# norm layers
['img_cmrnorm', 'img_cmrnorm_layer', ['input']],
['batch_norm', 'batch_norm_layer', ['input']],
['sum_to_one_norm', 'sum_to_one_norm_layer', ['input']],
# recurrent layers
['recurrent', 'recurrent_layer', ['input']],
['lstmemory', 'lstmemory', ['input']],
['grumemory', 'grumemory', ['input']],
# aggregate layers
['pool', 'pooling_layer', ['input']],
['last_seq', 'last_seq', ['input']],
['first_seq', 'first_seq', ['input']],
['concat', 'concat_layer', ['input']],
['seq_concat', 'seq_concat_layer', ['a', 'b']],
# reshaping layers
['block_expand', 'block_expand_layer', ['input']],
['expand', 'expand_layer', ['input', 'expand_as']],
['repeat', 'repeat_layer', ['input']],
['rotate', 'rotate_layer', ['input']],
['seq_reshape', 'seq_reshape_layer', ['input']],
# math layers
['addto', 'addto_layer', ['input']],
['linear_comb', 'linear_comb_layer', ['weights', 'vectors']],
['interpolation', 'interpolation_layer', ['input', 'weight']],
['bilinear_interp', 'bilinear_interp_layer', ['input']],
['power', 'power_layer', ['input', 'weight']],
['scaling', 'scaling_layer', ['input', 'weight']],
['slope_intercept', 'slope_intercept_layer', ['input']],
['tensor', 'tensor_layer', ['a', 'b']],
['cos_sim', 'cos_sim', ['a', 'b']],
['trans', 'trans_layer', ['input']],
# sampling layers
['max_id', 'maxid_layer', ['input']],
['sampling_id', 'sampling_id_layer', ['input']],
# slicing and joining layers
['pad', 'pad_layer', ['input']],
# cost layers
[
'classification_cost', 'classification_cost',
['input', 'label', 'weight']
],
['regression_cost', 'regression_cost', ['input', 'label', 'weight']],
['cross_entropy_cost', 'cross_entropy', ['input', 'label']],
[
'cross_entropy_with_selfnorm_cost', 'cross_entropy_with_selfnorm',
['input', 'label']
],
[
'multi_binary_label_cross_entropy_cost',
'multi_binary_label_cross_entropy', ['input', 'label']
],
['rank_cost', 'rank_cost', ['left', 'right', 'label', 'weight']],
['lambda_cost', 'lambda_cost', ['input', 'score']],
['sum_cost', 'sum_cost', ['input']],
['huber_cost', 'huber_cost', ['input', 'label']],
['crf', 'crf_layer', ['input', 'label']],
['crf_decoding', 'crf_decoding_layer', ['input']],
['ctc', 'ctc_layer', ['input', 'label']],
['warp_ctc', 'warp_ctc_layer', ['input', 'label']],
['nce', 'nce_layer', ['input', 'label']],
['hsigmoid', 'hsigmoid', ['input', 'label']],
# check layers
['eos', 'eos_layer', ['input']]
]
for l in layer_list:
globals()[l[0]] = __convert_to_v2__(l[1], l[2])
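# The loop above turns each layer_list entry into a module-level v2 class
# whose default name prefix is the v1 method name. For example, the
# generated `fc` binding is equivalent to the hand-written form the old
# code spelled out entry by entry:
#
#   fc = __convert_to_v2__('fc_layer', parent_names=['input'])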
if __name__ == '__main__':
pixel = data(name='pixel', type=data_type.dense_vector(784))
pixel = data(name='pixel', type=data_type.dense_vector(128))
label = data(name='label', type=data_type.integer_value(10))
weight = data(name='weight', type=data_type.dense_vector(10))
word = data(name='word', type=data_type.integer_value(12))
score = data(name='score', type=data_type.dense_vector(1))
hidden = fc(input=pixel,
@@ -237,7 +290,90 @@ if __name__ == '__main__':
act=activation.Sigmoid(),
param_attr=attr.Param(name='hidden'))
inference = fc(input=hidden, size=10, act=activation.Softmax())
print parse_network(inference)
# test conv layers
conv1 = conv_shift(a=pixel, b=score)
conv2 = img_conv(
input=pixel,
filter_size=1,
filter_size_y=1,
num_channels=8,
num_filters=16,
act=activation.Linear())
print parse_network(conv1, conv2)
# test image pooling layers
maxpool = img_pool(
input=conv2,
pool_size=2,
num_channels=16,
padding=1,
pool_type=pooling.Max())
spp = spp(input=conv2,
pyramid_height=2,
num_channels=16,
pool_type=pooling.Max())
maxout = maxout(input=conv2, num_channels=16, groups=4)
print parse_network(maxpool, spp, maxout)
# test norm layers
norm1 = img_cmrnorm(input=maxpool, size=5)
norm2 = batch_norm(input=maxpool)
norm3 = sum_to_one_norm(input=maxpool)
print parse_network(norm1, norm2, norm3)
# test recurrent layers
recurrent = recurrent(input=word)
lstm = lstmemory(input=word)
gru = grumemory(input=word)
print parse_network(recurrent, lstm, gru)
# test aggregate layers
pool = pool(
input=pixel,
pooling_type=pooling.Avg(),
agg_level=AggregateLevel.EACH_SEQUENCE)
last_seq = last_seq(input=pixel)
first_seq = first_seq(input=pixel)
concat = concat(input=[last_seq, first_seq])
seq_concat = seq_concat(a=last_seq, b=first_seq)
print parse_network(pool, last_seq, first_seq, concat, seq_concat)
# test reshaping layers
block_expand = block_expand(
input=maxout, num_channels=4, stride_x=1, block_x=1)
expand = expand(
input=last_seq, expand_as=pixel, expand_level=ExpandLevel.FROM_TIMESTEP)
repeat = repeat(input=last_seq, num_repeats=4)
reshape = seq_reshape(input=last_seq, reshape_size=4)
rotate = rotate(input=pixel, height=16, width=49)
print parse_network(block_expand, expand, repeat, reshape, rotate)
# test math layers
addto = addto(input=[last_seq, first_seq])
linear_comb = linear_comb(weights=weight, vectors=hidden, size=10)
interpolation = interpolation(input=[hidden, hidden], weight=score)
bilinear = bilinear_interp(input=conv2, out_size_x=4, out_size_y=4)
power = power(input=conv1, weight=score)
scaling = scaling(input=conv1, weight=score)
slope = slope_intercept(input=conv1)
tensor = tensor(a=last_seq, b=first_seq, size=1000)
cos_sim = cos_sim(a=last_seq, b=first_seq)
trans = trans(input=tensor)
print parse_network(addto, linear_comb, interpolation, bilinear, power,
scaling, slope, tensor, cos_sim, trans)
# test sampling layers
maxid = max_id(input=inference)
sampling_id = sampling_id(input=inference)
print parse_network(maxid, sampling_id)
# test slicing and joining layers
pad = pad(input=maxpool, pad_c=[2, 3], pad_h=[1, 2], pad_w=[3, 1])
print parse_network(pad)
# test cost layers
cost1 = classification_cost(input=inference, label=label)
cost2 = classification_cost(input=inference, label=label, weight=weight)
cost3 = cross_entropy_cost(input=inference, label=label)
@@ -249,9 +385,18 @@ if __name__ == '__main__':
cost9 = lambda_cost(input=inference, score=score)
cost10 = sum_cost(input=inference)
cost11 = huber_cost(input=score, label=label)
print parse_network(cost1, cost2)
print parse_network(cost3, cost4)
print parse_network(cost5, cost6)
print parse_network(cost7, cost8, cost9, cost10, cost11)
print parse_network(inference, maxid)
crf = crf(input=inference, label=label)
crf_decoding = crf_decoding(input=inference, size=3)
ctc = ctc(input=inference, label=label)
warp_ctc = warp_ctc(input=pixel, label=label)
nce = nce(input=inference, label=label, num_classes=3)
hsigmoid = hsigmoid(input=inference, label=label, num_classes=3)
print parse_network(crf, crf_decoding, ctc, warp_ctc, nce, hsigmoid)
# test check layers
eos = eos(input=maxid, eos_id=5)
print parse_network(eos)
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.trainer_config_helpers.poolings import *
__all__ = ["Max", "CudnnMax", "Avg", "CudnnAvg", "Sum", "SquareRootN"]
Max = MaxPooling
CudnnMax = CudnnMaxPooling
Avg = AvgPooling
CudnnAvg = CudnnAvgPooling
Sum = SumPooling
SquareRootN = SquareRootNPooling
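# These aliases expose the v1 pooling classes under shorter v2 names, so
# configs can pass e.g. pooling.Max() wherever a v1 pooling type is
# expected. A usage sketch (the paddle.v2 import paths are an assumption):
#
#   import paddle.v2.layer as layer
#   import paddle.v2.pooling as pooling
#   import paddle.v2.data_type as data_type
#
#   seq = layer.data(name='seq', type=data_type.dense_vector(128))
#   avg = layer.pool(input=seq, pooling_type=pooling.Avg())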