Commit db8566d7 authored by Yu Yang

Merge branch 'feature/clean_mnist_v2' into feature/tester

@@ -57,7 +57,7 @@ before_install:
   - if [[ "$JOB" == "PRE_COMMIT" ]]; then sudo ln -s /usr/bin/clang-format-3.8 /usr/bin/clang-format; fi
   # Paddle is using protobuf 3.1 currently. Protobuf 3.2 breaks the compatibility. So we specify the python
   # protobuf version.
-  - pip install numpy wheel 'protobuf==3.1' sphinx recommonmark sphinx_rtd_theme virtualenv pre-commit requests==2.9.2 LinkChecker 'scikit-learn>=0.18.0'
+  - pip install numpy wheel 'protobuf==3.1' sphinx recommonmark sphinx_rtd_theme virtualenv pre-commit requests==2.9.2 LinkChecker 'scikit-learn>=0.18.0' 'scipy>=0.18.0'
 script:
   - paddle/scripts/travis/main.sh
 notifications:
...
@@ -139,24 +139,12 @@ lstmemory
     :members: lstmemory
     :noindex:
 
-lstm_step_layer
----------------
-.. automodule:: paddle.trainer_config_helpers.layers
-    :members: lstm_step_layer
-    :noindex:
-
 grumemory
 ---------
 .. automodule:: paddle.trainer_config_helpers.layers
     :members: grumemory
     :noindex:
 
-gru_step_layer
----------------
-.. automodule:: paddle.trainer_config_helpers.layers
-    :members: gru_step_layer
-    :noindex:
-
 Recurrent Layer Group
 =====================
 
@@ -172,6 +160,18 @@ recurrent_group
     :members: recurrent_group
     :noindex:
 
+lstm_step_layer
+---------------
+.. automodule:: paddle.trainer_config_helpers.layers
+    :members: lstm_step_layer
+    :noindex:
+
+gru_step_layer
+---------------
+.. automodule:: paddle.trainer_config_helpers.layers
+    :members: gru_step_layer
+    :noindex:
+
 beam_search
 ------------
 .. automodule:: paddle.trainer_config_helpers.layers
@@ -308,6 +308,12 @@ repeat_layer
     :members: repeat_layer
     :noindex:
 
+rotate_layer
+------------
+.. automodule:: paddle.trainer_config_helpers.layers
+    :members: rotate_layer
+    :noindex:
+
 seq_reshape_layer
 -----------------
 .. automodule:: paddle.trainer_config_helpers.layers
@@ -462,6 +468,12 @@ ctc_layer
     :members: ctc_layer
     :noindex:
 
+warp_ctc_layer
+--------------
+.. automodule:: paddle.trainer_config_helpers.layers
+    :members: warp_ctc_layer
+    :noindex:
+
 nce_layer
 -----------
 .. automodule:: paddle.trainer_config_helpers.layers
...
@@ -112,6 +112,8 @@ __all__ = [
     'priorbox_layer',
     'spp_layer',
     'pad_layer',
+    'eos_layer',
+    'layer_support',
 ]
@@ -708,6 +710,7 @@ class MixedLayerType(LayerOutput):
         # update the size which might be computed inside MixedLayer
         # according to the operator's output size
         self.size = ml.config.size
+        self.finalized = True
 
 @wrap_name_default("mixed")
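
For context, `finalized` is the flag that MixedLayerType's `__iadd__` checks before appending another input; setting it in `__exit__` (alongside the size update above) seals the layer once its `with` block closes. A minimal sketch of the intended v1 behavior, with illustrative layer names:

    from paddle.trainer_config_helpers import *

    # one data layer feeding a mixed layer built via the `with` statement
    data = data_layer(name='data', size=128)
    with mixed_layer(size=256) as m:
        m += full_matrix_projection(input=data)  # appending is legal here
    # after __exit__ sets finalized = True, a further `m += ...` raises
    # MixedLayerType.AddToSealedMixedLayerException instead of mutating m
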
@@ -1287,6 +1290,12 @@ def last_seq(input,
     """
     Get Last Timestamp Activation of a sequence.
 
+    The simple usage is:
+
+    .. code-block:: python
+
+       seq = last_seq(input=layer)
+
     :param agg_level: Aggregated level
     :param name: Layer name.
     :type name: basestring
@@ -1325,6 +1334,12 @@ def first_seq(input,
     """
     Get First Timestamp Activation of a sequence.
 
+    The simple usage is:
+
+    .. code-block:: python
+
+       seq = first_seq(input=layer)
+
     :param agg_level: aggregation level
     :param name: Layer name.
     :type name: basestring
@@ -1425,7 +1440,7 @@ def repeat_layer(input, num_repeats, name=None, layer_attr=None):
     .. code-block:: python
 
-       expand = repeat_layer(layer, 4)
+       expand = repeat_layer(input=layer, num_repeats=4)
 
     :param input: Input layer
     :type input: LayerOutput
@@ -1797,6 +1812,12 @@ def cos_sim(a, b, scale=1, size=1, name=None, layer_attr=None):
     Note that the above computation is for one sample. Multiple samples are
     processed in one batch.
 
+    The example usage is:
+
+    .. code-block:: python
+
+       cos = cos_sim(a=layer1, b=layer2, size=3)
+
     :param name: layer name
     :type name: basestring
     :param a: input layer a
@@ -1958,6 +1979,16 @@ def img_conv_layer(input,
     pieces. First 256/4 = 64 channels will process by first 32 filters. The
     rest channels will be processed by rest group of filters.
 
+    The example usage is:
+
+    .. code-block:: python
+
+       conv = img_conv_layer(input=data, filter_size=1, filter_size_y=1,
+                             num_channels=8,
+                             num_filters=16, stride=1,
+                             bias_attr=False,
+                             act=ReluActivation())
+
     :param name: Layer name.
     :type name: basestring
     :param input: Layer Input.
@@ -2097,6 +2128,34 @@ def img_pool_layer(input,
     .. _pooling: http://ufldl.stanford.edu/tutorial/supervised/Pooling/
 
+    - ceil_mode=True:
+
+    .. math::
+
+        w = 1 + int(ceil(input\_width + 2 * padding - pool\_size) / float(stride))
+        h = 1 + int(ceil(input\_height + 2 * padding\_y - pool\_size\_y) / float(stride\_y))
+
+    - ceil_mode=False:
+
+    .. math::
+
+        w = 1 + int(floor(input\_width + 2 * padding - pool\_size) / float(stride))
+        h = 1 + int(floor(input\_height + 2 * padding\_y - pool\_size\_y) / float(stride\_y))
+
+    The example usage is:
+
+    .. code-block:: python
+
+       maxpool = img_pool_layer(input=conv,
+                                pool_size=3,
+                                pool_size_y=5,
+                                num_channels=8,
+                                stride=1,
+                                stride_y=2,
+                                padding=1,
+                                padding_y=2,
+                                pool_type=MaxPooling())
+
     :param padding: pooling padding width.
     :type padding: int
     :param padding_y: pooling padding height. It's equal to padding by default.
@@ -2123,19 +2182,6 @@ def img_pool_layer(input,
     :param ceil_mode: Wether to use ceil mode to calculate output height and with.
                       Defalut is True. If set false, Otherwise use floor.
-
-                      - ceil_mode=True:
-
-                      .. math::
-
-                          w = 1 + int(ceil(input_width + 2 * padding - pool_size) / float(stride))
-                          h = 1 + int(ceil(input_height + 2 * padding_y - pool_size_y) / float(stride_y))
-
-                      - ceil_mode=False:
-
-                      .. math::
-
-                          w = 1 + int(floor(input_width + 2 * padding - pool_size) / float(stride))
-                          h = 1 + int(floor(input_height + 2 * padding_y - pool_size_y) / float(stride_y))
     :type ceil_mode: bool
     :return: LayerOutput object.
     :rtype: LayerOutput
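
To make the two modes concrete, a small worked example with illustrative numbers: for input_width = 8, padding = 0, pool_size = 3, stride = 2, ceil mode gives w = 1 + ceil(5 / 2.0) = 4 while floor mode gives w = 1 + floor(5 / 2.0) = 3.

    import math

    def pool_out_dim(input_size, padding, pool_size, stride, ceil_mode=True):
        # mirrors the docstring formula above (a helper for illustration only)
        span = input_size + 2 * padding - pool_size
        rounder = math.ceil if ceil_mode else math.floor
        return 1 + int(rounder(span / float(stride)))

    print pool_out_dim(8, 0, 3, 2, ceil_mode=True)   # 4
    print pool_out_dim(8, 0, 3, 2, ceil_mode=False)  # 3
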
@@ -2197,6 +2243,15 @@ def spp_layer(input,
     The details please refer to
     `Kaiming He's paper <https://arxiv.org/abs/1406.4729>`_.
 
+    The example usage is:
+
+    .. code-block:: python
+
+       spp = spp_layer(input=data,
+                       pyramid_height=2,
+                       num_channels=16,
+                       pool_type=MaxPooling())
+
     :param name: layer name.
     :type name: basestring
     :param input: layer's input.
@@ -2285,6 +2340,12 @@ def img_cmrnorm_layer(input,
     The details please refer to
     `Alex's paper <http://www.cs.toronto.edu/~fritz/absps/imagenet.pdf>`_.
 
+    The example usage is:
+
+    .. code-block:: python
+
+       norm = img_cmrnorm_layer(input=net, size=5)
+
     :param name: layer name.
     :type name: None|basestring
     :param input: layer's input.
@@ -2340,6 +2401,12 @@ def batch_norm_layer(input,
     The details of batch normalization please refer to this
     `paper <http://arxiv.org/abs/1502.03167>`_.
 
+    The example usage is:
+
+    .. code-block:: python
+
+       norm = batch_norm_layer(input=net, act=ReluActivation())
+
     :param name: layer name.
     :type name: basestring
     :param input: batch normalization input. Better be linear activation.
@@ -3903,13 +3970,13 @@ def conv_shift_layer(a, b, name=None, layer_attr=None):
     .. code-block:: python
 
-       conv_shift = conv_shift_layer(input=[layer1, layer2])
+       conv_shift = conv_shift_layer(a=layer1, b=layer2)
 
     :param name: layer name
     :type name: basestring
     :param a: Input layer a.
     :type a: LayerOutput
-    :param b: input layer b
+    :param b: input layer b.
     :type b: LayerOutput
     :param layer_attr: layer's extra attribute.
     :type layer_attr: ExtraLayerAttribute
@@ -4001,8 +4068,8 @@ def tensor_layer(a,
 @wrap_act_default()
 @layer_support()
 def selective_fc_layer(input,
-                       select,
                        size,
+                       select=None,
                        act=None,
                        name=None,
                        pass_generation=False,
@@ -4029,6 +4096,7 @@ def selective_fc_layer(input,
     :type input: LayerOutput|list|tuple
     :param select: The select layer. The output of select layer should be a
                    sparse binary matrix, and treat as the mask of selective fc.
+                   If it is None, this layer acts exactly like fc_layer.
     :type select: LayerOutput
     :param size: The layer dimension.
     :type size: int
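
Since `select` is now optional, both call styles below should be accepted; the layer names are illustrative, and in a real config the mask would come from a sparse binary input:

    from paddle.trainer_config_helpers import *

    data = data_layer(name='data', size=256)
    mask = data_layer(name='mask', size=128)  # stands in for a sparse binary matrix

    # with a mask: only the selected output columns are computed
    sel_fc = selective_fc_layer(input=data, select=mask, size=128)

    # without a mask: per the new docstring, behaves like a plain fc_layer
    plain_fc = selective_fc_layer(input=data, size=128)
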
@@ -4257,7 +4325,7 @@ def block_expand_layer(input,
     .. code-block:: python
 
-       block_expand = block_expand_layer(input,
+       block_expand = block_expand_layer(input=layer,
                                          num_channels=128,
                                          stride_x=1,
                                          stride_y=1,
@@ -4594,6 +4662,13 @@ def crf_decoding_layer(input,
     this layer will also calculate error. output.value[i] is 1 for incorrect
     decoding or 0 for correct decoding.
 
+    The simple usage:
+
+    .. code-block:: python
+
+       crf_decoding = crf_decoding_layer(input=input,
+                                         size=label_dim)
+
     :param input: The first input layer.
     :type input: LayerOutput
     :param size: size of this layer.
...
@@ -22,11 +22,12 @@ import data_feeder
 from . import dataset
 from . import reader
 import attr
+import pooling
 import py_paddle.swig_paddle as api
 
 __all__ = [
     'optimizer', 'layer', 'activation', 'parameters', 'init', 'trainer',
-    'event', 'data_type', 'attr', 'data_feeder', 'dataset', 'reader'
+    'event', 'data_type', 'attr', 'pooling', 'data_feeder', 'dataset', 'reader'
 ]
...
@@ -71,19 +71,37 @@ import collections
 import paddle.trainer_config_helpers as conf_helps
 from paddle.trainer_config_helpers.config_parser_utils import \
     parse_network_config as __parse__
 from paddle.trainer_config_helpers.default_decorators import wrap_name_default
+from paddle.trainer_config_helpers.default_decorators import wrap_act_default
+from paddle.trainer_config_helpers.default_decorators import wrap_bias_attr_default
+from paddle.trainer_config_helpers.layers import layer_support
 
 import data_type
 import activation
 import attr
 
 __all__ = [
-    'parse_network', 'data', 'fc', 'max_id', 'classification_cost',
-    'cross_entropy_cost', 'cross_entropy_with_selfnorm_cost', 'regression_cost',
+    'parse_network', 'data', 'fc', 'conv_shift', 'img_conv', 'img_pool', 'spp',
+    'maxout', 'img_cmrnorm', 'batch_norm', 'sum_to_one_norm', 'recurrent',
+    'lstmemory', 'grumemory', 'pool', 'last_seq', 'first_seq', 'concat',
+    'seq_concat', 'block_expand', 'expand', 'repeat', 'seq_reshape', 'addto',
+    'linear_comb', 'interpolation', 'bilinear_interp', 'power', 'scaling',
+    'slope_intercept', 'tensor', 'cos_sim', 'trans', 'max_id', 'sampling_id',
+    'pad', 'classification_cost', 'cross_entropy_cost',
+    'cross_entropy_with_selfnorm_cost', 'regression_cost',
     'multi_binary_label_cross_entropy_cost', 'rank_cost', 'lambda_cost',
-    'sum_cost', 'huber_cost'
+    'sum_cost', 'huber_cost', 'crf', 'crf_decoding', 'ctc', 'warp_ctc', 'nce',
+    'hsigmoid', 'eos'
 ]
+
+__projection_names__ = filter(lambda x: x.endswith('_projection'),
+                              dir(conf_helps))
+__all__ += __projection_names__
+
+__operator_names__ = filter(lambda x: x.endswith('_operator'), dir(conf_helps))
+__all__ += __operator_names__
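
The two `filter` calls above are what pull every v1 `*_projection` and `*_operator` helper into the v2 namespace automatically. A toy reproduction of the idea, with a stand-in module (`_Fake` and its attributes are illustrative, not Paddle code):

    class _Fake(object):
        full_matrix_projection = staticmethod(lambda **kw: 'proj')
        dotmul_operator = staticmethod(lambda **kw: 'op')
        fc_layer = staticmethod(lambda **kw: 'layer')

    print filter(lambda x: x.endswith('_projection'), dir(_Fake))
    # ['full_matrix_projection']
    print filter(lambda x: x.endswith('_operator'), dir(_Fake))
    # ['dotmul_operator']  -- fc_layer is skipped, as intended
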
 
 def parse_network(*outputs):
     """
@@ -101,9 +119,8 @@ def parse_network(*outputs):
 
 class Layer(object):
-    def __init__(self, name, parent_layers):
+    def __init__(self, name=None, parent_layers=None):
         assert isinstance(parent_layers, dict)
-        assert isinstance(name, basestring)
         self.name = name
         self.__parent_layers__ = parent_layers
@@ -122,22 +139,25 @@ class Layer(object):
                 self.__parent_layers__[layer_name])
             kwargs[layer_name] = v1_layer
 
-        if self.name not in context:
+        if self.name is None:
+            return self.to_proto_impl(**kwargs)
+        elif self.name not in context:
             context[self.name] = self.to_proto_impl(**kwargs)
         return context[self.name]
 
     def to_proto_impl(self, **kwargs):
         raise NotImplementedError()
 
-def __convert_to_v2__(method_name, name_prefix, parent_names):
-    if name_prefix is not None:
-        wrapper = wrap_name_default(name_prefix=name_prefix)
+def __convert_to_v2__(method_name, parent_names, is_default_name=True):
+    if is_default_name:
+        wrapper = wrap_name_default(name_prefix=method_name)
     else:
         wrapper = None
 
     class V2LayerImpl(Layer):
-        def __init__(self, name=None, **kwargs):
+        def __init__(self, **kwargs):
             parent_layers = dict()
             other_kwargs = dict()
             for pname in parent_names:
@@ -148,6 +168,7 @@ def __convert_to_v2__(method_name, name_prefix, parent_names):
                 if key not in parent_names:
                     other_kwargs[key] = kwargs[key]
 
+            name = kwargs.get('name', None)
             super(V2LayerImpl, self).__init__(name, parent_layers)
             self.__other_kwargs__ = other_kwargs
@@ -160,7 +181,7 @@ def __convert_to_v2__(method_name, name_prefix, parent_names):
                 args[each] = kwargs[each]
             for each in self.__other_kwargs__:
                 args[each] = self.__other_kwargs__[each]
-            return getattr(conf_helps, method_name)(name=self.name, **args)
+            return getattr(conf_helps, method_name)(**args)
 
     return V2LayerImpl
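
The upshot of the Layer and __convert_to_v2__ changes: named layers are still parsed once and memoized in `context`, while anonymous layers (name=None, e.g. auto-converted projections and operators) are rebuilt on every reference. A runnable toy of that memoization rule, not the real Paddle classes:

    class ToyLayer(object):
        def __init__(self, name=None, parents=None):
            self.name, self.parents = name, parents or {}

        def to_proto(self, context):
            # build parents first, then memoize by name
            kwargs = dict((k, v.to_proto(context))
                          for k, v in self.parents.items())
            if self.name is None:
                return self.build(**kwargs)  # anonymous: never cached
            if self.name not in context:
                context[self.name] = self.build(**kwargs)
            return context[self.name]

        def build(self, **kwargs):
            return '%s(%s)' % (self.name or 'anon', ', '.join(sorted(kwargs)))

    ctx = {}
    data = ToyLayer(name='data')
    fc = ToyLayer(name='fc', parents={'input': data})
    print fc.to_proto(ctx)  # fc(input); both 'data' and 'fc' are now in ctx
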
@@ -191,67 +212,171 @@ class DataLayerV2(Layer):
         return getattr(conf_helps, self.__method_name__)(name=self.name, **args)
+
+
+class MixedLayerV2(Layer):
+    """
+    This class is used to support the `with` grammar. Without it, mixed_layer
+    could simply be converted by:
+
+        mixed = __convert_to_v2__(
+            'mixed_layer', name_prefix='mixed', parent_names=['input'])
+    """
+
+    class AddToSealedMixedLayerExceptionV2(Exception):
+        pass
+
+    def __init__(self,
+                 size=0,
+                 input=None,
+                 name=None,
+                 act=None,
+                 bias_attr=None,
+                 layer_attr=None):
+        self.__method_name__ = 'mixed_layer'
+        self.finalized = False
+        self.__inputs__ = []
+        if input is not None:
+            self.__inputs__ = input
+
+        other_kwargs = dict()
+        other_kwargs['name'] = name
+        other_kwargs['size'] = size
+        other_kwargs['act'] = act
+        other_kwargs['bias_attr'] = bias_attr
+        other_kwargs['layer_attr'] = layer_attr
+
+        parent_layers = {"input": self.__inputs__}
+        super(MixedLayerV2, self).__init__(name, parent_layers)
+        self.__other_kwargs__ = other_kwargs
+
+    def __iadd__(self, other):
+        if not self.finalized:
+            self.__inputs__.append(other)
+            return self
+        else:
+            raise MixedLayerV2.AddToSealedMixedLayerExceptionV2()
+
+    def __enter__(self):
+        assert len(self.__inputs__) == 0
+        return self
+
+    def __exit__(self, *args, **kwargs):
+        self.finalized = True
+
+    def to_proto_impl(self, **kwargs):
+        args = dict()
+        for each in kwargs:
+            args[each] = kwargs[each]
+        for each in self.__other_kwargs__:
+            args[each] = self.__other_kwargs__[each]
+        return getattr(conf_helps, self.__method_name__)(**args)
+
+
+@wrap_name_default("mixed")
+@wrap_act_default(act=activation.Linear())
+@wrap_bias_attr_default(has_bias=False)
+@layer_support(conf_helps.layers.ERROR_CLIPPING, conf_helps.layers.DROPOUT)
+def mixed(size=0,
+          name=None,
+          input=None,
+          act=None,
+          bias_attr=False,
+          layer_attr=None):
+    return MixedLayerV2(size, input, name, act, bias_attr, layer_attr)
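
A minimal sketch of the two construction styles the new v2 mixed layer supports, mirroring the ProjOpTest cases later in this commit (layer names assumed):

    import paddle.v2.layer as layer
    import paddle.v2.data_type as data_type

    d = layer.data(name='d', type=data_type.dense_vector(128))
    f = layer.fc(input=d, size=64)

    # eager style: pass all projections up front
    m0 = layer.mixed(size=64, input=[layer.full_matrix_projection(input=f)])

    # with-block style: __iadd__ appends inputs until __exit__ seals the layer
    with layer.mixed(size=64) as m1:
        m1 += layer.full_matrix_projection(input=f)

    print layer.parse_network(m0, m1)
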
 
 data = DataLayerV2
-fc = __convert_to_v2__('fc_layer', name_prefix='fc', parent_names=['input'])
-max_id = __convert_to_v2__(
-    'maxid_layer', name_prefix='maxid', parent_names=['input'])
-classification_cost = __convert_to_v2__(
-    'classification_cost',
-    name_prefix='classification_cost',
-    parent_names=['input', 'label', 'weight'])
-regression_cost = __convert_to_v2__(
-    'regression_cost',
-    name_prefix='regression_cost',
-    parent_names=['input', 'label', 'weight'])
-cross_entropy_cost = __convert_to_v2__(
-    'cross_entropy',
-    name_prefix='cross_entropy',
-    parent_names=['input', 'label'])
-cross_entropy_with_selfnorm_cost = __convert_to_v2__(
-    'cross_entropy_with_selfnorm',
-    name_prefix='cross_entropy_with_selfnorm',
-    parent_names=['input', 'label'])
-multi_binary_label_cross_entropy_cost = __convert_to_v2__(
-    'multi_binary_label_cross_entropy',
-    name_prefix='multi_binary_label_cross_entropy',
-    parent_names=['input', 'label'])
-rank_cost = __convert_to_v2__(
-    'rank_cost',
-    name_prefix='rank_cost',
-    parent_names=['left', 'right', 'label', 'weight'])
-lambda_cost = __convert_to_v2__(
-    'lambda_cost', name_prefix='lambda_cost', parent_names=['input', 'score'])
-sum_cost = __convert_to_v2__(
-    'sum_cost', name_prefix='sum_cost', parent_names=['input'])
-huber_cost = __convert_to_v2__(
-    'huber_cost', name_prefix='huber_cost', parent_names=['input', 'label'])
-
-if __name__ == '__main__':
-    pixel = data(name='pixel', type=data_type.dense_vector(784))
-    label = data(name='label', type=data_type.integer_value(10))
-    weight = data(name='weight', type=data_type.dense_vector(10))
-    score = data(name='score', type=data_type.dense_vector(1))
-
-    hidden = fc(input=pixel,
-                size=100,
-                act=activation.Sigmoid(),
-                param_attr=attr.Param(name='hidden'))
-    inference = fc(input=hidden, size=10, act=activation.Softmax())
-    maxid = max_id(input=inference)
-    cost1 = classification_cost(input=inference, label=label)
-    cost2 = classification_cost(input=inference, label=label, weight=weight)
-    cost3 = cross_entropy_cost(input=inference, label=label)
-    cost4 = cross_entropy_with_selfnorm_cost(input=inference, label=label)
-    cost5 = regression_cost(input=inference, label=label)
-    cost6 = regression_cost(input=inference, label=label, weight=weight)
-    cost7 = multi_binary_label_cross_entropy_cost(input=inference, label=label)
-    cost8 = rank_cost(left=score, right=score, label=score)
-    cost9 = lambda_cost(input=inference, score=score)
-    cost10 = sum_cost(input=inference)
-    cost11 = huber_cost(input=score, label=label)
-
-    print parse_network(cost1, cost2)
-    print parse_network(cost3, cost4)
-    print parse_network(cost5, cost6)
-    print parse_network(cost7, cost8, cost9, cost10, cost11)
-    print parse_network(inference, maxid)
+AggregateLevel = conf_helps.layers.AggregateLevel
+ExpandLevel = conf_helps.layers.ExpandLevel
+
+layer_list = [
+    # [V2LayerImpl, V1_method_name, parent_names]
+    # fully connected layers
+    ['fc', 'fc_layer', ['input']],
+    # conv layers
+    ['conv_shift', 'conv_shift_layer', ['a', 'b']],
+    ['img_conv', 'img_conv_layer', ['input']],
+    # image pooling layers
+    ['img_pool', 'img_pool_layer', ['input']],
+    ['spp', 'spp_layer', ['input']],
+    ['maxout', 'maxout_layer', ['input']],
+    # norm layers
+    ['img_cmrnorm', 'img_cmrnorm_layer', ['input']],
+    ['batch_norm', 'batch_norm_layer', ['input']],
+    ['sum_to_one_norm', 'sum_to_one_norm_layer', ['input']],
+    # recurrent layers
+    ['recurrent', 'recurrent_layer', ['input']],
+    ['lstmemory', 'lstmemory', ['input']],
+    ['grumemory', 'grumemory', ['input']],
+    # aggregate layers
+    ['pool', 'pooling_layer', ['input']],
+    ['last_seq', 'last_seq', ['input']],
+    ['first_seq', 'first_seq', ['input']],
+    ['concat', 'concat_layer', ['input']],
+    ['seq_concat', 'seq_concat_layer', ['a', 'b']],
+    # reshaping layers
+    ['block_expand', 'block_expand_layer', ['input']],
+    ['expand', 'expand_layer', ['input', 'expand_as']],
+    ['repeat', 'repeat_layer', ['input']],
+    ['rotate', 'rotate_layer', ['input']],
+    ['seq_reshape', 'seq_reshape_layer', ['input']],
+    # math layers
+    ['addto', 'addto_layer', ['input']],
+    ['linear_comb', 'linear_comb_layer', ['weights', 'vectors']],
+    ['interpolation', 'interpolation_layer', ['input', 'weight']],
+    ['bilinear_interp', 'bilinear_interp_layer', ['input']],
+    ['power', 'power_layer', ['input', 'weight']],
+    ['scaling', 'scaling_layer', ['input', 'weight']],
+    ['slope_intercept', 'slope_intercept_layer', ['input']],
+    ['tensor', 'tensor_layer', ['a', 'b']],
+    ['cos_sim', 'cos_sim', ['a', 'b']],
+    ['trans', 'trans_layer', ['input']],
+    # sampling layers
+    ['max_id', 'maxid_layer', ['input']],
+    ['sampling_id', 'sampling_id_layer', ['input']],
+    # slicing and joining layers
+    ['pad', 'pad_layer', ['input']],
+    # cost layers
+    [
+        'classification_cost', 'classification_cost',
+        ['input', 'label', 'weight']
+    ],
+    ['regression_cost', 'regression_cost', ['input', 'label', 'weight']],
+    ['cross_entropy_cost', 'cross_entropy', ['input', 'label']],
+    [
+        'cross_entropy_with_selfnorm_cost', 'cross_entropy_with_selfnorm',
+        ['input', 'label']
+    ],
+    [
+        'multi_binary_label_cross_entropy_cost',
+        'multi_binary_label_cross_entropy', ['input', 'label']
+    ],
+    ['rank_cost', 'rank_cost', ['left', 'right', 'label', 'weight']],
+    ['lambda_cost', 'lambda_cost', ['input', 'score']],
+    ['sum_cost', 'sum_cost', ['input']],
+    ['huber_cost', 'huber_cost', ['input', 'label']],
+    ['crf', 'crf_layer', ['input', 'label']],
+    ['crf_decoding', 'crf_decoding_layer', ['input']],
+    ['ctc', 'ctc_layer', ['input', 'label']],
+    ['warp_ctc', 'warp_ctc_layer', ['input', 'label']],
+    ['nce', 'nce_layer', ['input', 'label']],
+    ['hsigmoid', 'hsigmoid', ['input', 'label']],
+    # check layers
+    ['eos', 'eos_layer', ['input']]
+]
+for l in layer_list:
+    globals()[l[0]] = __convert_to_v2__(l[1], l[2])
+
+# convert projection
+for prj in __projection_names__:
+    globals()[prj] = __convert_to_v2__(
+        prj, parent_names=['input'], is_default_name=False)
+
+# convert operator
+operator_list = [
+    # [V1_method_name, parent_names],
+    ['dotmul_operator', ['a', 'b']],
+    ['conv_operator', ['img', 'filter']]
+]
+for op in operator_list:
+    globals()[op[0]] = __convert_to_v2__(
+        op[0], parent_names=op[1], is_default_name=False)
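
After these loops run, every layer_list entry is callable from paddle.v2.layer under its v2 name. A small usage sketch, assuming exactly the API registered above (the pool call mirrors AggregateLayerTest below):

    import paddle.v2.layer as layer
    import paddle.v2.data_type as data_type
    import paddle.v2.pooling as pooling

    pixel = layer.data(name='pixel', type=data_type.dense_vector(784))
    pool = layer.pool(input=pixel,
                      pooling_type=pooling.Avg(),
                      agg_level=layer.AggregateLevel.EACH_SEQUENCE)
    print layer.parse_network(pool)
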
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from paddle.trainer_config_helpers.poolings import *
+
+__all__ = ["Max", "CudnnMax", "Avg", "CudnnAvg", "Sum", "SquareRootN"]
+
+Max = MaxPooling
+CudnnMax = CudnnMaxPooling
+Avg = AvgPooling
+CudnnAvg = CudnnAvgPooling
+Sum = SumPooling
+SquareRootN = SquareRootNPooling
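
The new module only re-exports the v1 pooling classes under shorter names, so the alias and the original are the same object. A sketch under that assumption:

    import paddle.v2.layer as layer
    import paddle.v2.pooling as pooling
    import paddle.v2.data_type as data_type
    from paddle.trainer_config_helpers.poolings import MaxPooling

    assert pooling.Max is MaxPooling  # an alias, not a wrapper

    pixel = layer.data(name='pixel', type=data_type.dense_vector(128))
    conv = layer.img_conv(input=pixel, filter_size=1, num_channels=8,
                          num_filters=16)
    maxpool = layer.img_pool(input=conv, pool_size=2, num_channels=16,
                             pool_type=pooling.Max())
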
@@ -19,18 +19,106 @@ import paddle.v2.activation as activation
 import paddle.v2.attr as attr
 import paddle.v2.data_type as data_type
 import paddle.v2.layer as layer
+import paddle.v2.pooling as pooling
 from paddle.trainer_config_helpers.config_parser_utils import \
     parse_network_config as parse_network
 
-pixel = layer.data(name='pixel', type=data_type.dense_vector(784))
+pixel = layer.data(name='pixel', type=data_type.dense_vector(128))
 label = layer.data(name='label', type=data_type.integer_value(10))
 weight = layer.data(name='weight', type=data_type.dense_vector(10))
 score = layer.data(name='score', type=data_type.dense_vector(1))
 hidden = layer.fc(input=pixel,
                   size=100,
                   act=activation.Sigmoid(),
                   param_attr=attr.Param(name='hidden'))
 inference = layer.fc(input=hidden, size=10, act=activation.Softmax())
+conv = layer.img_conv(
+    input=pixel,
+    filter_size=1,
+    filter_size_y=1,
+    num_channels=8,
+    num_filters=16,
+    act=activation.Linear())
+
+
+class ImageLayerTest(unittest.TestCase):
+    def test_conv_layer(self):
+        conv_shift = layer.conv_shift(a=pixel, b=score)
+        print layer.parse_network(conv, conv_shift)
+
+    def test_pooling_layer(self):
+        maxpool = layer.img_pool(
+            input=conv,
+            pool_size=2,
+            num_channels=16,
+            padding=1,
+            pool_type=pooling.Max())
+        spp = layer.spp(input=conv,
+                        pyramid_height=2,
+                        num_channels=16,
+                        pool_type=pooling.Max())
+        maxout = layer.maxout(input=conv, num_channels=16, groups=4)
+        print layer.parse_network(maxpool, spp, maxout)
+
+    def test_norm_layer(self):
+        norm1 = layer.img_cmrnorm(input=conv, size=5)
+        norm2 = layer.batch_norm(input=conv)
+        norm3 = layer.sum_to_one_norm(input=conv)
+        print layer.parse_network(norm1, norm2, norm3)
+
+
+class AggregateLayerTest(unittest.TestCase):
+    def test_aggregate_layer(self):
+        pool = layer.pool(
+            input=pixel,
+            pooling_type=pooling.Avg(),
+            agg_level=layer.AggregateLevel.EACH_SEQUENCE)
+        last_seq = layer.last_seq(input=pixel)
+        first_seq = layer.first_seq(input=pixel)
+        concat = layer.concat(input=[last_seq, first_seq])
+        seq_concat = layer.seq_concat(a=last_seq, b=first_seq)
+        print layer.parse_network(pool, last_seq, first_seq, concat, seq_concat)
+
+
+class MathLayerTest(unittest.TestCase):
+    def test_math_layer(self):
+        addto = layer.addto(input=[pixel, pixel])
+        linear_comb = layer.linear_comb(weights=weight, vectors=hidden, size=10)
+        interpolation = layer.interpolation(
+            input=[hidden, hidden], weight=score)
+        bilinear = layer.bilinear_interp(input=conv, out_size_x=4, out_size_y=4)
+        power = layer.power(input=pixel, weight=score)
+        scaling = layer.scaling(input=pixel, weight=score)
+        slope = layer.slope_intercept(input=pixel)
+        tensor = layer.tensor(a=pixel, b=pixel, size=1000)
+        cos_sim = layer.cos_sim(a=pixel, b=pixel)
+        trans = layer.trans(input=tensor)
+        print layer.parse_network(addto, linear_comb, interpolation, power,
+                                  scaling, slope, tensor, cos_sim, trans)
+
+
+class ReshapeLayerTest(unittest.TestCase):
+    def test_reshape_layer(self):
+        block_expand = layer.block_expand(
+            input=conv, num_channels=4, stride_x=1, block_x=1)
+        expand = layer.expand(
+            input=weight,
+            expand_as=pixel,
+            expand_level=layer.ExpandLevel.FROM_TIMESTEP)
+        repeat = layer.repeat(input=pixel, num_repeats=4)
+        reshape = layer.seq_reshape(input=pixel, reshape_size=4)
+        rotate = layer.rotate(input=pixel, height=16, width=49)
+        print layer.parse_network(block_expand, expand, repeat, reshape, rotate)
+
+
+class RecurrentLayerTest(unittest.TestCase):
+    def test_recurrent_layer(self):
+        word = layer.data(name='word', type=data_type.integer_value(12))
+        recurrent = layer.recurrent(input=word)
+        lstm = layer.lstmemory(input=word)
+        gru = layer.grumemory(input=word)
+        print layer.parse_network(recurrent, lstm, gru)
+
 class CostLayerTest(unittest.TestCase):
@@ -51,12 +139,120 @@ class CostLayerTest(unittest.TestCase):
         cost10 = layer.sum_cost(input=inference)
         cost11 = layer.huber_cost(input=score, label=label)
 
-        print dir(layer)
-        layer.parse_network(cost1, cost2)
-        print dir(layer)
-        #print layer.parse_network(cost3, cost4)
-        #print layer.parse_network(cost5, cost6)
-        #print layer.parse_network(cost7, cost8, cost9, cost10, cost11)
+        print layer.parse_network(cost1, cost2)
+        print layer.parse_network(cost3, cost4)
+        print layer.parse_network(cost5, cost6)
+        print layer.parse_network(cost7, cost8, cost9, cost10, cost11)
+
+        crf = layer.crf(input=inference, label=label)
+        crf_decoding = layer.crf_decoding(input=inference, size=3)
+        ctc = layer.ctc(input=inference, label=label)
+        warp_ctc = layer.warp_ctc(input=pixel, label=label)
+        nce = layer.nce(input=inference, label=label, num_classes=3)
+        hsigmoid = layer.hsigmoid(input=inference, label=label, num_classes=3)
+
+        print layer.parse_network(crf, crf_decoding, ctc, warp_ctc, nce,
+                                  hsigmoid)
+
+
+class OtherLayerTest(unittest.TestCase):
+    def test_sampling_layer(self):
+        maxid = layer.max_id(input=inference)
+        sampling_id = layer.sampling_id(input=inference)
+        eos = layer.eos(input=maxid, eos_id=5)
+        print layer.parse_network(maxid, sampling_id, eos)
+
+    def test_slicing_joining_layer(self):
+        pad = layer.pad(input=conv, pad_c=[2, 3], pad_h=[1, 2], pad_w=[3, 1])
+        print layer.parse_network(pad)
+
+
+class ProjOpTest(unittest.TestCase):
+    def test_projection(self):
+        input = layer.data(name='data', type=data_type.dense_vector(784))
+        word = layer.data(
+            name='word', type=data_type.integer_value_sequence(10000))
+        fc0 = layer.fc(input=input, size=100, act=activation.Sigmoid())
+        fc1 = layer.fc(input=input, size=200, act=activation.Sigmoid())
+        mixed0 = layer.mixed(
+            size=256,
+            input=[
+                layer.full_matrix_projection(input=fc0),
+                layer.full_matrix_projection(input=fc1)
+            ])
+        with layer.mixed(size=200) as mixed1:
+            mixed1 += layer.full_matrix_projection(input=fc0)
+            mixed1 += layer.identity_projection(input=fc1)
+
+        table = layer.table_projection(input=word)
+        emb0 = layer.mixed(size=512, input=table)
+        with layer.mixed(size=512) as emb1:
+            emb1 += table
+
+        scale = layer.scaling_projection(input=fc0)
+        scale0 = layer.mixed(size=100, input=scale)
+        with layer.mixed(size=100) as scale1:
+            scale1 += scale
+
+        dotmul = layer.dotmul_projection(input=fc0)
+        dotmul0 = layer.mixed(size=100, input=dotmul)
+        with layer.mixed(size=100) as dotmul1:
+            dotmul1 += dotmul
+
+        context = layer.context_projection(input=fc0, context_len=5)
+        context0 = layer.mixed(size=100, input=context)
+        with layer.mixed(size=100) as context1:
+            context1 += context
+
+        conv = layer.conv_projection(
+            input=input,
+            filter_size=1,
+            num_channels=1,
+            num_filters=128,
+            stride=1,
+            padding=0)
+        conv0 = layer.mixed(input=conv, bias_attr=True)
+        with layer.mixed(bias_attr=True) as conv1:
+            conv1 += conv
+
+        print layer.parse_network(mixed0)
+        print layer.parse_network(mixed1)
+        print layer.parse_network(emb0)
+        print layer.parse_network(emb1)
+        print layer.parse_network(scale0)
+        print layer.parse_network(scale1)
+        print layer.parse_network(dotmul0)
+        print layer.parse_network(dotmul1)
+        print layer.parse_network(conv0)
+        print layer.parse_network(conv1)
+
+    def test_operator(self):
+        ipt0 = layer.data(name='data', type=data_type.dense_vector(784))
+        ipt1 = layer.data(name='word', type=data_type.dense_vector(128))
+        fc0 = layer.fc(input=ipt0, size=100, act=activation.Sigmoid())
+        fc1 = layer.fc(input=ipt0, size=100, act=activation.Sigmoid())
+
+        dotmul_op = layer.dotmul_operator(a=fc0, b=fc1)
+        dotmul0 = layer.mixed(input=dotmul_op)
+        with layer.mixed() as dotmul1:
+            dotmul1 += dotmul_op
+
+        conv = layer.conv_operator(
+            img=ipt0,
+            filter=ipt1,
+            filter_size=1,
+            num_channels=1,
+            num_filters=128,
+            stride=1,
+            padding=0)
+        conv0 = layer.mixed(input=conv)
+        with layer.mixed() as conv1:
+            conv1 += conv
+
+        print layer.parse_network(dotmul0)
+        print layer.parse_network(dotmul1)
+        print layer.parse_network(conv0)
+        print layer.parse_network(conv1)
+
 if __name__ == '__main__':
...
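
The pattern used throughout these tests: build lazy v2 layer objects, then call layer.parse_network to trigger the actual v1 config parse. A minimal standalone version (SmokeTest is a hypothetical name, assuming the v2 package from this commit is importable):

    import unittest

    import paddle.v2.layer as layer
    import paddle.v2.data_type as data_type


    class SmokeTest(unittest.TestCase):
        def test_fc(self):
            pixel = layer.data(name='pixel', type=data_type.dense_vector(128))
            hidden = layer.fc(input=pixel, size=100)
            # parse_network converts the lazy v2 graph into a protobuf config
            print layer.parse_network(hidden)


    if __name__ == '__main__':
        unittest.main()
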
@@ -17,6 +17,7 @@ setup(name='paddle',
         '': '${CMAKE_CURRENT_SOURCE_DIR}'
     },
     install_requires = [
-        'scikit-learn>=0.18.0'
+        'scikit-learn>=0.18.0',
+        'scipy>=0.18.0',
     ]
)
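
One subtlety in this hunk: the trailing comma added after 'scikit-learn>=0.18.0' matters, because without it Python's implicit string-literal concatenation would merge the two requirements into a single bogus spec. A quick demonstration:

    # adjacent string literals concatenate silently in Python
    reqs_wrong = ['scikit-learn>=0.18.0' 'scipy>=0.18.0']
    reqs_right = ['scikit-learn>=0.18.0', 'scipy>=0.18.0']
    print reqs_wrong  # ['scikit-learn>=0.18.0scipy>=0.18.0'] -- one broken spec
    print reqs_right  # ['scikit-learn>=0.18.0', 'scipy>=0.18.0']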