Commit c7483bf9 authored by caoying03

add configuration helper for prelu layer.

Parent 29a0bc83
doc/api/v2/config/layer.rst
@@ -441,3 +441,12 @@ eos
 ---
 .. autoclass:: paddle.v2.layer.eos
     :noindex:
+
+Activation with learnable parameter
+===================================
+
+prelu
+-----
+
+.. autoclass:: paddle.v2.layer.prelu
+    :noindex:
python/paddle/trainer/config_parser.py
@@ -73,7 +73,6 @@ To use this from paddle_trainer, paddle_trainer should be called with
 --config_args=extension_module_name=[MODULE_NAME]
 '''
 import copy
 import logging
 import os
@@ -1731,9 +1730,10 @@ class ParameterReluLayer(LayerBase):
     def __init__(self, name, inputs, partial_sum=1, **args):
         super(ParameterReluLayer, self).__init__(
             name, self.layer_type, 0, inputs=inputs, **args)
-        config_assert(len(self.inputs) == 1)
-        config_assert(self.input_layer.size % partial_sum == 0)
         input_layer = self.get_input_layer(0)
+        config_assert(len(self.inputs) == 1, "prelu layer has only one input.")
+        config_assert(input_layer.size % partial_sum == 0,
+                      "partial_sum must evenly divide the input size.")
         self.set_layer_size(input_layer.size)
         self.create_input_parameter(0, input_layer.size / partial_sum)
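The parameter created above has size input_layer.size / partial_sum, i.e. one learnable slope shared by each group of partial_sum consecutive elements. As an aside, here is a minimal pure-Python sketch (illustrative only, not PaddlePaddle code; all names are hypothetical) of the forward semantics this grouping implies:

# Illustrative sketch only: PReLU forward pass where each group of
# `partial_sum` consecutive elements shares one learnable slope.
def prelu_forward(z, a, partial_sum=1):
    assert len(z) % partial_sum == 0, 'partial_sum must evenly divide the input size.'
    assert len(a) == len(z) // partial_sum
    out = []
    for i, z_i in enumerate(z):
        a_i = a[i // partial_sum]  # shared slope for this group of elements
        out.append(z_i if z_i > 0 else a_i * z_i)
    return out

# Example: prelu_forward([1.0, -2.0, 3.0, -4.0], [0.25, 0.25], partial_sum=2)
# returns [1.0, -0.5, 3.0, -1.0].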
python/paddle/trainer_config_helpers/layers.py
@@ -31,31 +31,31 @@ except ImportError:
 import copy

 __all__ = [
-    "full_matrix_projection",
-    "AggregateLevel",
-    "ExpandLevel",
-    "identity_projection",
-    "dotmul_projection",
-    "dotmul_operator",
-    "repeat_layer",
-    "seq_reshape_layer",
-    "table_projection",
-    "mixed_layer",
-    "data_layer",
-    "embedding_layer",
-    "fc_layer",
-    "grumemory",
-    "pooling_layer",
-    "lstmemory",
-    "last_seq",
-    "first_seq",
-    "cos_sim",
-    "hsigmoid",
-    "conv_projection",
-    "mse_cost",
-    "regression_cost",
+    'full_matrix_projection',
+    'AggregateLevel',
+    'ExpandLevel',
+    'identity_projection',
+    'dotmul_projection',
+    'dotmul_operator',
+    'repeat_layer',
+    'seq_reshape_layer',
+    'table_projection',
+    'mixed_layer',
+    'data_layer',
+    'embedding_layer',
+    'fc_layer',
+    'grumemory',
+    'pooling_layer',
+    'lstmemory',
+    'last_seq',
+    'first_seq',
+    'cos_sim',
+    'hsigmoid',
+    'conv_projection',
+    'mse_cost',
+    'regression_cost',
     'classification_cost',
-    "LayerOutput",
+    'LayerOutput',
     'img_conv_layer',
     'img_pool_layer',
     'batch_norm_layer',

@@ -121,6 +121,7 @@ __all__ = [
     'smooth_l1_cost',
     'layer_support',
     'multiplex_layer',
+    'prelu_layer',
 ]
@@ -129,26 +130,26 @@ class LayerType(object):
     Layer type enumerations.
     """

-    DATA = "data"
-    MIXED_LAYER = "mixed"
-    LSTMEMORY = "lstmemory"
-    GRUMEMORY = "gated_recurrent"
-    SEQUENCE_LAST_INSTANCE = "seqlastins"
-    SEQUENCE_FIRST_INSTANCE = "seqfirstins"
-    SEQUENCE_RESHAPE = "seqreshape"
-    POOLING_MAX = "max"
+    DATA = 'data'
+    MIXED_LAYER = 'mixed'
+    LSTMEMORY = 'lstmemory'
+    GRUMEMORY = 'gated_recurrent'
+    SEQUENCE_LAST_INSTANCE = 'seqlastins'
+    SEQUENCE_FIRST_INSTANCE = 'seqfirstins'
+    SEQUENCE_RESHAPE = 'seqreshape'
+    POOLING_MAX = 'max'
     POOLING_AVG = 'average'
-    FC_LAYER = "fc"
+    FC_LAYER = 'fc'
     COST = 'cost'
     COSINE_SIM_VEC = 'cos_vm'
     COSINE_SIM = 'cos'
     HSIGMOID = 'hsigmoid'
-    CONV_LAYER = "conv"
-    CONVTRANS_LAYER = "convt"
-    EXCONV_LAYER = "exconv"
-    EXCONVTRANS_LAYER = "exconvt"
-    CUDNNCONV_LAYER = "cudnn_conv"
-    POOL_LAYER = "pool"
+    CONV_LAYER = 'conv'
+    CONVTRANS_LAYER = 'convt'
+    EXCONV_LAYER = 'exconv'
+    EXCONVTRANS_LAYER = 'exconvt'
+    CUDNNCONV_LAYER = 'cudnn_conv'
+    POOL_LAYER = 'pool'
     BATCH_NORM_LAYER = 'batch_norm'
     NORM_LAYER = 'norm'
     SUM_TO_ONE_NORM_LAYER = 'sum_to_one_norm'
@@ -177,36 +178,38 @@ class LayerType(object):
     EOSID_LAYER = 'eos_id'
     RECURRENT_LAYER = 'recurrent'

-    CONV_SHIFT_LAYER = "conv_shift"
-    TENSOR_LAYER = "tensor"
-    SEL_FC_LAYER = "selective_fc"
-    SAMPLING_ID_LAYER = "sampling_id"
-    SLOPE_INTERCEPT_LAYER = "slope_intercept"
-    LINEAR_COMBINATION_LAYER = "convex_comb"
-    BLOCK_EXPAND = "blockexpand"
-    MAXOUT = "maxout"
-    SPP_LAYER = "spp"
-    PAD_LAYER = "pad"
-    MULTIPLEX_LAYER = "multiplex"
-    PRINT_LAYER = "print"
-    PRIORBOX_LAYER = "priorbox"
-    CTC_LAYER = "ctc"
-    WARP_CTC_LAYER = "warp_ctc"
-    CRF_LAYER = "crf"
-    CRF_DECODING_LAYER = "crf_decoding"
+    CONV_SHIFT_LAYER = 'conv_shift'
+    TENSOR_LAYER = 'tensor'
+    SEL_FC_LAYER = 'selective_fc'
+    SAMPLING_ID_LAYER = 'sampling_id'
+    SLOPE_INTERCEPT_LAYER = 'slope_intercept'
+    LINEAR_COMBINATION_LAYER = 'convex_comb'
+    BLOCK_EXPAND = 'blockexpand'
+    MAXOUT = 'maxout'
+    SPP_LAYER = 'spp'
+    PAD_LAYER = 'pad'
+    MULTIPLEX_LAYER = 'multiplex'
+    PRINT_LAYER = 'print'
+    PRIORBOX_LAYER = 'priorbox'
+    CTC_LAYER = 'ctc'
+    WARP_CTC_LAYER = 'warp_ctc'
+    CRF_LAYER = 'crf'
+    CRF_DECODING_LAYER = 'crf_decoding'
     NCE_LAYER = 'nce'
-    RANK_COST = "rank-cost"
-    LAMBDA_COST = "lambda_cost"
-    HUBER = "huber"
-    CROSS_ENTROPY = "multi-class-cross-entropy"
-    CROSS_ENTROPY_WITH_SELFNORM = "multi_class_cross_entropy_with_selfnorm"
-    SOFT_BIN_CLASS_CROSS_ENTROPY = "soft_binary_class_cross_entropy"
-    MULTI_BIN_LABEL_CROSS_ENTROPY = "multi_binary_label_cross_entropy"
-    SUM_COST = "sum_cost"
-    SMOOTH_L1 = "smooth_l1"
+    RANK_COST = 'rank-cost'
+    LAMBDA_COST = 'lambda_cost'
+    HUBER = 'huber'
+    CROSS_ENTROPY = 'multi-class-cross-entropy'
+    CROSS_ENTROPY_WITH_SELFNORM = 'multi_class_cross_entropy_with_selfnorm'
+    SOFT_BIN_CLASS_CROSS_ENTROPY = 'soft_binary_class_cross_entropy'
+    MULTI_BIN_LABEL_CROSS_ENTROPY = 'multi_binary_label_cross_entropy'
+    SUM_COST = 'sum_cost'
+    SMOOTH_L1 = 'smooth_l1'
+    PRELU = 'prelu'

     @staticmethod
     def is_layer_type(type_name):
@@ -5551,3 +5554,69 @@ def multiplex_layer(input, name=None, layer_attr=None):
         layer_type=LayerType.MULTIPLEX_LAYER,
         parents=input,
         size=l.config.size)
+
+
+@wrap_name_default()
+@layer_support()
+@wrap_param_attr_default()
+def prelu_layer(input,
+                name=None,
+                partial_sum=1,
+                param_attr=None,
+                layer_attr=None):
+    """
+    The Parametric ReLU activation: positive inputs pass through unchanged,
+    while negative inputs are scaled by a learnable weight.
+
+    Reference:
+        Delving Deep into Rectifiers: Surpassing Human-Level Performance on
+        ImageNet Classification. http://arxiv.org/pdf/1502.01852v1.pdf
+
+    .. math::
+
+       y_i =
+       \\begin{cases}
+       z_i, & \\text{if } z_i > 0 \\\\
+       a_i z_i, & \\text{otherwise}
+       \\end{cases}
+
+    :param name: Name of this layer.
+    :type name: basestring
+    :param input: The input layer.
+    :type input: LayerOutput
+    :param partial_sum: this parameter makes a group of inputs share the same weight.
+
+        1. partial_sum = 1 indicates the element-wise activation:
+           each element has its own weight.
+        2. partial_sum = number of elements in one channel indicates the
+           channel-wise activation: elements in a channel share the same weight.
+        3. partial_sum = number of outputs indicates all elements share the
+           same weight.
+
+    :type partial_sum: int
+    :param param_attr: The parameter attribute. See ParameterAttribute for details.
+    :type param_attr: ParameterAttribute|None
+    :param layer_attr: Extra layer configurations. Default is None.
+    :type layer_attr: ExtraLayerAttribute|None
+    :return: LayerOutput object.
+    :rtype: LayerOutput
+    """
+
+    if isinstance(input, collections.Sequence):
+        assert len(input) == 1, 'prelu_layer accepts only one input.'
+    else:
+        input = [input]
+    assert isinstance(input[0], LayerOutput)
+
+    if isinstance(param_attr, collections.Sequence):
+        assert len(param_attr) == 1, (
+            'because prelu_layer accepts only one input, '
+            'it requires only one parameter setting.')
+    else:
+        param_attr = [param_attr]
+    assert isinstance(param_attr[0], ParameterAttribute)
+
+    l = Layer(
+        name=name,
+        type=LayerType.PRELU,
+        inputs=Input(input[0].name, **param_attr[0].attr),
+        partial_sum=partial_sum,
+        **ExtraLayerAttribute.to_kwargs(layer_attr))
+    return LayerOutput(
+        name=name,
+        layer_type=LayerType.PRELU,
+        parents=input,
+        size=l.config.size)
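To make the three partial_sum regimes from the docstring concrete, here is a hedged sketch of a trainer config (variable names are hypothetical; it assumes the 300-dimensional input is 3 channels of 100 elements each):

from paddle.trainer_config_helpers import *

# Assumed layout: 3 channels x 100 elements, flattened to size 300.
data = data_layer(name='input', size=300)

# Element-wise: one learnable slope per element (300 parameters).
prelu_ew = prelu_layer(input=data, partial_sum=1)

# Channel-wise: the 100 elements of each channel share a slope (3 parameters).
prelu_cw = prelu_layer(input=data, partial_sum=100)

# Layer-wise: all 300 elements share a single slope (1 parameter).
prelu_lw = prelu_layer(input=data, partial_sum=300)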
python/paddle/trainer_config_helpers/tests/configs/file_list.sh
@@ -5,6 +5,7 @@ last_first_seq test_expand_layer test_ntm_layers test_hsigmoid
 img_layers img_trans_layers util_layers simple_rnn_layers unused_layers test_cost_layers
 test_rnn_group shared_fc shared_lstm shared_gru test_cost_layers_with_weight
 test_spp_layer test_bilinear_interp test_maxout test_bi_grumemory math_ops
-test_seq_concat_reshape test_pad test_smooth_l1 test_multiplex_layer)
+test_seq_concat_reshape test_pad test_smooth_l1 test_multiplex_layer
+test_prelu_layer)

 export whole_configs=(test_split_datasource)
python/paddle/trainer_config_helpers/tests/configs/protostr/test_prelu_layer.protostr (new file)

type: "nn"
layers {
name: "input"
type: "data"
size: 300
active_type: ""
}
layers {
name: "__prelu_layer_0__"
type: "prelu"
size: 300
active_type: ""
inputs {
input_layer_name: "input"
input_parameter_name: "___prelu_layer_0__.w0"
}
}
parameters {
name: "___prelu_layer_0__.w0"
size: 300
initial_mean: 0.0
initial_std: 0.057735026919
initial_strategy: 0
initial_smart: true
}
input_layer_names: "input"
output_layer_names: "__prelu_layer_0__"
sub_models {
name: "root"
layer_names: "input"
layer_names: "__prelu_layer_0__"
input_layer_names: "input"
output_layer_names: "__prelu_layer_0__"
is_recurrent_layer_group: false
}
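A note on the generated parameters block: with initial_smart: true, the value initial_std: 0.057735026919 matches 1/sqrt(300), i.e. one over the square root of the layer size, which appears to be PaddlePaddle's default "smart" initialization for this 300-element weight.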
python/paddle/trainer_config_helpers/tests/configs/test_prelu_layer.py (new file)

from paddle.trainer_config_helpers import *
data = data_layer(name='input', size=300)
prelu = prelu_layer(input=data)
outputs(prelu)
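Presumably, the config test harness parses this minimal config and compares the resulting protobuf against the test_prelu_layer.protostr file above, which is why both files are added together with the file_list.sh entry.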