diff --git a/python/paddle/trainer/config_parser.py b/python/paddle/trainer/config_parser.py
index 18cbd44f49d5867690a4aac615a5e2129cbecffe..e0a43f6b6e157c8eadb160e6f942617cff510a3f 100644
--- a/python/paddle/trainer/config_parser.py
+++ b/python/paddle/trainer/config_parser.py
@@ -1803,9 +1803,8 @@ class ConvTransLayer(ConvTransLayerBase):
 
 @config_layer('norm')
 class NormLayer(LayerBase):
-    def __init__(self, name, inputs, device=None, **xargs):
-        super(NormLayer, self).__init__(
-            name, 'norm', 0, inputs=inputs, device=device, **xargs)
+    def __init__(self, name, inputs, **xargs):
+        super(NormLayer, self).__init__(name, 'norm', 0, inputs=inputs, **xargs)
         for input_index in xrange(len(self.inputs)):
             input_layer = self.get_input_layer(input_index)
             norm_conf = self.config.inputs[input_index].norm_conf
@@ -1817,9 +1816,8 @@ class NormLayer(LayerBase):
 
 @config_layer('pool')
 class PoolLayer(LayerBase):
-    def __init__(self, name, inputs, device=None, **xargs):
-        super(PoolLayer, self).__init__(
-            name, 'pool', 0, inputs=inputs, device=device, **xargs)
+    def __init__(self, name, inputs, **xargs):
+        super(PoolLayer, self).__init__(name, 'pool', 0, inputs=inputs, **xargs)
         for input_index in xrange(len(self.inputs)):
             input_layer = self.get_input_layer(input_index)
             pool_conf = self.config.inputs[input_index].pool_conf
@@ -1851,7 +1849,6 @@ class BatchNormLayer(LayerBase):
                  inputs,
                  active_type="linear",
                  bias=True,
-                 device=None,
                  use_global_stats=True,
                  moving_average_fraction=0.9,
                  batch_norm_type=None,
@@ -1893,7 +1890,6 @@
             0,
             active_type=active_type,
             inputs=inputs,
-            device=device,
             **xargs)
 
         if use_global_stats is not None:
@@ -1927,9 +1923,9 @@ class BatchNormLayer(LayerBase):
 
 @config_layer('trans')
 class TransLayer(LayerBase):
-    def __init__(self, name, inputs, device=None, **xargs):
+    def __init__(self, name, inputs, **xargs):
         super(TransLayer, self).__init__(
-            name, 'trans', 0, inputs=inputs, device=device, **xargs)
+            name, 'trans', 0, inputs=inputs, **xargs)
         config_assert(
             len(self.inputs) == 1,
             'TransLayer must have one and only one input')
@@ -1938,9 +1934,9 @@ class TransLayer(LayerBase):
 
 @config_layer('resize')
 class ResizeLayer(LayerBase):
-    def __init__(self, name, size, inputs, device=None, **xargs):
+    def __init__(self, name, size, inputs, **xargs):
         super(ResizeLayer, self).__init__(
-            name, 'resize', size=size, inputs=inputs, device=device, **xargs)
+            name, 'resize', size=size, inputs=inputs, **xargs)
         config_assert(
             len(self.inputs) == 1,
             'ResizeLayer must have one and only one input')
@@ -2265,15 +2261,9 @@ def Generator(
 
 @config_layer('expand')
 class ExpandLayer(LayerBase):
-    def __init__(self,
-                 name,
-                 inputs,
-                 trans_type='non-seq',
-                 device=None,
-                 bias=False,
-                 **xargs):
+    def __init__(self, name, inputs, trans_type='non-seq', bias=False, **xargs):
         super(ExpandLayer, self).__init__(
-            name, 'expand', 0, inputs=inputs, device=device, **xargs)
+            name, 'expand', 0, inputs=inputs, **xargs)
         config_assert(
             len(self.inputs) == 2, 'ExpandLayer takes 2 and only 2 inputs')
         self.config.trans_type = trans_type
@@ -2304,12 +2294,10 @@ class MaxLayer(LayerBase):
                  inputs,
                  trans_type='non-seq',
                  active_type='linear',
-                 device=None,
                  bias=False,
                  output_max_index=None,
                  **xargs):
-        super(MaxLayer, self).__init__(
-            name, 'max', 0, inputs=inputs, device=device, **xargs)
+        super(MaxLayer, self).__init__(name, 'max', 0, inputs=inputs, **xargs)
         config_assert(len(self.inputs) == 1, 'MaxLayer must have 1 input')
         self.config.trans_type = trans_type
         self.config.active_type = active_type
@@ -2356,7 +2344,6 @@ class SequenceLastInstanceLayer(LayerBase):
                  inputs,
                  active_type='linear',
                  trans_type='non-seq',
-                 device=None,
                  bias=False,
                  **xargs):
         super(SequenceLastInstanceLayer, self).__init__(
@@ -2364,7 +2351,6 @@
             'seqlastins',
             0,
             inputs=inputs,
-            device=device,
             active_type=active_type,
             **xargs)
         config_assert(
@@ -2378,39 +2364,31 @@
 
 @config_layer('seqfirstins')
 class SequenceFirstInstanceLayer(SequenceLastInstanceLayer):
-    def __init__(
-        self,
-        name,
-        inputs,
-        active_type='linear',
-        trans_type='non-seq',
-        device=None,
-        bias=False, ):
+    def __init__(self,
+                 name,
+                 inputs,
+                 active_type='linear',
+                 trans_type='non-seq',
+                 bias=False,
+                 **xargs):
         super(SequenceFirstInstanceLayer, self).__init__(
             name,
             inputs=inputs,
             active_type=active_type,
-            device=device,
-            bias=bias)
+            bias=bias,
+            **xargs)
         self.config.trans_type = trans_type
         self.config.select_first = True
 
 
 @config_layer('seqconcat')
 class SequenceConcatLayer(LayerBase):
-    def __init__(self,
-                 name,
-                 inputs,
-                 active_type='linear',
-                 device=None,
-                 bias=False,
-                 **xargs):
+    def __init__(self, name, inputs, active_type='linear', bias=False, **xargs):
         super(SequenceConcatLayer, self).__init__(
             name,
             'seqconcat',
             0,
             inputs=inputs,
-            device=device,
             active_type=active_type,
             **xargs)
         config_assert(
@@ -2428,7 +2407,6 @@ class SequenceReshapeLayer(LayerBase):
                  size,
                  inputs,
                  active_type='linear',
-                 device=None,
                  bias=False,
                  **xargs):
         super(SequenceReshapeLayer, self).__init__(
@@ -2436,7 +2414,6 @@
             'seqreshape',
             size,
             inputs=inputs,
-            device=device,
             active_type=active_type,
             **xargs)
         config_assert(
@@ -2447,21 +2424,9 @@
 
 @config_layer('subseq')
 class SubSequenceLayer(LayerBase):
-    def __init__(self,
-                 name,
-                 inputs,
-                 active_type='linear',
-                 device=None,
-                 bias=False,
-                 **xargs):
+    def __init__(self, name, inputs, active_type='linear', bias=False, **xargs):
         super(SubSequenceLayer, self).__init__(
-            name,
-            'subseq',
-            0,
-            inputs=inputs,
-            device=device,
-            active_type=active_type,
-            **xargs)
+            name, 'subseq', 0, inputs=inputs, active_type=active_type, **xargs)
         config_assert(len(inputs) == 3, 'SubSequenceLayer must have 3 inputs')
         input_layer0 = self.get_input_layer(0)
         size = input_layer0.size
@@ -2471,9 +2436,9 @@
 
 @config_layer('out_prod')
 class OuterProdLayer(LayerBase):
-    def __init__(self, name, inputs, device=None, **xargs):
+    def __init__(self, name, inputs, device=None):
         super(OuterProdLayer, self).__init__(
-            name, 'out_prod', 0, inputs=inputs, device=device, **xargs)
+            name, 'out_prod', 0, inputs=inputs, device=device)
         config_assert(len(inputs) == 2, 'OuterProdLayer must have 2 inputs')
         input_layer0 = self.get_input_layer(0)
         input_layer1 = self.get_input_layer(1)
@@ -2482,9 +2447,9 @@
 
 @config_layer('power')
 class PowerLayer(LayerBase):
-    def __init__(self, name, inputs, device=None, **xargs):
+    def __init__(self, name, inputs, device=None):
         super(PowerLayer, self).__init__(
-            name, 'power', 0, inputs=inputs, device=device, **xargs)
+            name, 'power', 0, inputs=inputs, device=device)
         config_assert(len(inputs) == 2, 'PowerLayer must have 2 inputs')
         input_layer1 = self.get_input_layer(1)
         self.set_layer_size(input_layer1.size)
@@ -2495,8 +2460,13 @@
 
 @config_layer('slope_intercept')
 class SlopeInterceptLayer(LayerBase):
-    def __init__(self, name, inputs, slope=1.0, intercept=0.0,
-                 device=None, **xargs):
+    def __init__(self,
+                 name,
+                 inputs,
+                 slope=1.0,
+                 intercept=0.0,
+                 device=None,
+                 **xargs):
         super(SlopeInterceptLayer, self).__init__(
             name, 'slope_intercept', 0, inputs=inputs, device=device, **xargs)
         self.config.slope = slope
@@ -2508,9 +2478,9 @@
 
 @config_layer('scaling')
 class ScalingLayer(LayerBase):
-    def __init__(self, name, inputs, device=None, **xargs):
+    def __init__(self, name, inputs, device=None):
         super(ScalingLayer, self).__init__(
-            name, 'scaling', 0, inputs=inputs, device=device, **xargs)
+            name, 'scaling', 0, inputs=inputs, device=device)
         config_assert(len(inputs) == 2, 'ScalingLayer must have 2 inputs')
         input_layer1 = self.get_input_layer(1)
         self.set_layer_size(input_layer1.size)
@@ -2521,9 +2491,9 @@
 
 @config_layer('conv_shift')
 class ConvShiftLayer(LayerBase):
-    def __init__(self, name, inputs, device=None, **xargs):
+    def __init__(self, name, inputs, device=None):
         super(ConvShiftLayer, self).__init__(
-            name, 'conv_shift', 0, inputs=inputs, device=device, **xargs)
+            name, 'conv_shift', 0, inputs=inputs, device=device)
         config_assert(len(inputs) == 2, 'ConvShiftLayer must have 2 inputs')
         input_layer0 = self.get_input_layer(0)
         self.set_layer_size(input_layer0.size)
@@ -2531,9 +2501,9 @@
 
 @config_layer('convex_comb')
 class ConvexCombinationLayer(LayerBase):
-    def __init__(self, name, size, inputs, device=None, **xargs):
+    def __init__(self, name, size, inputs, device=None):
         super(ConvexCombinationLayer, self).__init__(
-            name, 'convex_comb', size, inputs=inputs, device=device, **xargs)
+            name, 'convex_comb', size, inputs=inputs, device=device)
         config_assert(
             len(self.inputs) == 2, 'ConvexCombinationLayer must have 2 inputs')
         config_assert(
@@ -2572,9 +2542,9 @@ class BilinearInterpLayer(LayerBase):
 
 @config_layer('sum_to_one_norm')
 class SumToOneNormLayer(LayerBase):
-    def __init__(self, name, inputs, device=None, **xargs):
+    def __init__(self, name, inputs, device=None):
         super(SumToOneNormLayer, self).__init__(
-            name, 'sum_to_one_norm', 0, inputs=inputs, device=device, **xargs)
+            name, 'sum_to_one_norm', 0, inputs=inputs, device=device)
         config_assert(
             len(self.inputs) == 1, 'SumToOneNormLayer must have 1 input')
         input_layer0 = self.get_input_layer(0)
@@ -2619,17 +2589,10 @@ class AverageLayer(LayerBase):
                  average_strategy='average',
                  trans_type='non-seq',
                  active_type='linear',
-                 device=None,
                  bias=False,
                  **xargs):
         super(AverageLayer, self).__init__(
-            name,
-            'average',
-            0,
-            inputs=inputs,
-            device=device,
-            active_type=active_type,
-            **xargs)
+            name, 'average', 0, inputs=inputs, active_type=active_type, **xargs)
         self.config.average_strategy = average_strategy
         self.config.trans_type = trans_type
         config_assert(len(inputs) == 1, 'AverageLayer must have 1 input')
@@ -2653,9 +2616,9 @@ class CosSimLayer(LayerBase):
 
 @config_layer('tensor')
 class TensorLayer(LayerBase):
-    def __init__(self, name, size, inputs, device=None, bias=True, **xargs):
+    def __init__(self, name, size, inputs, bias=True, **xargs):
         super(TensorLayer, self).__init__(
-            name, 'tensor', size, inputs=inputs, device=device, **xargs)
+            name, 'tensor', size, inputs=inputs, **xargs)
         config_assert(len(self.inputs) == 2, 'TensorLayer must have 2 inputs')
         config_assert(size > 0, 'size must be positive')
         config_assert(inputs[1].parameter_name == None,
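Taken together, the diff stops each layer config from redeclaring `device`: layers that forward `**xargs` drop the explicit parameter and let it ride through to `LayerBase`, while a handful of layers (OuterProdLayer, PowerLayer, SlopeInterceptLayer, ScalingLayer, ConvShiftLayer, ConvexCombinationLayer, SumToOneNormLayer) keep `device=None` explicitly. A minimal sketch of the forwarding pattern, using a simplified stand-in for `LayerBase` (the real class takes many more arguments):

```python
class LayerBase(object):
    # Simplified stand-in, not the real config_parser.LayerBase.
    def __init__(self, name, type, size, inputs, device=None):
        self.name = name
        self.type = type
        self.size = size
        self.inputs = inputs
        # The base class owns the device default; callers may override it.
        self.device = device


class PoolLayer(LayerBase):
    # After the change: no explicit `device` parameter. Anything extra,
    # including device=..., rides along in **xargs to the base class.
    def __init__(self, name, inputs, **xargs):
        super(PoolLayer, self).__init__(name, 'pool', 0, inputs=inputs,
                                        **xargs)


pool = PoolLayer('pool1', inputs=['img'], device=0)
assert pool.device == 0  # consumed by LayerBase, not by PoolLayer
```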