diff --git a/python/paddle/trainer_config_helpers/default_decorators.py b/python/paddle/trainer_config_helpers/default_decorators.py index ad3efcbf369411b9c42b2a32ed05b04f86bf7de6..2f25579fcdd9793e4c165439c9934a2bccb63617 100644 --- a/python/paddle/trainer_config_helpers/default_decorators.py +++ b/python/paddle/trainer_config_helpers/default_decorators.py @@ -52,6 +52,10 @@ def wrap_param_default(param_names=None, kwargs[name] = default_factory(func) return func(*args, **kwargs) + if hasattr(func, 'argspec'): + __wrapper__.argspec = func.argspec + else: + __wrapper__.argspec = inspect.getargspec(func) return __wrapper__ return __impl__ diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py index 1bb1a01d509e6412c254fce856101137e66b1e12..b68460b6a3ab621904f4dc4e48352044ab265a38 100755 --- a/python/paddle/trainer_config_helpers/layers.py +++ b/python/paddle/trainer_config_helpers/layers.py @@ -14,6 +14,7 @@ import functools import collections +import inspect from paddle.trainer.config_parser import * from .activations import LinearActivation, SigmoidActivation, TanhActivation, \ @@ -316,6 +317,11 @@ def layer_support(*attrs): val.check(method.__name__) return method(*args, **kwargs) + if hasattr(method, 'argspec'): + wrapper.argspec = method.argspec + else: + wrapper.argspec = inspect.getargspec(method) + return wrapper return decorator diff --git a/python/paddle/v2/layer.py b/python/paddle/v2/layer.py index d15e6398f51f43c1eeab67bba654f91cc56135a4..d68b66cf02ce891e14312a64be74c59c795da33f 100644 --- a/python/paddle/v2/layer.py +++ b/python/paddle/v2/layer.py @@ -67,6 +67,7 @@ paddle.v2.parameters.create, no longer exposed to users. 
""" import collections +import inspect import paddle.trainer_config_helpers as conf_helps from paddle.trainer_config_helpers.config_parser_utils import \ @@ -74,26 +75,14 @@ from paddle.trainer_config_helpers.config_parser_utils import \ from paddle.trainer_config_helpers.default_decorators import wrap_name_default from paddle.trainer_config_helpers.default_decorators import wrap_act_default -from paddle.trainer_config_helpers.default_decorators import wrap_bias_attr_default +from paddle.trainer_config_helpers.default_decorators import \ + wrap_bias_attr_default from paddle.trainer_config_helpers.layers import layer_support import data_type import activation -import attr - -__all__ = [ - 'parse_network', 'data', 'fc', 'conv_shift', 'img_conv', 'img_pool', 'spp', - 'maxout', 'img_cmrnorm', 'batch_norm', 'sum_to_one_norm', 'recurrent', - 'lstmemory', 'grumemory', 'pool', 'last_seq', 'first_seq', 'concat', - 'seq_concat', 'block_expand', 'expand', 'repeat', 'seq_reshape', 'addto', - 'linear_comb', 'interpolation', 'bilinear_interp', 'power', 'scaling', - 'slope_intercept', 'tensor', 'cos_sim', 'trans', 'max_id', 'sampling_id', - 'pad', 'classification_cost', 'cross_entropy_cost', - 'cross_entropy_with_selfnorm_cost', 'regression_cost', - 'multi_binary_label_cross_entropy_cost', 'rank_cost', 'lambda_cost', - 'sum_cost', 'huber_cost', 'crf', 'crf_decoding', 'ctc', 'warp_ctc', 'nce', - 'hsigmoid', 'eos' -] + +__all__ = ['parse_network', 'data'] __projection_names__ = filter(lambda x: x.endswith('_projection'), dir(conf_helps)) @@ -288,83 +277,51 @@ data = DataLayerV2 AggregateLevel = conf_helps.layers.AggregateLevel ExpandLevel = conf_helps.layers.ExpandLevel -layer_list = [ - # [V2LayerImpl, V1_method_name, parent_names] - # fully connected layers - ['fc', 'fc_layer', ['input']], - # conv layers - ['conv_shift', 'conv_shift_layer', ['a', 'b']], - ['img_conv', 'img_conv_layer', ['input']], - # image pooling layers - ['img_pool', 'img_pool_layer', ['input']], - ['spp', 
'spp_layer', ['input']], - ['maxout', 'maxout_layer', ['input']], - # norm layers - ['img_cmrnorm', 'img_cmrnorm_layer', ['input']], - ['batch_norm', 'batch_norm_layer', ['input']], - ['sum_to_one_norm', 'sum_to_one_norm_layer', ['input']], - # recurrent layers - ['recurrent', 'recurrent_layer', ['input']], - ['lstmemory', 'lstmemory', ['input']], - ['grumemory', 'grumemory', ['input']], - # aggregate layers - ['pool', 'pooling_layer', ['input']], - ['last_seq', 'last_seq', ['input']], - ['first_seq', 'first_seq', ['input']], - ['concat', 'concat_layer', ['input']], - ['seq_concat', 'seq_concat_layer', ['a', 'b']], - # reshaping layers - ['block_expand', 'block_expand_layer', ['input']], - ['expand', 'expand_layer', ['input', 'expand_as']], - ['repeat', 'repeat_layer', ['input']], - ['rotate', 'rotate_layer', ['input']], - ['seq_reshape', 'seq_reshape_layer', ['input']], - # math layers - ['addto', 'addto_layer', ['input']], - ['linear_comb', 'linear_comb_layer', ['weights', 'vectors']], - ['interpolation', 'interpolation_layer', ['input', 'weight']], - ['bilinear_interp', 'bilinear_interp_layer', ['input']], - ['power', 'power_layer', ['input', 'weight']], - ['scaling', 'scaling_layer', ['input', 'weight']], - ['slope_intercept', 'slope_intercept_layer', ['input']], - ['tensor', 'tensor_layer', ['a', 'b']], - ['cos_sim', 'cos_sim', ['a', 'b']], - ['trans', 'trans_layer', ['input']], - # sampling layers - ['max_id', 'maxid_layer', ['input']], - ['sampling_id', 'sampling_id_layer', ['input']], - # slicing and joining layers - ['pad', 'pad_layer', ['input']], - # cost layers - [ - 'classification_cost', 'classification_cost', - ['input', 'label', 'weight'] - ], - ['regression_cost', 'regression_cost', ['input', 'label', 'weight']], - ['cross_entropy_cost', 'cross_entropy', ['input', 'label']], - [ - 'cross_entropy_with_selfnorm_cost', 'cross_entropy_with_selfnorm', - ['input', 'label'] - ], - [ - 'multi_binary_label_cross_entropy_cost', - 
'multi_binary_label_cross_entropy', ['input', 'label'] - ], - ['rank_cost', 'rank_cost', ['left', 'right', 'label', 'weight']], - ['lambda_cost', 'lambda_cost', ['input', 'score']], - ['sum_cost', 'sum_cost', ['input']], - ['huber_cost', 'huber_cost', ['input', 'label']], - ['crf', 'crf_layer', ['input', 'label']], - ['crf_decoding', 'crf_decoding_layer', ['input']], - ['ctc', 'ctc_layer', ['input', 'label']], - ['warp_ctc', 'warp_ctc_layer', ['input', 'label']], - ['nce', 'nce_layer', ['input', 'label']], - ['hsigmoid', 'hsigmoid', ['input', 'label']], - # check layers - ['eos', 'eos_layer', ['input']] -] -for l in layer_list: - globals()[l[0]] = __convert_to_v2__(l[1], l[2]) + +def __layer_name_mapping__(inname): + if inname in ['data_layer', 'memory', 'mixed_layer']: + # Do Not handle these layers + return + elif inname == 'maxid_layer': + return 'max_id' + elif inname.endswith('memory') or inname.endswith( + '_seq') or inname.endswith('_sim') or inname == 'hsigmoid': + return inname + elif inname in [ + 'cross_entropy', 'multi_binary_label_cross_entropy', + 'cross_entropy_with_selfnorm' + ]: + return inname + "_cost" + elif inname.endswith('_cost'): + return inname + elif inname.endswith("_layer"): + return inname[:-len("_layer")] + + +def __layer_name_mapping_parent_names__(inname): + all_args = getattr(conf_helps, inname).argspec.args + return filter( + lambda x: x in ['input1', 'input2','label', 'input', 'a', 'b', 'expand_as', + 'weights', 'vectors', 'weight', 'score', 'left', 'right'], + all_args) + + +def __convert_layer__(_new_name_, _old_name_, _parent_names_): + global __all__ + __all__.append(_new_name_) + globals()[_new_name_] = __convert_to_v2__(_old_name_, _parent_names_) + + +for each_layer_name in dir(conf_helps): + new_name = __layer_name_mapping__(each_layer_name) + if new_name is not None: + parent_names = __layer_name_mapping_parent_names__(each_layer_name) + assert len(parent_names) != 0, each_layer_name + __convert_layer__(new_name, 
each_layer_name, parent_names) + +del parent_names +del new_name +del each_layer_name # convert projection for prj in __projection_names__: diff --git a/python/paddle/v2/tests/test_layer.py b/python/paddle/v2/tests/test_layer.py index bb0099ea2fbb78b0a05eedf23af95a02e8849015..b138ddbbe6c0a431393fef165b4eaebf7bfa81e4 100644 --- a/python/paddle/v2/tests/test_layer.py +++ b/python/paddle/v2/tests/test_layer.py @@ -11,17 +11,13 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import difflib import unittest -import paddle.trainer_config_helpers as conf_helps import paddle.v2.activation as activation import paddle.v2.attr as attr import paddle.v2.data_type as data_type import paddle.v2.layer as layer import paddle.v2.pooling as pooling -from paddle.trainer_config_helpers.config_parser_utils import \ - parse_network_config as parse_network pixel = layer.data(name='pixel', type=data_type.dense_vector(128)) label = layer.data(name='label', type=data_type.integer_value(10)) @@ -70,7 +66,7 @@ class ImageLayerTest(unittest.TestCase): class AggregateLayerTest(unittest.TestCase): def test_aggregate_layer(self): - pool = layer.pool( + pool = layer.pooling( input=pixel, pooling_type=pooling.Avg(), agg_level=layer.AggregateLevel.EACH_SEQUENCE)