diff --git a/doc/api/v2/model_configs.rst b/doc/api/v2/model_configs.rst
index a9f33b33ef61bf846013364672ec26ae075d0300..b848bd7045a701a1a0d6e6b53da971ada2c569f5 100644
--- a/doc/api/v2/model_configs.rst
+++ b/doc/api/v2/model_configs.rst
@@ -4,3 +4,32 @@ Layers
 
 .. automodule:: paddle.v2.layer
     :members:
+
+
+==========
+Attributes
+==========
+
+.. automodule:: paddle.v2.attr
+    :members:
+
+===========
+Activations
+===========
+
+.. automodule:: paddle.v2.activation
+    :members:
+
+========
+Poolings
+========
+
+.. automodule:: paddle.v2.pooling
+    :members:
+
+========
+Networks
+========
+
+.. automodule:: paddle.v2.networks
+    :members:
diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py
index b68460b6a3ab621904f4dc4e48352044ab265a38..b94f8f9a783552519ca73e7cfc0937b302d3445b 100755
--- a/python/paddle/trainer_config_helpers/layers.py
+++ b/python/paddle/trainer_config_helpers/layers.py
@@ -795,17 +795,16 @@ def data_layer(name, size, height=None, width=None, layer_attr=None):
 
     .. code-block:: python
 
-        data = data_layer(name="input",
-                          size=1000)
+        data = data_layer(name="input", size=1000)
 
     :param name: Name of this data layer.
     :type name: basestring
     :param size: Size of this data layer.
     :type size: int
     :param height: Height of this data layer, used for image
-    :type size: int|None
+    :type height: int|None
     :param width: Width of this data layer, used for image
-    :type size: int|None
+    :type width: int|None
     :param layer_attr: Extra Layer Attribute.
     :type layer_attr: ExtraLayerAttribute.
     :return: LayerOutput object.
diff --git a/python/paddle/v2/activation.py b/python/paddle/v2/activation.py
index 1f3aab9ef3c5f69e22d7e83250d0ff46c1ff718a..21261a178203b633ca6cf59a5fc89edc24a868b9 100644
--- a/python/paddle/v2/activation.py
+++ b/python/paddle/v2/activation.py
@@ -12,26 +12,15 @@
 #  See the License for the specific language governing permissions and
 #  limitations under the License.
 
-from paddle.trainer_config_helpers.activations import *
+import paddle.trainer_config_helpers.activations
+import copy
 
-__all__ = [
-    "Base", "Tanh", "Sigmoid", "Softmax", "Identity", "Linear",
-    'SequenceSoftmax', "Exp", "Relu", "BRelu", "SoftRelu", "STanh", "Abs",
-    "Square", "Log"
-]
+__all__ = []
 
-Base = BaseActivation
-Tanh = TanhActivation
-Sigmoid = SigmoidActivation
-Softmax = SoftmaxActivation
-SequenceSoftmax = SequenceSoftmaxActivation
-Identity = IdentityActivation
-Linear = Identity
-Relu = ReluActivation
-BRelu = BReluActivation
-SoftRelu = SoftReluActivation
-STanh = STanhActivation
-Abs = AbsActivation
-Square = SquareActivation
-Exp = ExpActivation
-Log = LogActivation
+suffix = 'Activation'
+for act in paddle.trainer_config_helpers.activations.__all__:
+    new_name = act[:-len(suffix)]
+    globals()[new_name] = copy.copy(
+        getattr(paddle.trainer_config_helpers.activations, act))
+    globals()[new_name].__name__ = new_name
+    __all__.append(new_name)
diff --git a/python/paddle/v2/attr.py b/python/paddle/v2/attr.py
index 40c64f621b443d5613468b27d030ab08776641b2..32f78614e7f8abe7cffdc7a50a9fa77f1fc1a780 100644
--- a/python/paddle/v2/attr.py
+++ b/python/paddle/v2/attr.py
@@ -12,12 +12,16 @@
 #  See the License for the specific language governing permissions and
 #  limitations under the License.
 
-from paddle.trainer_config_helpers.attrs import *
+import paddle.trainer_config_helpers.attrs
 
 __all__ = [
     "Param",
     "Extra",
 ]
 
-Param = ParameterAttribute
-Extra = ExtraLayerAttribute
+Param = paddle.trainer_config_helpers.attrs.ParameterAttribute
+Extra = paddle.trainer_config_helpers.attrs.ExtraLayerAttribute
+
+for each in paddle.trainer_config_helpers.attrs.__all__:
+    globals()[each] = getattr(paddle.trainer_config_helpers.attrs, each)
+    __all__.append(each)
diff --git a/python/paddle/v2/config_base.py b/python/paddle/v2/config_base.py
index fa2ccec6c3270541dd6b13fdfd2323d10ceac642..1ec1d7bbdf912b940ca4b8e7b20eb11310f0e74f 100644
--- a/python/paddle/v2/config_base.py
+++ b/python/paddle/v2/config_base.py
@@ -13,12 +13,55 @@
 #  limitations under the License.
 
 import collections
-
+import re
 from paddle.trainer_config_helpers.default_decorators import wrap_name_default
 import paddle.trainer_config_helpers as conf_helps
 
 
+class LayerType(type):
+    def __new__(cls, name, bases, attrs):
+        method_name = attrs.get('METHOD_NAME', None)
+        if method_name is not None:
+            method = getattr(conf_helps, method_name)
+            if method.__doc__ is not None:
+                mapper = attrs.get("__map_docstr__", None)
+                if mapper is not None:
+                    attrs['__doc__'] = LayerType.__map_docstr__(
+                        mapper(method.__doc__),
+                        method_name=method_name,
+                        name=name)
+                else:
+                    attrs['__doc__'] = LayerType.__map_docstr__(
+                        method.__doc__, method_name=method_name, name=name)
+        return super(LayerType, cls).__new__(cls, name, bases, attrs)
+
+    @staticmethod
+    def __map_docstr__(doc, name, method_name):
+        assert isinstance(doc, basestring)
+
+        # replace LayerOutput with paddle.v2.config_base.Layer
+        doc = doc.replace("LayerOutput", "paddle.v2.config_base.Layer")
+
+        doc = doc.replace('ParameterAttribute',
+                          'paddle.v2.attr.ParameterAttribute')
+
+        doc = re.sub(r'ExtraLayerAttribute[^\s]?',
+                     'paddle.v2.attr.ExtraAttribute', doc)
+
+        # xxx_layer to xxx
+        doc = re.sub(r"(?P<name>[a-z]+)_layer", r"\g<name>", doc)
+
+        # XxxxActivation to paddle.v2.Activation.Xxxx
+        doc = re.sub(r"(?P<name>[A-Z][a-zA-Z]+)Activation",
+                     r"paddle.v2.Activation.\g<name>", doc)
+
+        # TODO(yuyang18): Add more rules if needed.
+        return doc
+
+
 class Layer(object):
+    __metaclass__ = LayerType
+
     def __init__(self, name=None, parent_layers=None):
         assert isinstance(parent_layers, dict)
         self.name = name
@@ -80,6 +123,8 @@ def __convert_to_v2__(method_name, parent_names, is_default_name=True):
     wrapper = None
 
     class V2LayerImpl(Layer):
+        METHOD_NAME = method_name
+
         def __init__(self, **kwargs):
             parent_layers = dict()
             other_kwargs = dict()
diff --git a/python/paddle/v2/layer.py b/python/paddle/v2/layer.py
index 711226d659d49fc2646c34c011c7773ae2517ec9..1e4efedde363f20fde168941adcb6e8a594b533a 100644
--- a/python/paddle/v2/layer.py
+++ b/python/paddle/v2/layer.py
@@ -28,7 +28,7 @@ The primary usage shows below.
                                  act=paddle.activation.Softmax())
 
     # use prediction instance where needed.
-    parameters = paddle.v2.parameters.create(cost)
+    parameters = paddle.parameters.create(cost)
 """
 
 import collections
@@ -47,26 +47,32 @@ from paddle.trainer.config_parser import \
     RecurrentLayerGroupEnd, model_type
 
 import activation
+import re
 import data_type
 
 __all__ = ['parse_network', 'data']
 
-__projection_names__ = filter(lambda x: x.endswith('_projection'),
-                              dir(conf_helps))
-__all__ += __projection_names__
-
-__operator_names__ = filter(lambda x: x.endswith('_operator'), dir(conf_helps))
-__all__ += __operator_names__
-
 
 def parse_network(*outputs):
     """
-    parse all output layers and then generate a model config proto.
-    :param outputs:
-    :return:
+    Parse all output layers and then generate a ModelConfig object.
+
+    .. note::
+
+        This function is used internally in the paddle.v2 module. Users should
+        never invoke this method directly.
+
+    :param outputs: Output layers.
+    :type outputs: Layer
+    :return: A ModelConfig object instance.
+    :rtype: ModelConfig
     """
 
     def __real_func__():
+        """
+        __real_func__ is the function that config_parser.parse invokes. It is
+        a plain old PaddlePaddle configuration function.
+        """
         context = dict()
         real_output = [each.to_proto(context=context) for each in outputs]
         conf_helps.outputs(real_output)
@@ -81,6 +87,8 @@ So we also need to implement some special LayerV2.
 
 
 class DataLayerV2(Layer):
+    METHOD_NAME = 'data_layer'
+
     def __init__(self, name, type, **kwargs):
         assert isinstance(type, data_type.InputType)
 
@@ -99,6 +107,17 @@ class DataLayerV2(Layer):
                 args[each] = self.__kwargs__[each]
         return getattr(conf_helps, self.__method_name__)(name=self.name, **args)
 
+    def __map_docstr__(doc):
+        doc = re.sub(r'(data = [^\)]+)\).*',
+                     "data = paddle.layer.data(name=\"input\", "
+                     "type=paddle.data_type.dense_vector(1000))", doc)
+
+        doc = re.sub(r':param size:.*',
+                     ':param type: Data type of this data layer', doc)
+        doc = re.sub(r':type size:.*',
+                     ":type type: paddle.v2.data_type.InputType", doc)
+        return doc
+
 
 class WithExtraParent(Layer):
     def extra_parent(self):
@@ -347,6 +366,7 @@ class RecurrentLayerOutput(Layer):
 
 LayerV2 = Layer
 data = DataLayerV2
+data.__name__ = 'data'
 AggregateLevel = conf_helps.layers.AggregateLevel
 ExpandLevel = conf_helps.layers.ExpandLevel
 memory = MemoryV2
@@ -386,6 +406,7 @@ def __convert_layer__(_new_name_, _old_name_, _parent_names_):
         global __all__
         __all__.append(_new_name_)
         globals()[new_name] = __convert_to_v2__(_old_name_, _parent_names_)
+        globals()[new_name].__name__ = new_name
 
 
 for each_layer_name in dir(conf_helps):
@@ -399,21 +420,6 @@ del parent_names
 del new_name
 del each_layer_name
 
-# convert projection
-for prj in __projection_names__:
-    globals()[prj] = __convert_to_v2__(
-        prj, parent_names=['input'], is_default_name=False)
-
-# convert operator
-operator_list = [
-    # [V1_method_name, parent_names],
-    ['dotmul_operator', ['a', 'b']],
-    ['conv_operator', ['img', 'filter']]
-]
-for op in operator_list:
-    globals()[op[0]] = __convert_to_v2__(
-        op[0], parent_names=op[1], is_default_name=False)
-
 
 @wrap_name_default()
 def recurrent_group(step, input, name=None):
@@ -464,3 +470,29 @@ def recurrent_group(step, input, name=None):
         return retv[0]
     else:
         return retv
+
+
+__projection_names__ = filter(lambda x: x.endswith('_projection'),
+                              dir(conf_helps))
+
+__all__ += __projection_names__
+
+__operator_names__ = filter(lambda x: x.endswith('_operator'), dir(conf_helps))
+__all__ += __operator_names__
+
+# convert projection
+for prj in __projection_names__:
+    globals()[prj] = __convert_to_v2__(
+        prj, parent_names=['input'], is_default_name=False)
+    globals()[prj].__name__ = prj
+
+# convert operator
+operator_list = [
+    # [V1_method_name, parent_names],
+    ['dotmul_operator', ['a', 'b']],
+    ['conv_operator', ['img', 'filter']]
+]
+for op in operator_list:
+    globals()[op[0]] = __convert_to_v2__(
+        op[0], parent_names=op[1], is_default_name=False)
+    globals()[op[0]].__name__ = op[0]
diff --git a/python/paddle/v2/networks.py b/python/paddle/v2/networks.py
index 74d91593d8551bc1592a78efb704ab3b89f0e0d9..9e6644196c8242cc3fed7a4fb1503697e5b59ffb 100644
--- a/python/paddle/v2/networks.py
+++ b/python/paddle/v2/networks.py
@@ -38,6 +38,7 @@ def __initialize__():
             parent_names=parents,
             is_default_name='name' in argspec.args)
         globals()[each_subnetwork] = v2_subnet
+        globals()[each_subnetwork].__name__ = each_subnetwork
         global __all__
         __all__.append(each_subnetwork)
diff --git a/python/paddle/v2/pooling.py b/python/paddle/v2/pooling.py
index 9076a159bb4f2c58bf52cba1657c58510637f4f8..4881c27d1d6d3d926f12aab096f377164debf1ef 100644
--- a/python/paddle/v2/pooling.py
+++ b/python/paddle/v2/pooling.py
@@ -12,13 +12,15 @@
 #  See the License for the specific language governing permissions and
 #  limitations under the License.
 
-from paddle.trainer_config_helpers.poolings import *
+import paddle.trainer_config_helpers.poolings
+import copy
 
-__all__ = ["Max", "CudnnMax", "Avg", "CudnnAvg", "Sum", "SquareRootN"]
+__all__ = []
+suffix = 'Pooling'
 
-Max = MaxPooling
-CudnnMax = CudnnMaxPooling
-Avg = AvgPooling
-CudnnAvg = CudnnAvgPooling
-Sum = SumPooling
-SquareRootN = SquareRootNPooling
+for name in paddle.trainer_config_helpers.poolings.__all__:
+    new_name = name[:-len(suffix)]
+    globals()[new_name] = copy.copy(
+        getattr(paddle.trainer_config_helpers.poolings, name))
+    globals()[new_name].__name__ = new_name
+    __all__.append(new_name)
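
A minimal standalone sketch of the docstring-mapping technique that LayerType.__map_docstr__ applies in config_base.py above. The helper name map_docstr and the sample docstring are hypothetical, not part of the patch; only the replace/re.sub rules mirror it. V1 docstrings are rewritten with plain string replacement plus re.sub named groups, so generated V2 classes document themselves in V2 terms.

import re


def map_docstr(doc):
    # LayerOutput -> paddle.v2.config_base.Layer
    doc = doc.replace("LayerOutput", "paddle.v2.config_base.Layer")
    # fc_layer -> fc, data_layer -> data, ...
    doc = re.sub(r"(?P<name>[a-z]+)_layer", r"\g<name>", doc)
    # TanhActivation -> paddle.v2.Activation.Tanh, ...
    doc = re.sub(r"(?P<name>[A-Z][a-zA-Z]+)Activation",
                 r"paddle.v2.Activation.\g<name>", doc)
    return doc


print(map_docstr(":return: LayerOutput of fc_layer with TanhActivation."))
# :return: paddle.v2.config_base.Layer of fc with paddle.v2.Activation.Tanh.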
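
A second sketch, this time of the suffix-stripping export loop now used in activation.py and pooling.py. The two classes and the v1_names list are stand-ins for the real exports in paddle.trainer_config_helpers.poolings, so this runs on its own.

import copy


class MaxPooling(object):
    """Stand-in for paddle.trainer_config_helpers.poolings.MaxPooling."""


class AvgPooling(object):
    """Stand-in for paddle.trainer_config_helpers.poolings.AvgPooling."""


v1_names = ['MaxPooling', 'AvgPooling']  # plays the role of poolings.__all__
suffix = 'Pooling'
__all__ = []

for name in v1_names:
    new_name = name[:-len(suffix)]       # 'MaxPooling' -> 'Max'
    globals()[new_name] = copy.copy(globals()[name])
    globals()[new_name].__name__ = new_name
    __all__.append(new_name)

assert __all__ == ['Max', 'Avg']
assert MaxPooling.__name__ == 'Max'      # copy.copy treats classes as atomic

Note the last assertion: copy.copy returns a class object unchanged rather than duplicating it, so the short name is an alias for the same class and the __name__ assignment renames it in place.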