Commit 0bf542ec authored by Yu Yang, committed by GitHub

Merge pull request #1554 from reyoung/feature/v2_docs_layers

Complete documentation generation for v2 model configuration
Sphinx API reference (reST):

@@ -4,3 +4,32 @@ Layers
 .. automodule:: paddle.v2.layer
     :members:
+
+==========
+Attributes
+==========
+
+.. automodule:: paddle.v2.attr
+    :members:
+
+===========
+Activations
+===========
+
+.. automodule:: paddle.v2.activation
+    :members:
+
+========
+Poolings
+========
+
+.. automodule:: paddle.v2.pooling
+    :members:
+
+========
+Networks
+========
+
+.. automodule:: paddle.v2.networks
+    :members:
paddle/trainer_config_helpers/layers.py:

@@ -795,17 +795,16 @@ def data_layer(name, size, height=None, width=None, layer_attr=None):
     .. code-block:: python

-        data = data_layer(name="input",
-                          size=1000)
+        data = data_layer(name="input", size=1000)

     :param name: Name of this data layer.
     :type name: basestring
     :param size: Size of this data layer.
     :type size: int
     :param height: Height of this data layer, used for image
-    :type size: int|None
+    :type height: int|None
     :param width: Width of this data layer, used for image
-    :type size: int|None
+    :type width: int|None
     :param layer_attr: Extra Layer Attribute.
     :type layer_attr: ExtraLayerAttribute.
     :return: LayerOutput object.
paddle/v2/activation.py:

@@ -12,26 +12,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from paddle.trainer_config_helpers.activations import *
+import paddle.trainer_config_helpers.activations
+import copy

-__all__ = [
-    "Base", "Tanh", "Sigmoid", "Softmax", "Identity", "Linear",
-    'SequenceSoftmax', "Exp", "Relu", "BRelu", "SoftRelu", "STanh", "Abs",
-    "Square", "Log"
-]
+__all__ = []

-Base = BaseActivation
-Tanh = TanhActivation
-Sigmoid = SigmoidActivation
-Softmax = SoftmaxActivation
-SequenceSoftmax = SequenceSoftmaxActivation
-Identity = IdentityActivation
-Linear = Identity
-Relu = ReluActivation
-BRelu = BReluActivation
-SoftRelu = SoftReluActivation
-STanh = STanhActivation
-Abs = AbsActivation
-Square = SquareActivation
-Exp = ExpActivation
-Log = LogActivation
+suffix = 'Activation'
+for act in paddle.trainer_config_helpers.activations.__all__:
+    new_name = act[:-len(suffix)]
+    globals()[new_name] = copy.copy(
+        getattr(paddle.trainer_config_helpers.activations, act))
+    globals()[new_name].__name__ = new_name
+    __all__.append(new_name)
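The rewritten module derives every short v2 name from the v1 export list instead of maintaining a hand-written alias table. Below is a self-contained sketch of that pattern with toy stand-in classes (the real names come from paddle.trainer_config_helpers.activations.__all__):

```python
import copy

# Toy stand-ins for the v1 exports; the real module lists names such as
# 'TanhActivation' and 'SigmoidActivation' in its __all__.
class TanhActivation(object):
    pass


class SigmoidActivation(object):
    pass


_v1_all = ['TanhActivation', 'SigmoidActivation']

suffix = 'Activation'
__all__ = []
for act in _v1_all:
    new_name = act[:-len(suffix)]    # 'TanhActivation' -> 'Tanh'
    # copy.copy treats classes as atomic in CPython, so this effectively
    # re-binds the same class object under the shorter name.
    cls = copy.copy(globals()[act])
    cls.__name__ = new_name          # rename so the docs show 'Tanh'
    globals()[new_name] = cls
    __all__.append(new_name)

print(Tanh.__name__)  # prints: Tanh
```

The `__name__` rewrite matters for the new automodule sections: without it, autodoc tends to render `Tanh` merely as an alias of `TanhActivation` instead of documenting it under its v2 name.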
paddle/v2/attr.py:

@@ -12,12 +12,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from paddle.trainer_config_helpers.attrs import *
+import paddle.trainer_config_helpers.attrs

 __all__ = [
     "Param",
     "Extra",
 ]

-Param = ParameterAttribute
-Extra = ExtraLayerAttribute
+Param = paddle.trainer_config_helpers.attrs.ParameterAttribute
+Extra = paddle.trainer_config_helpers.attrs.ExtraLayerAttribute
+
+for each in paddle.trainer_config_helpers.attrs.__all__:
+    globals()[each] = getattr(paddle.trainer_config_helpers.attrs, each)
+    __all__.append(each)
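After this module runs, the attribute classes are reachable both under the short v2 aliases and under the re-exported v1 names. A hedged usage sketch (the keyword arguments are the ones the v1 classes accept, to the best of my reading):

```python
import paddle.v2.attr as attr

# Both spellings resolve to the same underlying v1 classes.
assert attr.Param is attr.ParameterAttribute
assert attr.Extra is attr.ExtraLayerAttribute

# e.g. a per-parameter learning rate and a dropout layer attribute.
weight_attr = attr.Param(learning_rate=0.1)
layer_attr = attr.Extra(drop_rate=0.5)
```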
paddle/v2/config_base.py:

@@ -13,12 +13,55 @@
 # limitations under the License.

 import collections
+import re
 from paddle.trainer_config_helpers.default_decorators import wrap_name_default
 import paddle.trainer_config_helpers as conf_helps


+class LayerType(type):
+    def __new__(cls, name, bases, attrs):
+        method_name = attrs.get('METHOD_NAME', None)
+        if method_name is not None:
+            method = getattr(conf_helps, method_name)
+            if method.__doc__ is not None:
+                mapper = attrs.get("__map_docstr__", None)
+                if mapper is not None:
+                    attrs['__doc__'] = LayerType.__map_docstr__(
+                        mapper(method.__doc__),
+                        method_name=method_name,
+                        name=name)
+                else:
+                    attrs['__doc__'] = LayerType.__map_docstr__(
+                        method.__doc__, method_name=method_name, name=name)
+        return super(LayerType, cls).__new__(cls, name, bases, attrs)
+
+    @staticmethod
+    def __map_docstr__(doc, name, method_name):
+        assert isinstance(doc, basestring)
+
+        # replace LayerOutput to paddle.v2.config_base.Layer
+        doc = doc.replace("LayerOutput", "paddle.v2.config_base.Layer")
+
+        doc = doc.replace('ParameterAttribute',
+                          'paddle.v2.attr.ParameterAttribute')
+
+        doc = re.sub(r'ExtraLayerAttribute[^\s]?',
+                     'paddle.v2.attr.ExtraAttribute', doc)
+
+        # xxx_layer to xxx
+        doc = re.sub(r"(?P<name>[a-z]+)_layer", r"\g<name>", doc)
+
+        # XxxxActivation to paddle.v2.Activation.Xxxx
+        doc = re.sub(r"(?P<name>[A-Z][a-zA-Z]+)Activation",
+                     r"paddle.v2.Activation.\g<name>", doc)
+
+        # TODO(yuyang18): Add more rules if needed.
+        return doc
+
+
 class Layer(object):
+    __metaclass__ = LayerType
+
     def __init__(self, name=None, parent_layers=None):
         assert isinstance(parent_layers, dict)
         self.name = name
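The net effect: a v2 layer class whose body merely names its v1 counterpart (via METHOD_NAME, wired up in the next hunk) inherits that function's docstring, rewritten into v2 vocabulary at class-creation time. A minimal self-contained sketch of the metaclass trick, in the same Python 2 style as the diff (fc_layer is a stand-in for a conf_helps function):

```python
import re


def fc_layer(input, size):
    """A fully connected layer. Returns a LayerOutput object."""


class DocMappingType(type):
    def __new__(cls, name, bases, attrs):
        method_name = attrs.get('METHOD_NAME', None)
        if method_name is not None:
            # Pull the v1 docstring and rewrite v1 names to v2 names.
            doc = globals()[method_name].__doc__
            attrs['__doc__'] = re.sub(r'LayerOutput',
                                      'paddle.v2.config_base.Layer', doc)
        return super(DocMappingType, cls).__new__(cls, name, bases, attrs)


class fc(object):
    __metaclass__ = DocMappingType  # Python 2 metaclass syntax
    METHOD_NAME = 'fc_layer'


print(fc.__doc__)
# A fully connected layer. Returns a paddle.v2.config_base.Layer object.
```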
@@ -80,6 +123,8 @@ def __convert_to_v2__(method_name, parent_names, is_default_name=True):
     wrapper = None

     class V2LayerImpl(Layer):
+        METHOD_NAME = method_name
+
         def __init__(self, **kwargs):
             parent_layers = dict()
             other_kwargs = dict()
paddle/v2/layer.py:

@@ -28,7 +28,7 @@ The primary usage shows below.
                                act=paddle.activation.Softmax())

     # use prediction instance where needed.
-    parameters = paddle.v2.parameters.create(cost)
+    parameters = paddle.parameters.create(cost)
     """

 import collections

@@ -47,26 +47,32 @@ from paddle.trainer.config_parser import \
     RecurrentLayerGroupEnd, model_type
 import activation
+import re
 import data_type

 __all__ = ['parse_network', 'data']

-__projection_names__ = filter(lambda x: x.endswith('_projection'),
-                              dir(conf_helps))
-__all__ += __projection_names__
-
-__operator_names__ = filter(lambda x: x.endswith('_operator'), dir(conf_helps))
-__all__ += __operator_names__
-

 def parse_network(*outputs):
     """
-    parse all output layers and then generate a model config proto.
-    :param outputs:
-    :return:
+    Parse all output layers and then generate a ModelConfig object.
+
+    .. note::
+
+        This function is used internally in paddle.v2 module. User should never
+        invoke this method.
+
+    :param outputs: Output layers.
+    :type outputs: Layer
+    :return: A ModelConfig object instance.
+    :rtype: ModelConfig
     """

     def __real_func__():
+        """
+        __real_func__ is the function that config_parser.parse invoked. It is
+        the plain old paddle configuration function.
+        """
         context = dict()
         real_output = [each.to_proto(context=context) for each in outputs]
         conf_helps.outputs(real_output)
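Even with the new note, parse_network remains in __all__, so it is still handy for inspecting what a configuration compiles to. A hedged sketch of such a debugging session (assumes a working paddle.v2 installation; paddle.init and the data/fc layers are the usual v2 entry points as I understand them):

```python
import paddle.v2 as paddle

paddle.init(use_gpu=False, trainer_count=1)  # typical v2 bootstrapping

img = paddle.layer.data(name="input",
                        type=paddle.data_type.dense_vector(784))
out = paddle.layer.fc(input=img, size=10,
                      act=paddle.activation.Softmax())

# Dumps the generated ModelConfig protobuf message as text.
print(paddle.layer.parse_network(out))
```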
@@ -81,6 +87,8 @@ So we also need to implement some special LayerV2.

 class DataLayerV2(Layer):
+    METHOD_NAME = 'data_layer'
+
     def __init__(self, name, type, **kwargs):
         assert isinstance(type, data_type.InputType)
@@ -99,6 +107,17 @@ class DataLayerV2(Layer):
             args[each] = self.__kwargs__[each]
         return getattr(conf_helps, self.__method_name__)(name=self.name, **args)

+    def __map_docstr__(doc):
+        doc = re.sub(r'(data = [^\)]+)\).*',
+                     "data = paddle.layer.data(name=\"input\", "
+                     "type=paddle.data_type.dense_vector(1000))", doc)
+
+        doc = re.sub(r':param size:.*',
+                     ':param type: Data type of this data layer', doc)
+
+        doc = re.sub(r':type size:.*',
+                     ":type size: paddle.v2.data_type.InputType", doc)
+        return doc
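Applied to the data_layer docstring fixed earlier in this commit, these three substitutions rewrite the v1 example and the size parameter into their v2 forms, roughly:

```python
# before (v1 docstring, from the layers.py hunk above):
#     data = data_layer(name="input", size=1000)
#     :param size: Size of this data layer.
#     :type size: int
#
# after DataLayerV2.__map_docstr__:
#     data = paddle.layer.data(name="input",
#                              type=paddle.data_type.dense_vector(1000))
#     :param type: Data type of this data layer
#     :type size: paddle.v2.data_type.InputType
```

Note the last replacement string still emits the literal `:type size:` label; that is faithful to the diff itself.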
 class WithExtraParent(Layer):
     def extra_parent(self):

@@ -347,6 +366,7 @@ class RecurrentLayerOutput(Layer):
 LayerV2 = Layer
 data = DataLayerV2
+data.__name__ = 'data'
 AggregateLevel = conf_helps.layers.AggregateLevel
 ExpandLevel = conf_helps.layers.ExpandLevel
 memory = MemoryV2
@@ -386,6 +406,7 @@ def __convert_layer__(_new_name_, _old_name_, _parent_names_):
         global __all__
         __all__.append(_new_name_)
         globals()[new_name] = __convert_to_v2__(_old_name_, _parent_names_)
+        globals()[new_name].__name__ = new_name


 for each_layer_name in dir(conf_helps):
@@ -399,21 +420,6 @@ del parent_names
 del new_name
 del each_layer_name

-# convert projection
-for prj in __projection_names__:
-    globals()[prj] = __convert_to_v2__(
-        prj, parent_names=['input'], is_default_name=False)
-
-# convert operator
-operator_list = [
-    # [V1_method_name, parent_names],
-    ['dotmul_operator', ['a', 'b']],
-    ['conv_operator', ['img', 'filter']]
-]
-for op in operator_list:
-    globals()[op[0]] = __convert_to_v2__(
-        op[0], parent_names=op[1], is_default_name=False)
-

 @wrap_name_default()
 def recurrent_group(step, input, name=None):
@@ -464,3 +470,29 @@ def recurrent_group(step, input, name=None):
         return retv[0]
     else:
         return retv
+
+
+__projection_names__ = filter(lambda x: x.endswith('_projection'),
+                              dir(conf_helps))
+__all__ += __projection_names__
+
+__operator_names__ = filter(lambda x: x.endswith('_operator'), dir(conf_helps))
+__all__ += __operator_names__
+
+# convert projection
+for prj in __projection_names__:
+    globals()[prj] = __convert_to_v2__(
+        prj, parent_names=['input'], is_default_name=False)
+    globals()[prj].__name__ = prj
+
+# convert operator
+operator_list = [
+    # [V1_method_name, parent_names],
+    ['dotmul_operator', ['a', 'b']],
+    ['conv_operator', ['img', 'filter']]
+]
+for op in operator_list:
+    globals()[op[0]] = __convert_to_v2__(
+        op[0], parent_names=op[1], is_default_name=False)
+    globals()[op[0]].__name__ = op[0]
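Every converted symbol, whether layer, projection, or operator, now gets its `__name__` patched. A toy reduction of the factory shows why: without the patch, every generated class reports the factory-local name, which is what Sphinx and repr() would display:

```python
def make_v2_class(v1_name):
    # Hypothetical reduction of __convert_to_v2__: every product shares
    # the same local class name, whatever v1_name it wraps.
    class V2LayerImpl(object):
        pass
    return V2LayerImpl


fc = make_v2_class('fc_layer')
print(fc.__name__)   # V2LayerImpl -- unhelpful in generated docs
fc.__name__ = 'fc'
print(fc.__name__)   # fc -- the name paddle.v2.layer now publishes
```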
paddle/v2/networks.py:

@@ -38,6 +38,7 @@ def __initialize__():
             parent_names=parents,
             is_default_name='name' in argspec.args)
         globals()[each_subnetwork] = v2_subnet
+        globals()[each_subnetwork].__name__ = each_subnetwork
         global __all__
         __all__.append(each_subnetwork)
paddle/v2/pooling.py:

@@ -12,13 +12,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from paddle.trainer_config_helpers.poolings import *
+import paddle.trainer_config_helpers.poolings
+import copy

-__all__ = ["Max", "CudnnMax", "Avg", "CudnnAvg", "Sum", "SquareRootN"]
+__all__ = []
+suffix = 'Pooling'

-Max = MaxPooling
-CudnnMax = CudnnMaxPooling
-Avg = AvgPooling
-CudnnAvg = CudnnAvgPooling
-Sum = SumPooling
-SquareRootN = SquareRootNPooling
+for name in paddle.trainer_config_helpers.poolings.__all__:
+    new_name = name[:-len(suffix)]
+    globals()[new_name] = copy.copy(
+        getattr(paddle.trainer_config_helpers.poolings, name))
+    globals()[new_name].__name__ = new_name
+    __all__.append(new_name)
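As with activations, the pooling classes come out under shortened names (Max, Avg, Sum, ...). A hedged usage sketch (paddle.layer.pooling and dense_vector_sequence are the v2 API as I understand it, not confirmed by this diff):

```python
import paddle.v2 as paddle

paddle.init(use_gpu=False, trainer_count=1)

# A sequence input, pooled down to a single vector per sequence.
seq = paddle.layer.data(name="seq",
                        type=paddle.data_type.dense_vector_sequence(128))
avg = paddle.layer.pooling(input=seq,
                           pooling_type=paddle.pooling.Avg())
```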