diff --git a/python/paddle/v2/fluid/layers/control_flow.py b/python/paddle/v2/fluid/layers/control_flow.py
index 1e55adf0a8cdd6a97f2fcdfeb4ec63ae402412c6..5f01fdb076d3bf7d060a805d1431f4973993a843 100644
--- a/python/paddle/v2/fluid/layers/control_flow.py
+++ b/python/paddle/v2/fluid/layers/control_flow.py
@@ -11,13 +11,13 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import contextlib
 
-from ..layer_helper import LayerHelper, unique_name
-from ..framework import Program, Variable, Operator
-from .. import core
+from layer_function_generator import autodoc
 from tensor import assign, fill_constant
-import contextlib
-from ..registry import autodoc
+from .. import core
+from ..framework import Program, Variable, Operator
+from ..layer_helper import LayerHelper, unique_name
 
 __all__ = [
     'split_lod_tensor',
@@ -1477,7 +1477,7 @@ class DynamicRNN(object):
                 method))
 
 
-@autodoc
+@autodoc()
 def reorder_lod_tensor_by_rank(x, rank_table):
     helper = LayerHelper('reorder_lod_tensor_by_rank', **locals())
     helper.is_instance('x', Variable)
diff --git a/python/paddle/v2/fluid/layers/device.py b/python/paddle/v2/fluid/layers/device.py
index 736813d1b109087da367666d90be9e88dad1860e..107511b5f4ab1108610bc1326f30e5d9ab407853 100644
--- a/python/paddle/v2/fluid/layers/device.py
+++ b/python/paddle/v2/fluid/layers/device.py
@@ -15,14 +15,14 @@
 All util layers.
 """
 
-from ..layer_helper import LayerHelper
+from layer_function_generator import autodoc
 from ..framework import unique_name
-from ..registry import autodoc
+from ..layer_helper import LayerHelper
 
 __all__ = ['get_places']
 
 
-@autodoc
+@autodoc()
 def get_places(device_count=None, device_type=None):
     helper = LayerHelper('get_places', **locals())
     out_places = helper.create_variable(name=unique_name(helper.name + ".out"))
diff --git a/python/paddle/v2/fluid/registry.py b/python/paddle/v2/fluid/layers/layer_function_generator.py
similarity index 94%
rename from python/paddle/v2/fluid/registry.py
rename to python/paddle/v2/fluid/layers/layer_function_generator.py
index ff10542d40aabaf31897842754d38b7868472b21..b0e4d1635f7b5d0afdfa677e6ec1e8f9245a9d54 100644
--- a/python/paddle/v2/fluid/registry.py
+++ b/python/paddle/v2/fluid/layers/layer_function_generator.py
@@ -13,17 +13,19 @@
 # limitations under the License.
 import re
 import cStringIO
-import warnings
 import functools
-import inspect
+import warnings
+
+from .. import proto
 
-import proto.framework_pb2 as framework_pb2
-from framework import OpProtoHolder, Variable, Program, Operator
-from paddle.v2.fluid.layer_helper import LayerHelper, unique_name
+framework_pb2 = proto.framework_pb2
+
+from ..framework import OpProtoHolder, Variable
+from ..layer_helper import LayerHelper
 
 __all__ = [
     'deprecated',
-    'register_layer',
+    'generate_layer_fn',
     'autodoc',
 ]
 
@@ -96,7 +98,7 @@ def _generate_doc_string_(op_proto):
     return buf.getvalue()
 
 
-def register_layer(op_type):
+def generate_layer_fn(op_type):
     """Register the Python layer for an Operator.
 
     Args:
@@ -207,7 +209,10 @@ def deprecated(func_or_class):
     return func_wrapper
 
 
-def autodoc(func):
-    func.__doc__ = _generate_doc_string_(OpProtoHolder.instance().get_op_proto(
-        func.__name__))
-    return func
+def autodoc(comment=""):
+    def __impl__(func):
+        func.__doc__ = _generate_doc_string_(OpProtoHolder.instance(
+        ).get_op_proto(func.__name__)) + comment
+        return func
+
+    return __impl__
diff --git a/python/paddle/v2/fluid/layers/ops.py b/python/paddle/v2/fluid/layers/ops.py
index d29607616266973b3e59dbae660724d0c3874def..b517f8be6a3e5558dd01afe094fb3989cfb3af44 100644
--- a/python/paddle/v2/fluid/layers/ops.py
+++ b/python/paddle/v2/fluid/layers/ops.py
@@ -11,8 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
-from ..registry import register_layer
+from layer_function_generator import generate_layer_fn
 
 __activations__ = [
     'sigmoid',
@@ -53,4 +52,4 @@ __all__ = [
 ] + __activations__
 
 for _OP in set(__all__):
-    globals()[_OP] = register_layer(_OP)
+    globals()[_OP] = generate_layer_fn(_OP)
diff --git a/python/paddle/v2/fluid/tests/test_registry.py b/python/paddle/v2/fluid/tests/test_registry.py
index 6435e7e243d4e7fa10c99fda48a011523d8cc588..44e50ca55ac609ed2e0a145ff12248fa18479668 100644
--- a/python/paddle/v2/fluid/tests/test_registry.py
+++ b/python/paddle/v2/fluid/tests/test_registry.py
@@ -11,26 +11,21 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 import unittest
-import warnings
-
 import paddle.v2.fluid as fluid
-import paddle.v2.fluid.framework as framework
-import paddle.v2.fluid.layers as layers
-import paddle.v2.fluid.registry as registry
+import numpy as np
+import decorators
 
 
 class TestRegistry(unittest.TestCase):
+    @decorators.prog_scope()
     def test_registry_layer(self):
-        self.layer_type = "mean"
-        program = framework.Program()
-
         x = fluid.layers.data(name='X', shape=[10, 10], dtype='float32')
-        output = layers.mean(x)
+        output = fluid.layers.mean(x=x)
+
         place = fluid.CPUPlace()
         exe = fluid.Executor(place)
-
         X = np.random.random((10, 10)).astype("float32")
-        mean_out = exe.run(program, feed={"X": X}, fetch_list=[output])
-        self.assertAlmostEqual(np.mean(X), mean_out)
+        mean_out = exe.run(feed={"X": X}, fetch_list=[output])
+        self.assertAlmostEqual(np.mean(X), mean_out[0])
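
Note on the autodoc change: the call sites in control_flow.py and device.py move from @autodoc to @autodoc() because autodoc is now a decorator factory that can append an optional comment to the generated docstring. The sketch below is illustrative only and is not Paddle code; the fake_doc_for helper is a hypothetical stand-in for the real _generate_doc_string_(OpProtoHolder.instance().get_op_proto(...)) lookup in layer_function_generator.py.

    # Minimal sketch of the decorator-factory pattern used by the new autodoc().
    # fake_doc_for is a made-up placeholder for the op-proto docstring lookup.
    def fake_doc_for(name):
        return "Auto-generated docs for operator '%s'." % name


    def autodoc(comment=""):
        def __impl__(func):
            # Build the docstring, then append the optional caller comment,
            # mirroring the patched autodoc above.
            func.__doc__ = fake_doc_for(func.__name__) + comment
            return func

        return __impl__


    @autodoc(comment=" Returns a list of available places.")
    def get_places():
        pass


    print(get_places.__doc__)

generate_layer_fn is the renamed register_layer with unchanged behavior here; ops.py simply binds each name in __all__ to the layer function it returns.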