diff --git a/python/paddle/fluid/__init__.py b/python/paddle/fluid/__init__.py
index a06e041c1e8aaa8897ac77f2ec1275824849e7ef..fd901f91a1ddf87b031a55cf5d807990fecf3961 100644
--- a/python/paddle/fluid/__init__.py
+++ b/python/paddle/fluid/__init__.py
@@ -57,6 +57,8 @@ import recordio_writer
 import parallel_executor
 from parallel_executor import *
 
+from paddle.fluid.layers.math_op_patch import monkey_patch_variable
+
 Tensor = LoDTensor
 
 __all__ = framework.__all__ + executor.__all__ + concurrency.__all__ + \
@@ -138,5 +140,5 @@ def __bootstrap__():
 
 # TODO(panyx0718): Avoid doing complex initialization logic in __init__.py.
 # Consider paddle.init(args) or paddle.main(args)
-layers.monkey_patch_variable()
+monkey_patch_variable()
 __bootstrap__()
diff --git a/python/paddle/fluid/layers/__init__.py b/python/paddle/fluid/layers/__init__.py
index cd1492da24d5e9d09a9eaac0b1b9c7aaffac6250..4917e67de0d20ff9e8f9a27f38e1bd2abef5c503 100644
--- a/python/paddle/fluid/layers/__init__.py
+++ b/python/paddle/fluid/layers/__init__.py
@@ -33,7 +33,6 @@ from metric_op import *
 from learning_rate_scheduler import *
 
 __all__ = []
-__all__ += math_op_patch.__all__
 __all__ += nn.__all__
 __all__ += io.__all__
 __all__ += tensor.__all__
diff --git a/python/paddle/fluid/layers/math_op_patch.py b/python/paddle/fluid/layers/math_op_patch.py
index 1754061c4ba6f5b97bced3548bc412dfb1b7932c..f814c41633fbac76eb9411e2f418f521e8e9679d 100644
--- a/python/paddle/fluid/layers/math_op_patch.py
+++ b/python/paddle/fluid/layers/math_op_patch.py
@@ -16,8 +16,6 @@ from ..framework import Variable, unique_name
 from layer_function_generator import OpProtoHolder
 from ..initializer import force_init_on_cpu
 
-__all__ = ['monkey_patch_variable']
-
 
 def monkey_patch_variable():
     def unique_tmp_name():
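
For context, below is a minimal sketch of the monkey-patching pattern that monkey_patch_variable applies: operator dunder methods are attached to an already-defined class from a separate function, so the package (here fluid/__init__.py) controls when the patch runs and no longer needs to re-export the function through layers.__all__. The stand-in Variable class and the operator list are illustrative assumptions for this sketch, not Paddle's actual implementation, which builds graph operators instead of computing values eagerly.

import operator


class Variable(object):
    """Stand-in for the framework's Variable; holds a plain Python value."""

    def __init__(self, value):
        self.value = value


def monkey_patch_variable():
    """Attach arithmetic dunder methods to Variable after class definition."""

    def _binary_op(op_name, fn):
        def impl(self, other):
            # Accept either another Variable or a raw Python number.
            other_value = other.value if isinstance(other, Variable) else other
            return Variable(fn(self.value, other_value))

        impl.__name__ = op_name
        return impl

    for name, fn in [("__add__", operator.add),
                     ("__sub__", operator.sub),
                     ("__mul__", operator.mul)]:
        setattr(Variable, name, _binary_op(name, fn))


# The caller decides when to patch, mirroring the call in fluid/__init__.py.
monkey_patch_variable()

a, b = Variable(3), Variable(4)
print((a + b).value)  # 7
print((a * 2).value)  # 6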