提交 077aa207 编写于 作者: X Xin Pan

hide monkey_patch_variable

上级 f02a4da6
@@ -57,6 +57,8 @@ import recordio_writer
 import parallel_executor
 from parallel_executor import *
+from paddle.fluid.layers.math_op_patch import monkey_patch_variable
 Tensor = LoDTensor
 __all__ = framework.__all__ + executor.__all__ + concurrency.__all__ + \
@@ -138,5 +140,5 @@ def __bootstrap__():
 # TODO(panyx0718): Avoid doing complex initialization logic in __init__.py.
 # Consider paddle.init(args) or paddle.main(args)
-layers.monkey_patch_variable()
+monkey_patch_variable()
 __bootstrap__()
@@ -33,7 +33,6 @@ from metric_op import *
 from learning_rate_scheduler import *
 __all__ = []
-__all__ += math_op_patch.__all__
 __all__ += nn.__all__
 __all__ += io.__all__
 __all__ += tensor.__all__
......
@@ -16,8 +16,6 @@ from ..framework import Variable, unique_name
 from layer_function_generator import OpProtoHolder
 from ..initializer import force_init_on_cpu
-__all__ = ['monkey_patch_variable']
 def monkey_patch_variable():
     def unique_tmp_name():
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册