diff --git a/python/paddle/__init__.py b/python/paddle/__init__.py index ca237df8e53fe14124fe255685a87240e03553fb..f319ab27c063a112e49ebcfbe29c9ffdf10109c6 100644 --- a/python/paddle/__init__.py +++ b/python/paddle/__init__.py @@ -24,11 +24,15 @@ except ImportError: ) from .batch import batch # noqa: F401 + +# Do the *DUPLICATED* monkey-patch for the tensor object. +# We need to remove the duplicated code here once we fix +# the illogical implementation in the monkey-patch methods later. from .framework import monkey_patch_variable -from .framework import monkey_patch_math_varbase +from .framework import monkey_patch_math_tensor monkey_patch_variable() -monkey_patch_math_varbase() +monkey_patch_math_tensor() from .framework import disable_signal_handler # noqa: F401 from .framework import get_flags # noqa: F401 diff --git a/python/paddle/fluid/__init__.py b/python/paddle/fluid/__init__.py index c33406dba8e6a4b69ef312cdb18f1d9be3c29fa8..a82febe3ad6d73e58e28fbb68adc912cbc847825 100644 --- a/python/paddle/fluid/__init__.py +++ b/python/paddle/fluid/__init__.py @@ -79,7 +79,7 @@ from . import compiler from .compiler import * from paddle.fluid.layers.math_op_patch import monkey_patch_variable from .dygraph.base import enable_dygraph, disable_dygraph -from .dygraph.varbase_patch_methods import monkey_patch_varbase +from .dygraph.tensor_patch_methods import monkey_patch_tensor from .core import _cuda_synchronize from .trainer_desc import ( TrainerDesc, @@ -211,7 +211,7 @@ def __bootstrap__(): # Consider paddle.init(args) or paddle.main(args) monkey_patch_variable() __bootstrap__() -monkey_patch_tensor() # NOTE(Aurelius84): clean up ExecutorCacheInfo in advance manually. 
atexit.register(core.clear_executor_cache) diff --git a/python/paddle/fluid/dygraph/__init__.py b/python/paddle/fluid/dygraph/__init__.py index 36dbf90c52b8b94c11ce63f345ac71a8ba7b005a..c40262a45d7c368fb292d9316fb48f578becc363 100644 --- a/python/paddle/fluid/dygraph/__init__.py +++ b/python/paddle/fluid/dygraph/__init__.py @@ -21,8 +21,6 @@ from .tracer import * from . import learning_rate_scheduler from .learning_rate_scheduler import * -from .math_op_patch import monkey_patch_math_varbase - __all__ = [] __all__ += base.__all__ __all__ += learning_rate_scheduler.__all__ diff --git a/python/paddle/fluid/dygraph/math_op_patch.py b/python/paddle/fluid/dygraph/math_op_patch.py index 220d849070d18eb6323be4910ba23f77eb1597a8..7b20e626b8e95dbd4d027b8f9cf7a2f97bf54bc0 100644 --- a/python/paddle/fluid/dygraph/math_op_patch.py +++ b/python/paddle/fluid/dygraph/math_op_patch.py @@ -65,7 +65,7 @@ _complex_dtypes = [ _already_patch_eager_tensor = False -def monkey_patch_math_varbase(): +def monkey_patch_math_tensor(): """ Similar to monkey_patch_variable. The difference is, in dygraph mode, use auto-generated op functions for better performance. @@ -248,7 +248,7 @@ def monkey_patch_math_varbase(): # do nothing pass - # 2. create varbase for scalar + # 2. 
create Tensor for scalar lhs_dtype = self.dtype other_var_should_be = core.eager.Tensor if not isinstance(other_var, other_var_should_be): @@ -343,7 +343,7 @@ def monkey_patch_math_varbase(): __impl__.__name__ = method_name return __impl__ - varbase_methods = [ + tensor_methods = [ ('__neg__', _neg_), ('__float__', _float_), ('__long__', _long_), @@ -498,7 +498,7 @@ def monkey_patch_math_varbase(): setattr(local_tensor, method_name, method_impl) else: - for method in varbase_methods: + for method in tensor_methods: method_name = method[0] method_impl = method[1] setattr(local_tensor, method_name, method_impl) diff --git a/python/paddle/fluid/dygraph/varbase_patch_methods.py b/python/paddle/fluid/dygraph/tensor_patch_methods.py similarity index 98% rename from python/paddle/fluid/dygraph/varbase_patch_methods.py rename to python/paddle/fluid/dygraph/tensor_patch_methods.py index 5e64cd6bad3cb1325da45727d5c8bf101eff5b2c..882a333b5ebf7448e2ff6ed5f0759c79b42a9920 100644 --- a/python/paddle/fluid/dygraph/varbase_patch_methods.py +++ b/python/paddle/fluid/dygraph/tensor_patch_methods.py @@ -32,7 +32,7 @@ from ..framework import ( in_dygraph_mode, ) from .base import switch_to_static_graph -from .math_op_patch import monkey_patch_math_varbase +from .math_op_patch import monkey_patch_math_tensor from paddle.fluid.data_feeder import convert_dtype, _PADDLE_DTYPE_2_NUMPY_DTYPE import paddle.utils.deprecated as deprecated import paddle.profiler as profiler @@ -86,7 +86,7 @@ class TensorHookRemoveHelper: _already_patch_repr = False -def monkey_patch_varbase(): +def monkey_patch_tensor(): @switch_to_static_graph def _to_static_var(self, to_parameter=False, **kwargs): """ @@ -110,8 +110,8 @@ def monkey_patch_varbase(): data = np.ones([3, 1024], dtype='float32') with fluid.dygraph.guard(): - var_base = to_variable(data) - static_var = var_base._to_static_var() + tensor = to_variable(data) + static_var = tensor._to_static_var() """ @@ -700,11 +700,11 @@ def monkey_patch_varbase(): 
raise RuntimeError( "Only Leaf Tensor support the deepcopy at the moment, non-Leaf Tensors contains graph information that does't support deepcopy" ) - new_varbase = core.eager.Tensor() - new_varbase.name = self.name + unique_name.generate("_deepcopy") - memo[id(self)] = new_varbase - new_varbase.copy_(self, True) - return new_varbase + new_tensor = core.eager.Tensor() + new_tensor.name = self.name + unique_name.generate("_deepcopy") + memo[id(self)] = new_tensor + new_tensor.copy_(self, True) + return new_tensor @property def block(self): @@ -1073,5 +1073,5 @@ def monkey_patch_varbase(): setattr(core.VarDesc.VarType, "__str__", dtype_str) _already_patch_repr = True - # patch math methods for varbase - monkey_patch_math_varbase() + # patch math methods for tensor + monkey_patch_math_tensor() diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py index a699a2f0fe72089cf91233fde3deffd571815694..63ab3a65bb6b93d6bb4210e474a16f7f4571880c 100644 --- a/python/paddle/fluid/framework.py +++ b/python/paddle/fluid/framework.py @@ -112,8 +112,6 @@ _global_expected_place_ = None _current_device = None global_prog_seed = 0 _current_pipeline_stage = None -_already_patch_eager_tensor = False -_already_patch_varbase = False _current_cuda_graph_mode = None _global_flags_ = core.globals() @@ -182,35 +180,6 @@ extra_op_attrs = { # to make sure in most case, we find new dygraph mode first with only one if statement. -def _update_monkey_methods(): - """ - Update monkey methods of Tensor or eager.Tensor while - switching eager mode and legacy mode. 
- """ - from paddle import _C_ops, _legacy_C_ops - from .dygraph.varbase_patch_methods import monkey_patch_varbase - from .dygraph import monkey_patch_math_varbase - - global _already_patch_eager_tensor - global _already_patch_varbase - - if not _already_patch_eager_tensor: - monkey_patch_varbase() - monkey_patch_math_varbase() - - _already_patch_eager_tensor = True - - # switch Paddle.Tensor bind type - _switch_tensor_bind_type() - - -def _switch_tensor_bind_type(): - import paddle - - paddle.Tensor = core.eager.Tensor - paddle.Tensor.__qualname__ = 'Tensor' - - def _in_eager_without_dygraph_check(): return global_var._in_eager_mode_ diff --git a/python/paddle/fluid/tests/unittests/test_deprecated_decorator.py b/python/paddle/fluid/tests/unittests/test_deprecated_decorator.py index 4624897f0168efbe4ad9ce1343064ff5a6acf0aa..1e4bfe50515dd8fc2f3ba4836cf5efeda3d86df0 100755 --- a/python/paddle/fluid/tests/unittests/test_deprecated_decorator.py +++ b/python/paddle/fluid/tests/unittests/test_deprecated_decorator.py @@ -118,7 +118,7 @@ class TestDeprecatedDocorator(unittest.TestCase): with warnings.catch_warnings(record=True) as w: grad = x.gradient() assert ( - 'API "paddle.fluid.dygraph.varbase_patch_methods.gradient" is ' + 'API "paddle.fluid.dygraph.tensor_patch_methods.gradient" is ' 'deprecated since 2.1.0' ) in str(w[-1].message) diff --git a/python/paddle/framework/__init__.py b/python/paddle/framework/__init__.py index 2b9449b7c0902944df7dd90ddc4b1f31c76c7d9b..3c6338e345ddb481e0c45fdb05ec64bac7b6429c 100755 --- a/python/paddle/framework/__init__.py +++ b/python/paddle/framework/__init__.py @@ -44,8 +44,11 @@ from .io_utils import _pack_loaded_dict from .io_utils import _unpack_saved_dict from .io_utils import _load_program_scope -from ..fluid import monkey_patch_variable -from ..fluid.dygraph import monkey_patch_math_varbase +# Do the *DUPLICATED* monkey-patch for the tensor object. 
+# We need to remove the duplicated code here once we fix +# the illogical implementation in the monkey-patch methods later. +from ..fluid.layers.math_op_patch import monkey_patch_variable +from ..fluid.dygraph.math_op_patch import monkey_patch_math_tensor from ..fluid.framework import disable_signal_handler # noqa: F401 from ..fluid.framework import get_flags # noqa: F401 from ..fluid.framework import set_flags # noqa: F401