#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import warnings
import inspect

from .. import core
from ..framework import Variable, unique_name, static_only
from .layer_function_generator import OpProtoHolder
from paddle.base.dygraph.base import in_to_static_mode

_supported_int_dtype_ = [
    core.VarDesc.VarType.BOOL,
    core.VarDesc.VarType.UINT8,
    core.VarDesc.VarType.INT8,
    core.VarDesc.VarType.INT16,
    core.VarDesc.VarType.INT32,
    core.VarDesc.VarType.INT64,
]

compare_ops = ['__eq__', '__ne__', '__lt__', '__le__', '__gt__', '__ge__']

EXPRESSION_MAP = {
    "__add__": "A + B",
    "__radd__": "B + A",
    "__sub__": "A - B",
    "__rsub__": "B - A",
    "__mul__": "A * B",
    "__rmul__": "B * A",
    "__div__": "A / B",
    "__truediv__": "A / B",
    "__rdiv__": "B / A",
    "__rtruediv__": "B / A",
    "__pow__": "A ** B",
    "__rpow__": "B ** A",
    "__floordiv__": "A // B",
    "__mod__": "A % B",
    "__matmul__": "A @ B",
    "__eq__": "A == B",
    "__ne__": "A != B",
    "__lt__": "A < B",
    "__le__": "A <= B",
    "__gt__": "A > B",
    "__ge__": "A >= B",
}

_already_patch_variable = False


def monkey_patch_variable():
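    """
    Patch the static-graph ``Variable`` class in place: attach the Python magic
    methods (+, -, *, /, comparisons, @, ...) and tensor-like helpers (astype,
    cpu, cuda, append, pop, ...) defined below, so a static ``Variable`` offers
    roughly the same surface API as a dygraph Tensor.
    """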
    def unique_tmp_name():
        return unique_name.generate("tmp")

    def safe_get_dtype(var):
        try:
            dtype = var.dtype
        except Exception:
            raise ValueError("Cannot get data type from %s" % var.name)
        return dtype

    def current_block(var):
        return var.block.program.current_block()

    def create_new_tmp_var(block, dtype):
        tmp_name = unique_tmp_name()
        return block.create_var(name=tmp_name, dtype=dtype)

    def create_new_tmp_sparse_var(block, dtype, type):
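        # Same as create_new_tmp_var, but with an explicit variable type
        # (e.g. a sparse VarType).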
        tmp_name = unique_tmp_name()
        return block.create_var(name=tmp_name, dtype=dtype, type=type)

    def create_tensor(block, value, dtype, shape):
        value = float(value)
        var = create_new_tmp_var(block, dtype)
        block.append_op(
            type="fill_constant",
            outputs={'Out': [var]},
            attrs={
                'dtype': var.dtype,
                'shape': shape,
                'value': value,
                'force_cpu': False,
            },
            stop_gradient=True,
        )
        var.stop_gradient = True
        return var

    def create_scalar(block, value, dtype):
        return create_tensor(block, value, dtype, shape=[])

    def create_tensor_with_batchsize(ref_var, value, dtype):
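        # Fill a constant tensor shaped like ref_var: the first -1 dim is kept
        # as the (dynamic) batch dimension and resolved at runtime by
        # fill_constant_batch_size_like; any later -1 dims collapse to 1.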
        assert isinstance(ref_var, Variable)
        value = float(value)
        block = current_block(ref_var)
        var = create_new_tmp_var(block, dtype)
        batch_dim = -1
        out_shape = []
        for i, d in enumerate(ref_var.shape):
            if d < 0:
                if batch_dim < 0:
                    batch_dim = i
                    out_shape.append(d)
                else:
                    out_shape.append(1)
            else:
                out_shape.append(d)
        assert batch_dim != -1
        block.append_op(
            type='fill_constant_batch_size_like',
            outputs={'Out': [var]},
            inputs={'Input': [ref_var]},
            attrs={
                'shape': out_shape,
                'value': value,
                'input_dim_idx': batch_dim,
                'output_dim_idx': batch_dim,
            },
            stop_gradient=True,
        )

        var.stop_gradient = True
        return var

    @static_only
    def cpu(self):
        """
        In dy2static, Variable also needs the cpu() and cuda() interfaces.
        However, the underlying operator only has a forward op, not a backward one.

        Returns:
            The tensor which has been copied to CPU place.

        Examples:
            In Static Graph Mode:

            .. code-block:: python

                import paddle
                paddle.enable_static()

                x = paddle.static.data(name="x", shape=[2,2], dtype='float32')
                y = x.cpu()
        """
        block = current_block(self)
        tmp_name = unique_tmp_name()
        output = block.create_var(
            name=tmp_name,
            dtype=self.dtype,
            shape=self.shape,
            type=self.type,
            persistable=False,
            stop_gradient=True,
        )
        # 0 means cpu place, see paddle/phi/kernels/memcpy_kernel.cc
        attrs = {'dst_place_type': 0}
        block.append_op(
            type='memcpy',
            inputs={'X': [self]},
            outputs={'Out': [output]},
            attrs=attrs,
        )
        return output

    @static_only
    def cuda(self, device_id=None, blocking=True):
        """
        In dy2static, Variable also needs the cpu() and cuda() interfaces.
        However, the underlying operator only has a forward op, not a backward one.

        Args:
            self(Variable): The variable itself.
            device_id(int, optional): The destination GPU device id. Default: None, means current device.
                We add this argument for dy2static translation, please do not use it.
            blocking(bool, optional): Whether to block or not. Default: True.
                We add this argument for dy2static translation, please do not use it.

        Returns:
            The tensor which has been copied to CUDA place.

        Examples:
            In Static Graph Mode:

            .. code-block:: python

                import paddle
                paddle.enable_static()

                x = paddle.static.data(name="x", shape=[2,2], dtype='float32')
                y = x.cpu()
                z = y.cuda()
        """
        if device_id is not None:
            warnings.warn("device_id is not supported, and it will be ignored.")
        if blocking is not True:
            warnings.warn("blocking is not supported, and it will be ignored.")

        block = current_block(self)
        tmp_name = unique_tmp_name()
        output = block.create_var(
            name=tmp_name,
            dtype=self.dtype,
            shape=self.shape,
            type=self.type,
            persistable=False,
            stop_gradient=True,
        )
        # 1 means cuda place, see paddle/phi/kernels/memcpy_kernel.cc
        attrs = {'dst_place_type': 1}
        block.append_op(
            type='memcpy',
            inputs={'X': [self]},
            outputs={'Out': [output]},
            attrs=attrs,
        )
        return output

    @static_only
    def place(self):
        """
        Variable doesn't have a 'place' interface in static graph mode,
        but this interface can greatly facilitate dy2static,
        so we give a warning here and return None.
        """
        warnings.warn(
            "Variable does not have a 'place' interface in static graph mode, try not to use it. None will be returned."
        )
        return None

    def astype(self, dtype):
        """
        **Notes**:
            **The variable must be a** :ref:`api_base_Tensor`

        Cast a variable to a specified data type.

        Args:

            self(Variable): The source variable

            dtype: The target data type

        Returns:
            Variable: Variable with new dtype

        Examples:
            In Static Graph Mode:

            .. code-block:: python

                import paddle
                import paddle.base as base
                paddle.enable_static()
                startup_prog = base.Program()
                main_prog = base.Program()
                with base.program_guard(startup_prog, main_prog):
                    original_variable = paddle.static.data(name="new_variable", shape=[2, 2], dtype='float32')
                    new_variable = original_variable.astype('int64')
                    print("new var's dtype is: {}".format(new_variable.dtype))

            In Dygraph Mode:

            .. code-block:: python

                import paddle.base as base
                import numpy as np

                x = np.ones([2, 2], np.float32)
                with base.dygraph.guard():
                    original_variable = base.dygraph.to_variable(x)
                    print("original var's dtype is: {}, numpy dtype is {}".format(original_variable.dtype, original_variable.numpy().dtype))
                    new_variable = original_variable.astype('int64')
                    print("new var's dtype is: {}, numpy dtype is {}".format(new_variable.dtype, new_variable.numpy().dtype))

        """
        block = current_block(self)
        out = create_new_tmp_var(block, dtype)
        block.append_op(
            type="cast",
            inputs={"X": [self]},
            outputs={"Out": [out]},
            attrs={"in_dtype": self.dtype, "out_dtype": out.dtype},
        )
        out.stop_gradient = self.stop_gradient
        return out

    @static_only
    def append(self, var):
        """
        **Notes**:
           **The type of the variable must be LoD Tensor Array.**
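
        Examples:
            A minimal sketch in static graph mode (assuming the array is
            created with ``paddle.tensor.create_array``):

            .. code-block:: python

                import paddle
                paddle.enable_static()

                arr = paddle.tensor.create_array(dtype='float32')
                x = paddle.full(shape=[2], fill_value=1.0, dtype='float32')
                arr.append(x)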

        """
        if not isinstance(var, Variable):
            if in_to_static_mode():
                # in dy2static mode, var may be a tensorable value such as an int, float, or numpy.ndarray
                from paddle.tensor.creation import to_tensor

                var = to_tensor(var)
            else:
                raise TypeError(
                    "Required input var should be Variable, but received {}".format(
                        type(var)
                    )
                )
        if self.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY:
            raise TypeError(
                "Only Variable with VarType.LOD_TENSOR_ARRAY support `append` method, but received type: {}".format(
                    self.type
                )
            )
        from paddle.tensor.array import array_length, array_write

        array_write(x=var, i=array_length(self), array=self)

    @static_only
    def _item(self):
        """
        In order to be compatible with the item() interface introduced by the dynamic graph, this does nothing but return self.
        It checks that the variable is at most 1-D.
        """
        if len(self.shape) > 1:
            raise TypeError(
                "Required input var should be 1-D Variable, but received {}".format(
                    self.shape
                )
            )
        return self

    @static_only
    def pop(self, *args):
        """
        The type of the variable must be LoD Tensor Array.
        When self is a LoDTensorArray, calling pop is similar to Python's pop on a list.
        This interface is used to simplify dygraph to static graph operations.

        Args:
            self(Variable): The source variable, which must be LOD_TENSOR_ARRAY
            *args: optional, an int giving the index to pop.
        Returns:
            Variable: self[index]
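
        Examples:
            A minimal sketch in static graph mode (assuming the array is
            created with ``paddle.tensor.create_array``):

            .. code-block:: python

                import paddle
                paddle.enable_static()

                arr = paddle.tensor.create_array(dtype='float32')
                x = paddle.full(shape=[2], fill_value=1.0, dtype='float32')
                arr.append(x)
                y = arr.pop()  # pops and returns the last element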
        """
        from paddle.jit.dy2static.convert_operators import (
            _run_paddle_pop,
        )

        if self.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY:
            raise TypeError(
                "Only Variable with VarType.LOD_TENSOR_ARRAY support `append` method, but received type: {}".format(
                    self.type
                )
            )
        return _run_paddle_pop(self, *args)

    def _scalar_op_(var, scale, bias):
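        # out = scale * var + bias, computed with a single `scale` op; used by
        # the _scalar_*_ helpers below to fold scalar +, -, *, / into one
        # kernel instead of an elementwise op on a filled constant.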
        block = current_block(var)
        out = create_new_tmp_var(block, var.dtype)
        block.append_op(
            type="scale",
            inputs={"X": [var]},
            outputs={"Out": [out]},
            attrs={"scale": scale, "bias": bias},
        )
        return out

    def _neg_(var):
        return _scalar_op_(var, -1.0, 0.0)

    @property
    def _ndim_(self):
        """
        Returns the dimension of current Variable

        Returns:
            the dimension

        Examples:
            .. code-block:: python

                import paddle

                paddle.enable_static()

                # create a static Variable
                x = paddle.static.data(name='x', shape=[3, 2, 1])
                # print the dimension of the Variable
                print(x.ndim)
        """
        return len(self.shape)

    def ndimension(self):
        """
        Returns the dimension of current Variable

        Returns:
            the dimension

        Examples:
            .. code-block:: python

                import paddle

                paddle.enable_static()

                # create a static Variable
                x = paddle.static.data(name='x', shape=[3, 2, 1])
                # print the dimension of the Variable
                print(x.ndimension())
        """
        return len(self.shape)

    def dim(self):
        """
        Returns the dimension of current Variable

        Returns:
            the dimension

        Examples:
            .. code-block:: python

                import paddle

                paddle.enable_static()

                # create a static Variable
                x = paddle.static.data(name='x', shape=[3, 2, 1])
                # print the dimension of the Variable
                print(x.dim())
        """
        return len(self.shape)

    def _scalar_add_(var, value):
        return _scalar_op_(var, 1.0, value)

    def _scalar_sub_(var, value):
        return _scalar_op_(var, 1.0, -value)

    def _scalar_rsub_(var, value):
        return _scalar_op_(var, -1.0, value)

    def _scalar_mul_(var, value):
        return _scalar_op_(var, value, 0.0)

    def _scalar_div_(var, value):
        return _scalar_op_(var, 1.0 / value, 0.0)

    def _binary_creator_(
        method_name, op_type, reverse=False, scalar_method=None
    ):
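        # Factory for the binary magic methods. The generated __impl__:
        #   1. handles Python scalars (via the cheaper scalar_method when given),
        #   2. otherwise materializes the scalar as a filled tensor,
        #   3. unifies dtypes and appends `op_type` to the current block.
        # reverse=True swaps the operands for reflected operators (__rsub__, ...).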
        def __impl__(self, other_var):
            # 1. cases where the other operand is a Python scalar
            # we need to combine tensor.dtype and scalar.dtype and cast the correct object
            if isinstance(other_var, float):
                # in all cases (+, -, *, /, **, //, %), we need to cast tensor.dtype to float
                if self.dtype in _supported_int_dtype_:
                    self = astype(self, 'float32')
                # here we use `scale` instead of `elementwise` to get better performance
                # but only +, -, *, / can use this method
                if scalar_method is not None:
                    return scalar_method(self, other_var)
            elif isinstance(other_var, int):
                # in all cases (+, -, *, /, **, //, %), we can cast it to float
                # because the output tensor.dtype depends on the type of the input tensor
                other_var = float(other_var)
                # division is a special case
                # NOTE(chenweihang): because we cast the tensor to float32 instead of float64,
                # the division result can only guarantee numerical accuracy to 6 digits
                # after the decimal point. The result of a numpy calculation is of float64 type,
                # so the calculation result here and the calculation result of numpy are
                # different after the 6th decimal place. If necessary, we can also use float64 here.
                # torch's behavior here is consistent with ours
                if (
                    op_type == 'elementwise_div'
                    and self.dtype in _supported_int_dtype_
                ):
                    self = astype(self, 'float32')
                # here we use `scale` instead of `elementwise` to get better performance
                # but only +, -, *, / can use this method
                if scalar_method is not None:
                    return scalar_method(self, other_var)
            else:
                # do nothing
                pass

            # 2. create variable for scalar
            lhs_dtype = safe_get_dtype(self)
            if not isinstance(other_var, Variable):
                if reverse:
                    for elem in self.shape:
                        if elem < 0:
                            other_var = create_tensor_with_batchsize(
                                self, other_var, lhs_dtype
                            )
                            break
                    else:
                        # when break is not triggered, enter the else branch
                        other_var = create_tensor(
                            current_block(self),
                            other_var,
                            dtype=lhs_dtype,
                            shape=self.shape,
                        )
                else:
                    # add fill_op to current_block
                    other_var = create_scalar(
                        current_block(self), value=other_var, dtype=lhs_dtype
                    )

            # 3. unify right var type to left var
            rhs_dtype = safe_get_dtype(other_var)
            if lhs_dtype != rhs_dtype:
                other_var = astype(other_var, lhs_dtype)
            if reverse:
                tmp = self
                self = other_var
                other_var = tmp

            if (
                op_type == "divide" or op_type == "elementwise_div"
            ) and self.dtype in _supported_int_dtype_:
                self = astype(self, 'float32')
                other_var = astype(other_var, 'float32')

            # NOTE(zhiqiu): the output of compare operator should be bool.
            if method_name in compare_ops:
                out = create_new_tmp_var(current_block(self), dtype="bool")
            else:
                out = create_new_tmp_var(
                    current_block(self), dtype=safe_get_dtype(self)
                )

            axis = -1
            if other_var.ndim > 0 and other_var.shape[0] == -1:
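                # The right operand has a dynamic (batch) first dim: warn that
                # since Paddle 2.0 this expression maps to op(X, Y, axis=-1)
                # rather than axis=0.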
                stack = inspect.stack()[1]
                file_name = stack[1]
                line_num = stack[2]
                warnings.warn(
                    "%s:%s\nThe behavior of expression %s has been unified with %s(X, Y, axis=-1) from Paddle 2.0. "
                    "If your code works well in the older versions but crashes in this version, try to use "
                    "%s(X, Y, axis=0) instead of %s. This transitional warning will be dropped in the future."
                    % (
                        file_name,
                        line_num,
                        EXPRESSION_MAP[method_name],
                        op_type,
                        op_type,
                        EXPRESSION_MAP[method_name],
                    ),
                    category=DeprecationWarning,
                )
            current_block(self).append_op(
                type=op_type,
                inputs={'X': [self], 'Y': [other_var]},
                outputs={'Out': out},
                attrs={'axis': axis},
            )
            return out

        comment = OpProtoHolder.instance().get_op_proto(op_type).comment

        __impl__.__doc__ = """
        {0}
        Args:
            self(Variable): left hand variable
            other_var(Variable|float|int): right hand variable

        Returns:
            Variable
        """.format(
            comment
        )
        __impl__.__name__ = method_name
        return __impl__
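
    # The helpers below wrap the sparse_values / sparse_indices /
    # sparse_to_dense ops for sparse variables.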

    def values(var):
        block = current_block(var)
        out = create_new_tmp_var(block, var.dtype)
        block.append_op(
            type="sparse_values",
            inputs={"x": [var]},
            outputs={"out": [out]},
            attrs={},
        )
        return out

    def indices(var):
        block = current_block(var)
        out = create_new_tmp_var(block, var.dtype)
        block.append_op(
            type="sparse_indices",
            inputs={"x": [var]},
            outputs={"out": [out]},
            attrs={},
        )
        return out

    def to_dense(var):
        block = current_block(var)
        out = create_new_tmp_var(block, var.dtype)
        block.append_op(
            type="sparse_to_dense",
            inputs={"x": [var]},
            outputs={"out": [out]},
            attrs={},
        )
        return out

    variable_methods = [
        # b = -a
        ('__neg__', _neg_),
        ('astype', astype),
        ('cpu', cpu),
        ('cuda', cuda),
        ('place', place),
        ('append', append),
        ('item', _item),
        ('pop', pop),
        ('dim', dim),
        ('ndimension', ndimension),
        ('ndim', _ndim_),
        (
            '__add__',
            _binary_creator_('__add__', 'elementwise_add', False, _scalar_add_),
        ),
        #  a+b == b+a. Do not need to reverse explicitly
        (
            '__radd__',
            _binary_creator_(
                '__radd__', 'elementwise_add', False, _scalar_add_
            ),
        ),
        (
            '__sub__',
            _binary_creator_('__sub__', 'elementwise_sub', False, _scalar_sub_),
        ),
        (
            '__rsub__',
            _binary_creator_(
                '__rsub__', 'elementwise_sub', True, _scalar_rsub_
            ),
        ),
        (
            '__mul__',
            _binary_creator_('__mul__', 'elementwise_mul', False, _scalar_mul_),
        ),
        #  a*b == b*a. Do not need to reverse explicitly
        (
            '__rmul__',
            _binary_creator_(
                '__rmul__', 'elementwise_mul', False, _scalar_mul_
            ),
        ),
        (
            '__div__',
            _binary_creator_('__div__', 'elementwise_div', False, _scalar_div_),
        ),
        (
            '__truediv__',
            _binary_creator_(
                '__truediv__', 'elementwise_div', False, _scalar_div_
            ),
        ),
        (
            '__rdiv__',
            _binary_creator_('__rdiv__', 'elementwise_div', True, None),
        ),
        (
            '__rtruediv__',
            _binary_creator_('__rtruediv__', 'elementwise_div', True, None),
        ),
        (
            '__pow__',
            _binary_creator_('__pow__', 'elementwise_pow', False, None),
        ),
        (
            '__rpow__',
            _binary_creator_('__rpow__', 'elementwise_pow', True, None),
        ),
        (
            '__floordiv__',
            _binary_creator_(
                '__floordiv__', 'elementwise_floordiv', False, None
            ),
        ),
        (
            '__mod__',
            _binary_creator_('__mod__', 'elementwise_mod', False, None),
        ),
        (
            '__matmul__',
            _binary_creator_('__matmul__', "matmul_v2", False, None),
        ),
        #  for logical compare
        ('__eq__', _binary_creator_('__eq__', 'equal', False, None)),
        ('__ne__', _binary_creator_('__ne__', 'not_equal', False, None)),
        ('__lt__', _binary_creator_('__lt__', 'less_than', False, None)),
        ('__le__', _binary_creator_('__le__', 'less_equal', False, None)),
        ('__gt__', _binary_creator_('__gt__', 'greater_than', False, None)),
        ('__ge__', _binary_creator_('__ge__', 'greater_equal', False, None)),
        ('values', values),
        ('indices', indices),
        ('to_dense', to_dense),
    ]

    global _already_patch_variable
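    # On the first call, attach the implementations defined above to Variable.
    # On later calls, additionally bind paddle.tensor functions (and magic
    # methods) that Variable does not define yet.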
    if not _already_patch_variable:
        for method in variable_methods:
            method_name = method[0]
            method_impl = method[1]
            setattr(Variable, method_name, method_impl)
    else:
        import paddle.tensor

        for method_name in paddle.tensor.tensor_method_func:
            if hasattr(Variable, method_name):
                continue
            method_impl = getattr(paddle.tensor, method_name, None)
            if method_impl:
                setattr(Variable, method_name, method_impl)

        for magic_method, origin_method in paddle.tensor.magic_method_func:
            impl = getattr(paddle.tensor, origin_method, None)
            if impl:
                setattr(Variable, magic_method, impl)

    _already_patch_variable = True