# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
import numpy as np
import warnings
import weakref
import sys

import paddle
from .. import framework
from ..framework import convert_np_dtype_to_dtype_
from .. import core
from .. import unique_name
from ..framework import (
    Variable,
    Parameter,
    _getitem_impl_,
    _setitem_impl_,
    EagerParamBase,
    in_dygraph_mode,
)
from .base import switch_to_static_graph
from .math_op_patch import monkey_patch_math_tensor
from paddle.fluid.data_feeder import (
    convert_uint16_to_float,
    _PADDLE_DTYPE_2_NUMPY_DTYPE,
)
import paddle.utils.deprecated as deprecated
import paddle.profiler as profiler
from paddle.profiler.utils import in_profiler_mode
from paddle import _C_ops, _legacy_C_ops
from paddle.device import get_all_custom_device_type
from paddle.fluid.framework import _global_flags

_grad_scalar = None


class TensorHookRemoveHelper:
    """
    A helper class for removing a Tensor's gradient hook.

    NOTE(wuweilong): the operation weakref.ref(tensor) will cause some unexpected errors in eager mode.
    """

    def __init__(self, tensor, hook_id):
        self._tensor = (
            tensor
            if framework.global_var._in_eager_mode_
            else weakref.ref(tensor)
        )
        self._hook_id = hook_id

    def remove(self):
        """
        Remove the reference Tensor's hook.

        Returns:
            bool: True if the hook is removed successfully.
        """
        tensor = (
            self._tensor
            if framework.global_var._in_eager_mode_
            else self._tensor()
        )
        if tensor is not None:
            res = tensor._remove_grad_hook(self._hook_id)
            if res is True:
                return True
            else:
                warnings.warn(
                    "The backward hook (ID: %d) of Tensor `%s` you want to remove does not exist or has been removed."
                    % (self._hook_id, tensor.name),
                    RuntimeWarning,
                )
        return False
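
# Usage sketch (illustrative): `register_hook`, defined below, returns a
# TensorHookRemoveHelper, so a registered hook can later be detached:
#   h = x.register_hook(lambda grad: grad * 2)   # x: some trainable tensor
#   ...
#   h.remove()  # the hook no longer fires on later backward passes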


_already_patch_repr = False


def monkey_patch_tensor():
    @switch_to_static_graph
    def _to_static_var(self, to_parameter=False, **kwargs):
        """
        **Notes**:
            **This API is ONLY available in Dygraph mode**

        Transform a Tensor into static Variable with same attributes. It's a low level interface used
        in dy2static and shall not be called directly.

        Args:
            to_parameter (bool): It takes effect only if the input is a Tensor. If set True,
                                 the Tensor will be converted into framework.Parameters. Otherwise, it will
                                 be converted into framework.Variable. Default False.

        Examples:
            .. code-block:: python

                import paddle.fluid as fluid
                from paddle.fluid.dygraph.base import to_variable
                import numpy as np

                data = np.ones([3, 1024], dtype='float32')
                with fluid.dygraph.guard():
                    tensor = to_variable(data)
                    static_var = tensor._to_static_var()

        """
        # Note: getattr(self, attr, None) will call x.grad=x.gradient(), but gradient() is only available in dygraph.
        # It will fail. So, for properties that differ between dynamic and static graph, do not getattr(self, attr, None).
        attr_not_need_keys = ['grad', 'T', 'place', '_place_str']
        param_keys = ['stop_gradient', 'trainable']
        if isinstance(self, EagerParamBase):
            attr_kwargs = self.__dict__.copy()
            for key in param_keys:
                attr_kwargs[key] = getattr(self, key)
        else:
            attr_names = []
            for name in dir(self):
                if name not in attr_not_need_keys:
                    if not inspect.ismethod(
                        getattr(self, name)
                    ) and not name.startswith('_'):
                        attr_names.append(name)
            attr_kwargs = {name: getattr(self, name) for name in attr_names}

        attr_keys = ['block', 'shape', 'dtype', 'type', 'name', 'persistable']
        for attr in attr_keys:
            attr_kwargs[attr] = getattr(self, attr, None)

        # If block is specified, use it instead of self.block
        if 'block' in kwargs:
            attr_kwargs['block'] = kwargs['block']

        attr_kwargs.update(kwargs)

        if to_parameter or isinstance(self, EagerParamBase):
            del attr_kwargs['persistable']
            # NOTE(Aurelius84): All parameters should be placed into global block.
            attr_kwargs['block'] = attr_kwargs['block'].program.global_block()
            static_var = Parameter(**attr_kwargs)
        else:
            static_var = Variable(**attr_kwargs)
        return static_var
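
    # Illustrative note (added): the returned Variable preserves shape, dtype,
    # name and persistable, so e.g.
    #   static_v = t._to_static_var(to_parameter=True)
    # yields a framework.Parameter placed in the program's global block.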

    # TODO(jiabin): move this to the C++ end if we find some performance issue with it
    @framework.dygraph_only
    def set_value(self, value):
        """
        **Notes**:
            **This API is ONLY available in Dygraph mode**

        Set a new value for this Variable.

        Args:
            value (Variable|np.ndarray): the new value.

        Examples:
            .. code-block:: python

                import paddle.fluid as fluid
                from paddle.fluid.dygraph.base import to_variable
                from paddle.nn import Linear
                import numpy as np

                data = np.ones([3, 1024], dtype='float32')
                with fluid.dygraph.guard():
                    linear = Linear(1024, 4)
                    t = to_variable(data)
                    linear(t)  # call with default weight
                    custom_weight = np.random.randn(1024, 4).astype("float32")
                    linear.weight.set_value(custom_weight)  # change existing weight
                    out = linear(t)  # call with different weight

        """
        base_tensor = core.eager.Tensor
        assert isinstance(
            value, (np.ndarray, base_tensor, dict, str)
        ), "Variable set_value function, arguments type only support Variable, numpy, Tensor, dict, string."

        if isinstance(value, (dict, str)):
            assert len(self) == len(
                value
            ), "Variable length not match, Variable [ {} ] need tensor with length {} but load set tensor with length {}".format(
                self.name, len(self), len(value)
            )
            if isinstance(value, dict):
                self.value().set_vocab(value)
            else:
                self.value().set_string_list(value)
        else:
            assert self.shape == list(
                value.shape
            ), "Variable Shape not match, Variable [ {} ] need tensor with shape {} but load set tensor with shape {}".format(
                self.name, self.shape, value.shape
            )

            if isinstance(value, base_tensor):
                dtype = value.dtype
            else:
                dtype = convert_np_dtype_to_dtype_(value.dtype)

            assert (
                self.dtype == dtype
            ), "Variable dtype not match, Variable [ {} ] need tensor with dtype {}  but load tensor with dtype {}".format(
                self.name, self.dtype, dtype
            )

            # NOTE(wuweilong): self could be Tensor, and the subsequent behavior is defined in different files:
            # if self is Tensor, method value() returns self as defined in this file, while get_tensor() is defined in eager_method.cc.
            # This interface behavior will be unified in the future.
            self.value().get_tensor().set(
                value, framework._current_expected_place()
            )

    @framework.dygraph_only
    def backward(self, grad_tensor=None, retain_graph=False):
        """
        Run backward of current Graph which starts from current Tensor.

        The new gradient will accumulate on previous gradient.

        You can clear gradient by ``Tensor.clear_grad()`` .

        Args:
            grad_tensor(Tensor, optional): initial gradient values of the current Tensor. If `grad_tensor` is None,
                the initial gradient values of the current Tensor would be Tensor filled with 1.0;
                if `grad_tensor` is not None, it must have the same length as the current Tensor.
                The default value is None.
            retain_graph(bool, optional): If False, the graph used to compute grads will be freed. If you would
                like to add more ops to the built graph after calling this method( :code:`backward` ), set the parameter
                :code:`retain_graph` to True, then the grads will be retained. Thus, setting it to False is much more memory-efficient.
                Defaults to False.

        Returns:
            NoneType: None

        Examples:
            .. code-block:: python

                import paddle
                x = paddle.to_tensor(5., stop_gradient=False)
                for i in range(5):
                    y = paddle.pow(x, 4.0)
                    y.backward()
                    print("{}: {}".format(i, x.grad))
                # 0: [500.]
                # 1: [1000.]
                # 2: [1500.]
                # 3: [2000.]
                # 4: [2500.]

                x.clear_grad()
                print("{}".format(x.grad))
                # 0.

                grad_tensor=paddle.to_tensor(2.)
                for i in range(5):
                    y = paddle.pow(x, 4.0)
                    y.backward(grad_tensor)
                    print("{}: {}".format(i, x.grad))
                # 0: [1000.]
                # 1: [2000.]
                # 2: [3000.]
                # 3: [4000.]
                # 4: [5000.]

        """
        if framework._non_static_mode():
            if in_profiler_mode():
                record_event = profiler.RecordEvent(
                    "Gradient Backward", profiler.TracerEventType.Backward
                )
                record_event.begin()
            if grad_tensor is not None:
                if framework.global_var._in_eager_mode_:
                    assert isinstance(
                        grad_tensor, core.eager.Tensor
                    ), "The type of grad_tensor must be paddle.Tensor"
                else:
                    assert isinstance(
                        grad_tensor, paddle.Tensor
                    ), "The type of grad_tensor must be paddle.Tensor"
                assert (
                    grad_tensor.shape == self.shape
                ), "Tensor shape not match, Tensor of grad_tensor [ {} ] with shape {} mismatch Tensor [ {} ] with shape {}".format(
                    grad_tensor.name, grad_tensor.shape, self.name, self.shape
                )

            if framework.global_var._in_eager_mode_:
                if grad_tensor is None:
                    grad_tensor = []
                else:
                    grad_tensor = [grad_tensor]
            if _grad_scalar:
                # When using amp with Fleet DistributedStrategy, we do loss scaling implicitly.
                self = _grad_scalar.scale(self)
            if framework.global_var._in_eager_mode_:
                core.eager.run_backward([self], grad_tensor, retain_graph)
            else:
                core.dygraph_run_backward(
                    [self],
                    [grad_tensor],
                    retain_graph,
                    framework._dygraph_tracer(),
                )
            if in_profiler_mode():
                record_event.end()
        else:
            raise ValueError(
                "Variable.backward() is only available in DyGraph mode"
            )

    @framework.dygraph_only
    @deprecated(
        since="2.1.0",
        level=1,
        reason="Please use tensor.grad, which returns the tensor value of the gradient.",
    )
    def gradient(self):
        """
        .. warning::
          This API will be deprecated in the future, it is recommended to use
          :code:`x.grad` which returns the tensor value of the gradient.

        Get the Gradient of Current Tensor.

        Returns:
            ndarray: Numpy value of the gradient of current Tensor

        Examples:
            .. code-block:: python

                import paddle

                x = paddle.to_tensor(5., stop_gradient=False)
                y = paddle.pow(x, 4.0)
                y.backward()
                print("grad of x: {}".format(x.gradient()))
                # [500.]

        """
        if framework.global_var._in_eager_mode_:
            if self.grad is None:
                return None
            if self.grad.is_selected_rows():
                return (np.array(self.grad), np.array(self.grad.rows()))
            return np.array(self.grad)
        else:
            if self._grad_ivar() is None:
                return None

            new_ivar = self._grad_ivar()
            # TODO(qili93): temporary for Ascend NPU performance, to be removed along with the npu_identity op
            if (
                _global_flags()['FLAGS_npu_storage_format']
                and 'npu' in get_all_custom_device_type()
            ):
                new_ivar = paddle.incubate._npu_identity(x=new_ivar, format=-1)
            new_ivar = new_ivar._copy_to(core.CPUPlace(), True)
            if self._grad_ivar().type == core.VarDesc.VarType.SELECTED_ROWS:
                return (
                    np.array(new_ivar.value().get_selected_rows().get_tensor()),
                    np.array(new_ivar.value().get_selected_rows().rows()),
                )
            else:
                return np.array(new_ivar.value().get_tensor())

    @framework.dygraph_only
    def register_hook(self, hook):
        """
        Registers a backward hook for current Tensor.

        The hook will be called every time the gradient Tensor of current Tensor is computed.

        The hook should not modify the input gradient Tensor, but it can optionally return
        a new gradient Tensor which will be used in place of current Tensor's gradient.

        The hook should have the following signature:

            hook(grad) -> Tensor or None

        Args:
            hook(function): A backward hook to be registered for Tensor.grad

        Returns:
            TensorHookRemoveHelper: A helper object that can be used to remove the registered hook by calling the `remove()` method.

        Examples:
            .. code-block:: python

                import paddle

                # hook function returns None
                def print_hook_fn(grad):
                    print(grad)

                # hook function returns Tensor
                def double_hook_fn(grad):
                    grad = grad * 2
                    return grad

                x = paddle.to_tensor([0., 1., 2., 3.], stop_gradient=False)
                y = paddle.to_tensor([4., 5., 6., 7.], stop_gradient=False)
                z = paddle.to_tensor([1., 2., 3., 4.])

                # one Tensor can register multiple hooks
                h = x.register_hook(print_hook_fn)
                x.register_hook(double_hook_fn)

                w = x + y
                # register hook by lambda function
                w.register_hook(lambda grad: grad * 2)

                o = z.matmul(w)
                o.backward()
                # print_hook_fn prints its content during backward
                # Tensor(shape=[4], dtype=float32, place=CUDAPlace(0), stop_gradient=False,
                #        [2., 4., 6., 8.])

                print("w.grad:", w.grad) # w.grad: [1. 2. 3. 4.]
                print("x.grad:", x.grad) # x.grad: [ 4.  8. 12. 16.]
                print("y.grad:", y.grad) # y.grad: [2. 4. 6. 8.]

                # remove hook
                h.remove()
        """
        if self.stop_gradient is True:
            raise RuntimeError(
                "Cannot register hook on a tensor whose stop_gradient is True."
            )

        hook_id = self._register_grad_hook(hook)
        helper = TensorHookRemoveHelper(self, hook_id)
        return helper

    @framework.dygraph_only
    def _to(self, device=None, dtype=None, blocking=None):
        if device is None and dtype is None and blocking is None:
            return self

        if device is not None:
            if isinstance(device, str):
                device = paddle.device._convert_to_place(device)
            elif isinstance(
                device,
                (
                    core.CPUPlace,
                    core.CUDAPlace,
                    core.CUDAPinnedPlace,
                    core.XPUPlace,
                    core.CustomPlace,
                ),
            ):
                pass
            else:
                raise ValueError(
                    "device value error, must be str, paddle.CPUPlace(), paddle.CUDAPlace(), paddle.CUDAPinnedPlace(), paddle.XPUPlace() or paddle.CustomPlace(), but the type of device is "
                    + type(device).__name__
                )

        if blocking is None:
            blocking = True
        else:
            assert isinstance(
                blocking, bool
            ), "blocking value error, must be True, False or None"

        def transform(t, device, dtype, blocking):
            if device is None:
                device = t.place
            if dtype is None:
                dtype = t.dtype
            if type(dtype) is str:
                dtype = framework.convert_np_dtype_to_dtype_(dtype)

            # 1. GPU places need to determine whether the memory is sufficient for allocation.
            if t.place.is_gpu_place():
                size_dtype = core.size_of_dtype(dtype)
                # Note(weilong wu): Paddle GPU minimum memory allocation unit is 256 bytes,
                # waiting_alloc_memory will compute the memory space occupied by 't'.
                # Coefficient 1.2 is used to avoid OOM that may occur in this critical state when the memory is just enough.
                waiting_alloc_memory = (
                    ((t._numel() * size_dtype) / 256 + 1) * 256 * 1.2
                )
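                # Worked example (illustrative): a float32 tensor with 1000
                # elements occupies 4000 bytes; (4000 / 256 + 1) * 256 = 4256
                # bytes after rounding up to the 256-byte unit, and the 1.2
                # safety factor raises the estimate to ~5107 bytes.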
                gpu_memory_available = core.gpu_memory_available()
                if gpu_memory_available < waiting_alloc_memory:
                    # Copy Tensor to cpu
                    t_used = t._copy_to(paddle.CPUPlace(), blocking)
                    # Release memory of t
                    t._clear()
                else:
                    # Tensor still in GPU
                    t_used = t
            else:
                t_used = t

            # 2. cast Tensor to dtype
            if dtype is not None and dtype != t_used.dtype:
                with paddle.fluid.framework._dygraph_place_guard(
                    place=t_used.place
                ):
                    t_casted = t_used.cast(dtype=dtype)
            else:
                t_casted = t_used

            # 3. Copy casted Tensor(in CPU or GPU) to device
            if device is not None and not t_casted.place._equals(device):
                new_t = t_casted._copy_to(device, blocking)
            else:
                new_t = t_casted

            # 4. Share Tensor to origin Tensor
            dst_tensor = t.value().get_tensor()
            src_tensor = new_t.value().get_tensor()
            dst_tensor._share_data_with(src_tensor)

            return t

        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=UserWarning)
            return transform(self, device, dtype, blocking)
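
    # Usage sketch (illustrative assumption, not part of the public docs):
    #   x._to(device='cpu', dtype='float16')
    # casts and moves `x` in place, since step 4 of `transform` shares the
    # result's storage back into the original tensor.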

    @property
    def grad(self):
        """
        .. warning::
          This API will return the tensor value of the gradient. If you want
          to get the numpy value of the gradient, you can use :code:`x.grad.numpy()`.

        Get the Gradient of Current Tensor.

        Returns:
            Tensor: the gradient of current Tensor

        Examples:
            .. code-block:: python

                import paddle

                x = paddle.to_tensor(5., stop_gradient=False)
                y = paddle.pow(x, 4.0)
                y.backward()
                print("grad of x: {}".format(x.grad))
                # Tensor(shape=[1], dtype=float32, place=CUDAPlace(0), stop_gradient=False, [500.])

        """
        msg = (
            'tensor.grad will return the tensor value of the gradient.'
            ' This is an incompatible upgrade for tensor.grad API. '
            ' Its return type changes from numpy.ndarray in version 2.0 to paddle.Tensor in version 2.1.0. '
            ' If you want to get the numpy value of the gradient, you can use :code:`x.grad.numpy()`'
        )
        warning_msg = "\033[93m\nWarning:\n%s \033[0m" % (msg)
        # ensure ANSI escape sequences print correctly in cmd and powershell
        if sys.platform.lower() == 'win32':
            warning_msg = "\nWarning:\n%s " % (msg)
        warnings.warn(warning_msg)
        return self._grad_ivar()

    def clear_grad(self):
        """
        The alias of clear_gradient().
        """
        self.clear_gradient()

    def item(self, *args):
        """
        Convert the element at a specific position in the Tensor into a Python scalar. If the position is not specified, the Tensor must be a
        single-element Tensor.

        Args:
            *args(int): The input coordinates. If it's a single int, the data at the corresponding order of the flattened Tensor will be returned.
                Default: None, and it must be in the case where Tensor has only one element.

        Returns(Python scalar): A Python scalar, whose dtype corresponds to the dtype of the Tensor.

        Raises:
            ValueError: If the Tensor has more than one element, there must be coordinates.

        Examples:
            .. code-block:: python

                import paddle

                x = paddle.to_tensor(1)
                print(x.item())             #1
                print(type(x.item()))       #<class 'int'>

                x = paddle.to_tensor(1.0)
                print(x.item())             #1.0
                print(type(x.item()))       #<class 'float'>

                x = paddle.to_tensor(True)
                print(x.item())             #True
                print(type(x.item()))       #<class 'bool'>

                x = paddle.to_tensor(1+1j)
                print(x.item())             #(1+1j)
                print(type(x.item()))       #<class 'complex'>

                x = paddle.to_tensor([[1.1, 2.2, 3.3]])
                print(x.item(2))            #3.3
                print(x.item(0, 2))         #3.3

        """
        scalar = self._getitem_from_offset(*args)
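        # Paddle stores bfloat16 as np.uint16, so convert it to float before
        # extracting the Python scalar.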
        if scalar.dtype == np.uint16:
            return convert_uint16_to_float(scalar).item()
        return scalar.item()
    @property
    def inplace_version(self):
        """
        The inplace version of current Tensor.
        The version number is incremented whenever the current Tensor is modified through an inplace operation.

        **Notes: This is a read-only property**

        Examples:
          .. code-block:: python

            import paddle
            var = paddle.ones(shape=[4, 2, 3], dtype="float32")
            print(var.inplace_version)  # 0

            var[1] = 2.2
            print(var.inplace_version)  # 1

        """
        return self._inplace_version()

    def __str__(self):
        """
        Convert a Tensor object to a readable string.

        Returns(str): A readable string.

        Examples:
            .. code-block:: python

                import paddle
                x = paddle.rand([2, 5])
                print(x)

                # Tensor(shape=[2, 5], dtype=float32, place=CPUPlace,
                #        [[0.30574632, 0.55739117, 0.30902600, 0.39413780, 0.44830436],
                #         [0.79010487, 0.53972793, 0.09495186, 0.44267157, 0.72112119]])
        """
        from paddle.tensor.to_string import tensor_to_string

        return tensor_to_string(self)

    def __deepcopy__(self, memo):
        """
        Deep copy Tensor; it always performs a Tensor copy.

        Examples:
            .. code-block:: python

                import paddle
                import copy
                x = paddle.to_tensor(2.)
                y = copy.deepcopy(x)

                print(x)
                # Tensor(shape=[1], dtype=float32, place=CPUPlace, stop_gradient=True,
                #        [2.])

                print(y)
                # Tensor(shape=[1], dtype=float32, place=CPUPlace, stop_gradient=True,
                #        [2.])

        """
        if not self.is_leaf:
            raise RuntimeError(
                "Only leaf Tensors support deepcopy at the moment; non-leaf Tensors contain graph information that doesn't support deepcopy"
            )
        new_tensor = core.eager.Tensor()
        new_tensor.name = self.name + unique_name.generate("_deepcopy")
        memo[id(self)] = new_tensor
        new_tensor.copy_(self, True)
        return new_tensor

    @property
    def block(self):
        return framework.default_main_program().global_block()

    def __nonzero__(self):
        # np.prod([]) -> np.float64, so use int
        numel = int(np.prod(self.shape))
        assert (
            numel == 1
        ), "When Variable is used as the condition of if/while, Variable can only contain one element."
        if framework.global_var._in_eager_mode_:
            assert self._is_initialized(), "tensor not initialized"
            return bool(np.array(self) > 0)
        else:
            tensor = self.value().get_tensor()
            assert tensor._is_initialized(), "tensor not initialized"
            return bool(np.array(tensor) > 0)

    def __bool__(self):
        return self.__nonzero__()
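
    # Note (descriptive): truthiness above is defined as "the single element is
    # greater than zero", so bool(paddle.to_tensor(-1.)) evaluates to False
    # even though the value is nonzero.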

    def __array__(self, dtype=None):
        """
        Returns a numpy array that shows the value of the current Tensor.

        Returns:
            ndarray: The numpy value of the current Tensor.

        Returns type:
            ndarray: dtype is same as the current Tensor

        Examples:
            .. code-block:: python

                import paddle
                import numpy as np
                x = paddle.randn([2, 2])
                x_array = np.array(x)

                print(type(x_array))      #<class 'numpy.ndarray'>
                print(x_array.shape)      #(2, 2)
        """
        array = self.numpy(False)
        if dtype:
            array = array.astype(dtype)
        return array

    def contain_tensor(item):
        if not isinstance(item, (tuple, list)):
            item = [item]

        for slice_item in item:
            if isinstance(slice_item, slice):
                if (
                    isinstance(slice_item.start, Variable)
                    or isinstance(slice_item.stop, Variable)
                    or isinstance(slice_item.step, Variable)
                ):
                    return True
            else:
                # Check the item's own dtype (the original compared the class
                # attribute Variable.dtype, which is always != paddle.bool);
                # bool masks are left to the C++ indexing path below.
                if (
                    isinstance(slice_item, (Variable, np.ndarray))
                    and slice_item.dtype != paddle.bool
                ):
                    return True
        return False

    def __getitem__(self, item):
        def is_list_tuple(index, contain_type):
            def _is_list_tuple(item):
                if isinstance(item, (tuple, list)):
                    for s in item:
                        if not _is_list_tuple(s):
                            return False
                else:
                    if type(item) != contain_type:
                        return False
                return True

            if not isinstance(index, (tuple, list)):
                return False
            for s in index:
                if not _is_list_tuple(s):
                    return False
            return True

        if contain_tensor(item) or is_list_tuple(item, int):
            # 1. Call _getitem_impl_ when the item contains a tensor.
            # Why not call a C++ function? Because the item can't be parsed when it contains a tensor.
            return _getitem_impl_(self, item)

        else:
            # 2. Call the C++ func getitem_index_not_tensor to speed up.
            return self._getitem_index_not_tensor(item)

    def __setitem__(self, item, value):
        def contain_tensor_or_list(item):
            if not isinstance(item, tuple):
                item = [item]

            for slice_item in item:
                if isinstance(slice_item, list):
                    return True
                elif isinstance(slice_item, Variable):
                    return True

            return False

        def is_combine_index(item):
            var_type = None
            item_type = None
            if isinstance(item, (tuple, list)):
                for slice_item in item:
                    if item_type is None:
                        item_type = type(slice_item)
                    else:
                        if type(slice_item) != item_type:
                            return True

                    if isinstance(slice_item, Variable):
                        if var_type is None:
                            var_type = slice_item.dtype
                        else:
                            if var_type != slice_item.dtype:
                                return True
                return False

            return False
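
        # Illustrative examples: (tensor_index, [0, 1]) mixes item types, and
        # (int64_index, bool_mask) mixes Variable dtypes; either one counts as
        # a combined index and bypasses the _setitem_impl_ branch below.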

        if contain_tensor_or_list(item) and not is_combine_index(item):
            # To reuse code with static graph,
            # call _setitem_impl_ when the item contains a tensor or list.
            return _setitem_impl_(self, item, value)

        else:
            if framework.global_var._in_eager_mode_:
                return self.__setitem_eager_tensor__(item, value)
            else:
                # Call the C++ func __setitem_varbase__ to speed up.
                return self.__setitem_varbase__(item, value)

    @framework.dygraph_only
    def _set_grad_ivar(self, value):
        if isinstance(self, EagerParamBase):
            self.grad = value
            self._unset_fake_empty()
        else:
            raise TypeError(
                "_set_grad_ivar is only supported for Parameter Tensor"
            )

    @framework.dygraph_only
    def value(self):
        return self

    @framework.dygraph_only
    def _slice(self, begin_idx, end_idx):
        return core.eager.Tensor(self.get_tensor()._slice(begin_idx, end_idx))

    @framework.dygraph_only
    def _numel(self):
        return self.get_tensor()._numel()

    @framework.dygraph_only
    def _clear_data(self):
        self.get_tensor()._clear()

    @framework.dygraph_only
    def _use_gpudnn(self, use_gpudnn=True):
        return self._tensor_use_gpudnn(use_gpudnn)

    @framework.dygraph_only
    def _uva(self, device_id=0):
        '''
        Returns self tensor with the UVA(unified virtual addressing).

        Args:
            device_id(int, optional): The destination GPU device id. Default: 0.

        Examples:
            .. code-block:: python

              # required: gpu
              import paddle
              x = paddle.to_tensor([1, 2, 3], place=paddle.CPUPlace())
              x._uva()
              print(x)
        '''
        self._tensor_uva(device_id)

    @framework.dygraph_only
    def cpu(self):
        if self.place.is_cpu_place():
            return self
        else:
            res = self._copy_to(core.CPUPlace(), True)
            res.stop_gradient = self.stop_gradient
            res.persistable = self.persistable
            return res

    @framework.dygraph_only
    def cuda(self, device_id=None, blocking=True):
        if device_id is None:
            res_place = framework._current_expected_place()
            if not isinstance(res_place, core.CUDAPlace):
                res_place = core.CUDAPlace(0)
        elif isinstance(device_id, int):
            res_place = core.CUDAPlace(device_id)
        else:
            raise ValueError("device_id must be int|None")

        if self.place._equals(res_place):
            return self
        else:
            res = self._copy_to(res_place, True)
            res.stop_gradient = self.stop_gradient
            res.persistable = self.persistable
            return res
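
    # Hedged usage sketch (illustrative; assumes a CUDA build of Paddle):
    #   x = paddle.to_tensor([1.0, 2.0])  # created on the current device
    #   x_gpu = x.cuda(0)                 # copy to GPU 0, or `x` itself if already there
    #   x_cpu = x_gpu.cpu()               # copy back, preserving stop_gradient/persistable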

    @framework.dygraph_only
    def pin_memory(self):
        if self.place.is_cuda_pinned_place():
            return self
        else:
            res = self._copy_to(core.CUDAPinnedPlace(), True)
            res.stop_gradient = self.stop_gradient
            res.persistable = self.persistable
            return res

    @framework.dygraph_only
    def values(self):
        """
        **Notes**:
            **This API is ONLY available in Dygraph mode**

        Get the values of the current SparseTensor(COO or CSR).

        Returns:
            Tensor: A DenseTensor

        Examples:
            .. code-block:: python

                import paddle
                indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
                values = [1, 2, 3, 4, 5]
                dense_shape = [3, 4]
                sparse_x = paddle.sparse.sparse_coo_tensor(paddle.to_tensor(indices, dtype='int32'), paddle.to_tensor(values, dtype='float32'), shape=dense_shape)
                print(sparse_x.values())
                #[1, 2, 3, 4, 5]
        """
        return _C_ops.sparse_values(self)

    @framework.dygraph_only
    def to_dense(self):
        """
        **Notes**:
            **This API is ONLY available in Dygraph mode**

        Convert the current SparseTensor(COO or CSR) to DenseTensor.

        Returns:
            Tensor: A DenseTensor

        Examples:
            .. code-block:: python

                import paddle
                indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
                values = [1, 2, 3, 4, 5]
                dense_shape = [3, 4]
                sparse_x = paddle.sparse.sparse_coo_tensor(paddle.to_tensor(indices, dtype='int64'), paddle.to_tensor(values, dtype='float32'), shape=dense_shape)
                dense_x = sparse_x.to_dense()
                #[[0., 1., 0., 2.],
                # [0., 0., 3., 0.],
                # [4., 5., 0., 0.]]
        """
        return _C_ops.sparse_to_dense(self)

    @framework.dygraph_only
    def to_sparse_coo(self, sparse_dim):
        """
        **Notes**:
            **This API is ONLY available in Dygraph mode**

        Convert the current DenseTensor to SparseTensor in COO format.

        Returns:
            Tensor: A SparseCooTensor

        Examples:
            .. code-block:: python

                import paddle
                dense_x = [[0, 1, 0, 2], [0, 0, 3, 4]]
                dense_x = paddle.to_tensor(dense_x, dtype='float32')
                sparse_x = dense_x.to_sparse_coo(sparse_dim=2)
                #indices=[[0, 0, 1, 1],
                #         [1, 3, 2, 3]],
                #values=[1., 2., 3., 4.]
        """
        return _C_ops.sparse_to_sparse_coo(self, sparse_dim)
    def __hash__(self):
        return hash(id(self))
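
    # Note (descriptive): hashing is by object identity, so two Tensors holding
    # equal values generally hash differently, and a Tensor keeps the same hash
    # even after in-place modification.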

    if framework.global_var._in_eager_mode_ and not hasattr(core, "eager"):
        return

    for method_name, method in (
        ("__bool__", __bool__),
        ("__nonzero__", __nonzero__),
        ("_to_static_var", _to_static_var),
        ("set_value", set_value),
        ("block", block),
        ("backward", backward),
        ("clear_grad", clear_grad),
        ("inplace_version", inplace_version),
        ("gradient", gradient),
        ("register_hook", register_hook),
        ("__str__", __str__),
        ("__repr__", __str__),
        ("__deepcopy__", __deepcopy__),
        ("__module__", "paddle"),
        ("__array__", __array__),
        ("__getitem__", __getitem__),
        ("item", item),
        ("__setitem__", __setitem__),
        ("_to", _to),
        ("values", values),
        ("to_dense", to_dense),
        ("to_sparse_coo", to_sparse_coo),
    ):
        setattr(core.eager.Tensor, method_name, method)

    setattr(core.eager.Tensor, "_set_grad_ivar", _set_grad_ivar)
    setattr(core.eager.Tensor, "value", value)
    setattr(core.eager.Tensor, "cpu", cpu)
    setattr(core.eager.Tensor, "cuda", cuda)
    setattr(core.eager.Tensor, "pin_memory", pin_memory)
    setattr(core.eager.Tensor, "_slice", _slice)
    setattr(core.eager.Tensor, "_numel", _numel)
    setattr(core.eager.Tensor, "_uva", _uva)
    setattr(core.eager.Tensor, "_clear_data", _clear_data)
    setattr(core.eager.Tensor, "__hash__", __hash__)
    setattr(core.eager.Tensor, "_use_gpudnn", _use_gpudnn)

    global _already_patch_repr
    if not _already_patch_repr:
        # NOTE(zhiqiu): pybind11 will set a default __str__ method of enum class.
        # So, we need to overwrite it to a more readable one.
        # See details in https://github.com/pybind/pybind11/issues/2537.
        origin = getattr(core.VarDesc.VarType, "__str__")

        def dtype_str(dtype):
            if dtype in _PADDLE_DTYPE_2_NUMPY_DTYPE:
                numpy_dtype = _PADDLE_DTYPE_2_NUMPY_DTYPE[dtype]
                if numpy_dtype == 'uint16':
                    numpy_dtype = 'bfloat16'
                prefix = 'paddle.'
                return prefix + numpy_dtype
            else:
                # for example, paddle.fluid.core.VarDesc.VarType.LOD_TENSOR
                return origin(dtype)
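
        # Illustrative behavior of the patched __str__ (per the mapping above):
        #   str(core.VarDesc.VarType.FP32) -> 'paddle.float32'
        #   the uint16-backed dtype        -> 'paddle.bfloat16'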
        setattr(core.VarDesc.VarType, "__str__", dtype_str)
        _already_patch_repr = True
    # patch math methods for tensor
    monkey_patch_math_tensor()