# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
import numpy as np
import warnings
import weakref
import sys

import paddle
from .. import framework
from .. import core
from .. import unique_name
from ..framework import Variable, Parameter, ParamBase, _getitem_impl_, _setitem_impl_, _in_eager_mode, EagerParamBase
from .base import switch_to_static_graph
from .math_op_patch import monkey_patch_math_varbase
from .parallel import scale_loss
from paddle.fluid.data_feeder import convert_dtype, _PADDLE_DTYPE_2_NUMPY_DTYPE
import paddle.utils.deprecated as deprecated
import paddle.profiler as profiler
from paddle import _C_ops

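# When set (e.g. by Fleet's AMP distributed strategy), backward() uses this object
# to scale the starting Tensor so that loss scaling happens implicitly.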
_grad_scalar = None


class TensorHookRemoveHelper(object):
    """
    A helper class for removing a Tensor gradient's hook.

    NOTE(wuweilong): the operation weakref.ref(tensor) will cause some unexpected errors in eager mode.
    """

    def __init__(self, tensor, hook_id):
        self._tensor = tensor if core._in_eager_mode() else weakref.ref(tensor)
        self._hook_id = hook_id

    def remove(self):
        """
        Remove reference Tensor's hook.

        Returns:
            bool: Return True if removed successfully
        """
        tensor = self._tensor if core._in_eager_mode() else self._tensor()
        if tensor is not None:
            res = tensor._remove_grad_hook(self._hook_id)
            if res is True:
                return True
            else:
                warnings.warn(
                    "The backward hook (ID: %d) of Tensor `%s` you want to remove does not exist or has been removed."
                    % (self._hook_id, tensor.name), RuntimeWarning)
        return False


_already_patch_repr = False


def monkey_patch_varbase():
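    """
    Attach the dygraph Tensor methods defined below to ``core.VarBase`` (and, in
    eager mode, to ``core.eager.Tensor``) so that dygraph Tensors expose the
    Python-level API such as ``backward``, ``gradient`` and ``__getitem__``.
    """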
    @switch_to_static_graph
    def _to_static_var(self, to_parameter=False, **kwargs):
        """
        **Notes**:
            **This API is ONLY available in Dygraph mode**

        Transform a VarBase into static Variable with same attributes. It's a low level interface used
        in dy2static and shall not be called directly.

        Args:
            to_parameter (bool): It takes effect only if the input is a VarBase. If set True,
                                 the VarBase will be converted into framework.Parameter. Otherwise, it will
                                 be converted into framework.Variable. Default False.

        Examples:
            .. code-block:: python

                import paddle.fluid as fluid
                from paddle.fluid.dygraph.base import to_variable
                import numpy as np

                data = np.ones([3, 1024], dtype='float32')
                with fluid.dygraph.guard():
                    var_base = to_variable(data)
                    static_var = var_base._to_static_var()

        """

        # Note: getattr(self, attr, None) will call x.grad = x.gradient(), but gradient() is only available in dygraph.
        # It will fail. So, for properties that differ between dynamic and static graph, do not call getattr(self, attr, None).
        attr_not_need_keys = ['grad', 'T']
        if isinstance(self, (ParamBase, EagerParamBase)):
            attr_kwargs = self.__dict__.copy()
        else:
            attr_names = []
            for name in dir(self):
                if name not in attr_not_need_keys:
                    if not inspect.ismethod(getattr(
                            self, name)) and not name.startswith('_'):
                        attr_names.append(name)
            attr_kwargs = {name: getattr(self, name) for name in attr_names}

        attr_keys = ['block', 'shape', 'dtype', 'type', 'name', 'persistable']
        for attr in attr_keys:
            attr_kwargs[attr] = getattr(self, attr, None)

        attr_kwargs.update(kwargs)

        if to_parameter or isinstance(self, (ParamBase, EagerParamBase)):
            del attr_kwargs['persistable']
            # NOTE(Aurelius84): All parameters should be placed into global block.
            attr_kwargs['block'] = attr_kwargs['block'].program.global_block()
            static_var = Parameter(**attr_kwargs)
        else:
            static_var = Variable(**attr_kwargs)
        return static_var

    # TODO(jiabin): move this to cplusplus end if we find some performance issue on it
    @framework.dygraph_only
    def set_value(self, value):
        """
        **Notes**:
            **This API is ONLY available in Dygraph mode**

        Set a new value for this Variable.

        Args:
            value (Variable|np.ndarray): the new value.

        Examples:
            .. code-block:: python

                import paddle.fluid as fluid
                from paddle.fluid.dygraph.base import to_variable
                from paddle.fluid.dygraph import Linear
                import numpy as np

                data = np.ones([3, 1024], dtype='float32')
                with fluid.dygraph.guard():
                    linear = fluid.dygraph.Linear(1024, 4)
                    t = to_variable(data)
                    linear(t)  # call with default weight
                    custom_weight = np.random.randn(1024, 4).astype("float32")
                    linear.weight.set_value(custom_weight)  # change existing weight
                    out = linear(t)  # call with different weight

        """
        if core._in_eager_mode():
            base_tensor = core.eager.Tensor
        else:
            base_tensor = core.VarBase
        assert isinstance(value, (np.ndarray, base_tensor, dict, str)), \
            "Variable set_value function, the argument type only supports Variable, numpy.ndarray, VarBase, dict and str."

        if isinstance(value, (dict, str)):
            assert len(self) == len(
                value
            ), "Variable length does not match: Variable [ {} ] needs a tensor with length {} but the loaded tensor has length {}".format(
                self.name, len(self), len(value))
            if isinstance(value, dict):
                self.value().set_vocab(value)
            else:
                self.value().set_string_list(value)
        else:
            value_np = value
            if isinstance(value, base_tensor):
                value_np = value.numpy()

            self_tensor_np = self.numpy()

            assert self_tensor_np.shape == value_np.shape, \
                "Variable shape does not match: Variable [ {} ] needs a tensor with shape {} but the loaded tensor has shape {}".format(
                    self.name, self_tensor_np.shape, value_np.shape)

            assert self_tensor_np.dtype == value_np.dtype, \
                "Variable dtype does not match: Variable [ {} ] needs a tensor with dtype {} but the loaded tensor has dtype {}".format(
                    self.name, self_tensor_np.dtype, value_np.dtype)

            # NOTE(wuweilong): self could be VarBase or Tensor, the subsequent behavior is defined in different files
            # if self is VarBase, value() returns a Variable bound in imperative.cc, and get_tensor() is bound in pybind.cc
            # if self is Tensor, value() returns self as defined in this file, and get_tensor() is defined in eager_method.cc
            # this interface behavior will be unified in the future.
            self.value().get_tensor().set(value_np,
                                          framework._current_expected_place())

    @framework.dygraph_only
    def backward(self, grad_tensor=None, retain_graph=False):
        """
        Run backward of current Graph which starts from current Tensor.

        The new gradient will accumulate on the previous gradient.

        You can clear gradient by ``Tensor.clear_grad()`` .

        Args:
            grad_tensor(Tensor, optional): initial gradient values of the current Tensor. If `grad_tensor` is None,
                the initial gradient values of the current Tensor would be Tensor filled with 1.0;
                if `grad_tensor` is not None, it must have the same length as the current Tensor.
                The default value is None.

            retain_graph(bool, optional): If False, the graph used to compute grads will be freed. If you would
                like to add more ops to the built graph after calling this method( :code:`backward` ), set the parameter
                :code:`retain_graph` to True, then the grads will be retained. Thus, setting it to False is much more memory-efficient.
                Defaults to False.
        Returns:
            NoneType: None

        Examples:
            .. code-block:: python

                import paddle
                x = paddle.to_tensor(5., stop_gradient=False)
                for i in range(5):
                    y = paddle.pow(x, 4.0)
                    y.backward()
                    print("{}: {}".format(i, x.grad))
                # 0: [500.]
                # 1: [1000.]
                # 2: [1500.]
                # 3: [2000.]
                # 4: [2500.]

                x.clear_grad()
                print("{}".format(x.grad))
                # 0.

                grad_tensor=paddle.to_tensor(2.)
                for i in range(5):
                    y = paddle.pow(x, 4.0)
                    y.backward(grad_tensor)
                    print("{}: {}".format(i, x.grad))
                # 0: [1000.]
                # 1: [2000.]
                # 2: [3000.]
                # 3: [4000.]
                # 4: [5000.]

        """
        if framework.in_dygraph_mode():
            record_event = profiler.RecordEvent(
                "Gradient Backward", profiler.TracerEventType.Backward)
            record_event.begin()
            if grad_tensor is not None:
                if core._in_eager_mode():
                    assert isinstance(
                        grad_tensor, core.eager.
                        Tensor), "The type of grad_tensor must be paddle.Tensor"
                else:
                    assert isinstance(
                        grad_tensor, paddle.
                        Tensor), "The type of grad_tensor must be paddle.Tensor"
                assert grad_tensor.shape == self.shape, \
                    "Tensor shape does not match: grad_tensor [ {} ] with shape {} does not match Tensor [ {} ] with shape {}".format(
                    grad_tensor.name, grad_tensor.shape, self.name, self.shape)

            if core._in_eager_mode():
                if grad_tensor is None:
                    grad_tensor = []
                else:
                    grad_tensor = [grad_tensor]
            if _grad_scalar:
                # When using amp with Fleet DistributedStrategy, we do loss scaling implicitly.
                self = _grad_scalar.scale(self)
            if paddle.is_compiled_with_xpu() or paddle.is_compiled_with_npu():
                # TODO(liuyuhui): Currently only for xpu. Will be removed in the future.
                scaled_loss = scale_loss(self)
                if core._in_eager_mode():
                    core.eager.run_backward([scaled_loss], grad_tensor,
                                            retain_graph)
                else:
                    core.dygraph_run_backward([scaled_loss], [grad_tensor],
                                              retain_graph,
                                              framework._dygraph_tracer())
            else:
                if core._in_eager_mode():
                    core.eager.run_backward([self], grad_tensor, retain_graph)
                else:
                    core.dygraph_run_backward([self], [grad_tensor],
                                              retain_graph,
                                              framework._dygraph_tracer())
            record_event.end()
        else:
            raise ValueError(
                "Variable.backward() is only available in DyGraph mode")

    @framework.dygraph_only
    @deprecated(
        since="2.1.0",
        level=1,
        reason="Please use tensor.grad, which returns the tensor value of the gradient."
    )
    def gradient(self):
        """
        .. warning::
          This API will be deprecated in the future, it is recommended to use
          :code:`x.grad` which returns the tensor value of the gradient.

        Get the Gradient of Current Tensor.

        Returns:
            ndarray: Numpy value of the gradient of current Tensor

        Examples:
            .. code-block:: python

                import paddle

                x = paddle.to_tensor(5., stop_gradient=False)
                y = paddle.pow(x, 4.0)
                y.backward()
                print("grad of x: {}".format(x.gradient()))
                # [500.]

        """
        if core._in_eager_mode():
            if self.grad is None:
                return None
            # TODO(wanghuancoder) support SELECTED_ROWS
            return self.grad.numpy()
        else:
            if self._grad_ivar() is None:
                return None

            new_ivar = self._grad_ivar()._copy_to(core.CPUPlace(), True)
            if self._grad_ivar().type == core.VarDesc.VarType.SELECTED_ROWS:
                return (
                    np.array(new_ivar.value().get_selected_rows().get_tensor()),
                    np.array(new_ivar.value().get_selected_rows().rows()))
            else:
                return np.array(new_ivar.value().get_tensor())

    @framework.dygraph_only
    def register_hook(self, hook):
        """
        Registers a backward hook for current Tensor.

        The hook will be called every time the gradient Tensor of current Tensor is computed.

        The hook should not modify the input gradient Tensor, but it can optionally return
        a new gradient Tensor which will be used in place of current Tensor's gradient.

        The hook should have the following signature:

            hook(grad) -> Tensor or None

        Args:
            hook(function): A backward hook to be registered for Tensor.grad

        Returns:
            TensorHookRemoveHelper: A helper object that can be used to remove the registered hook by calling `remove()` method.

        Examples:
            .. code-block:: python

                import paddle

                # hook function return None
                def print_hook_fn(grad):
                    print(grad)

                # hook function return Tensor
                def double_hook_fn(grad):
                    grad = grad * 2
                    return grad

                x = paddle.to_tensor([0., 1., 2., 3.], stop_gradient=False)
                y = paddle.to_tensor([4., 5., 6., 7.], stop_gradient=False)
                z = paddle.to_tensor([1., 2., 3., 4.])

                # one Tensor can register multiple hooks
                h = x.register_hook(print_hook_fn)
                x.register_hook(double_hook_fn)

                w = x + y
                # register hook by lambda function
                w.register_hook(lambda grad: grad * 2)

                o = z.matmul(w)
                o.backward()
                # print_hook_fn print content in backward
                # Tensor(shape=[4], dtype=float32, place=CUDAPlace(0), stop_gradient=False,
                #        [2., 4., 6., 8.])

                print("w.grad:", w.grad) # w.grad: [1. 2. 3. 4.]
                print("x.grad:", x.grad) # x.grad: [ 4.  8. 12. 16.]
                print("y.grad:", y.grad) # y.grad: [2. 4. 6. 8.]

                # remove hook
                h.remove()
        """
        if self.stop_gradient is True:
            raise RuntimeError(
                "Cannot register hook on a tensor whose stop_gradient is True.")

        hook_id = self._register_grad_hook(hook)
        helper = TensorHookRemoveHelper(self, hook_id)
        return helper

    @framework.dygraph_only
    def _to(self, device=None, dtype=None, blocking=None):
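        # Internal helper: casts and/or moves this Tensor to the given dtype/device and
        # shares the converted buffer back into the original Tensor (see step 4 below).
        # A minimal usage sketch, assuming an existing Tensor `x` (the device and dtype
        # values here are illustrative only):
        #
        #     x._to(device='cpu', dtype='float32')  # x now holds the converted data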

        if device is None and dtype is None and blocking is None:
            return self

        if device is not None:
            if isinstance(device, str):
                device = paddle.device._convert_to_place(device)
            elif isinstance(device, (core.CPUPlace, core.CUDAPlace,
                                     core.CUDAPinnedPlace, core.XPUPlace)):
                pass
            else:
                raise ValueError(
                    "device value error, must be str, paddle.CPUPlace(), paddle.CUDAPlace(), paddle.CUDAPinnedPlace() or paddle.XPUPlace(), but the type of device is "
                    + type(device).__name__)

        if blocking is None:
            blocking = True
        else:
            assert isinstance(
                blocking,
                bool), "blocking value error, must be True, False or None"

        def transform(t, device, dtype, blocking):
            if device is None:
                device = t.place
            if dtype is None:
                dtype = t.dtype
            if type(dtype) is str:
                dtype = framework.convert_np_dtype_to_dtype_(dtype)

            # 1. gpu place needs to determine whether the memory is sufficient for allocation.
            if t.place.is_gpu_place():
                size_dtype = core.size_of_dtype(dtype)
                # Note(weilong wu): Paddle GPU minimum memory allocation unit is 256 bytes,
                # waiting_alloc_memory will compute the memory space occupied by 't'.
                # Coefficient 1.2 is used to avoid OOM that may occur in this critical state when the memory is just enough.
                waiting_alloc_memory = (
                    (t._numel() * size_dtype) / 256 + 1) * 256 * 1.2
                gpu_memory_available = core.gpu_memory_available()
                if gpu_memory_available < waiting_alloc_memory:
                    # Copy Tensor to cpu
                    t_used = t._copy_to(paddle.CPUPlace(), blocking)
                    # Release memory of t
                    t._clear()
                else:
                    # Tensor still in GPU
                    t_used = t
            else:
                t_used = t

            # 2. cast Tensor to dtype
            if dtype is not None and dtype != t_used.dtype:
                with paddle.fluid.framework._dygraph_place_guard(
                        place=t_used.place):
                    t_casted = t_used.cast(dtype=dtype)
            else:
                t_casted = t_used

            # 3. Copy casted Tensor(in CPU or GPU) to device
            if device is not None and not t_casted.place._equals(device):
                new_t = t_casted._copy_to(device, blocking)
            else:
                new_t = t_casted

            # 4. Share Tensor to origin Tensor
            dst_tensor = t.value().get_tensor()
            src_tensor = new_t.value().get_tensor()
            dst_tensor._share_data_with(src_tensor)

            return t

        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=UserWarning)
            return transform(self, device, dtype, blocking)

    @property
    def grad(self):
        """
        .. warning::
          This API will return the tensor value of the gradient. If you want
          to get the numpy value of the gradient, you can use :code:`x.grad.numpy()`.

        Get the Gradient of Current Tensor.

        Returns:
            Tensor: the gradient of current Tensor

        Examples:
            .. code-block:: python

                import paddle

                x = paddle.to_tensor(5., stop_gradient=False)
                y = paddle.pow(x, 4.0)
                y.backward()
                print("grad of x: {}".format(x.grad))
                # Tensor(shape=[1], dtype=float32, place=CUDAPlace(0), stop_gradient=False, [500.])

        """
        msg = 'tensor.grad will return the tensor value of the gradient.' \
            ' This is an incompatible upgrade for tensor.grad API. ' \
            ' Its return type changes from numpy.ndarray in version 2.0 to paddle.Tensor in version 2.1.0. ' \
            ' If you want to get the numpy value of the gradient, you can use :code:`x.grad.numpy()`'
        warning_msg = "\033[93m\nWarning:\n%s \033[0m" % (msg)
        # ensure ANSI escape sequences print correctly in cmd and powershell
        if sys.platform.lower() == 'win32':
            warning_msg = "\nWarning:\n%s " % (msg)
        warnings.warn(warning_msg)
        return self._grad_ivar()

    def clear_grad(self):
        """
        The alias of clear_gradient().
        """
        self.clear_gradient()

    def item(self, *args):
        """
        Convert the element at a specific position in the Tensor into a Python scalar. If the position is not specified, the Tensor must be a
        single-element Tensor.

        Args:
            *args(int): The input coordinates. If it's a single int, the data in the corresponding order of the flattened Tensor will be returned.
                Default: None, and it must be in the case where Tensor has only one element.

        Returns(Python scalar): A Python scalar, whose dtype corresponds to the dtype of Tensor.

        Raises:
            ValueError: If the Tensor has more than one element, the coordinates must be specified.

        Examples:
            .. code-block:: python

                import paddle

                x = paddle.to_tensor(1)
                print(x.item())             #1
                print(type(x.item()))       #<class 'int'>

                x = paddle.to_tensor(1.0)
                print(x.item())             #1.0
                print(type(x.item()))       #<class 'float'>

                x = paddle.to_tensor(True)
                print(x.item())             #True
                print(type(x.item()))       #<class 'bool'>

                x = paddle.to_tensor(1+1j)
                print(x.item())             #(1+1j)
                print(type(x.item()))       #<class 'complex'>

                x = paddle.to_tensor([[1.1, 2.2, 3.3]])
                print(x.item(2))            #3.3
                print(x.item(0, 2))         #3.3

        """
        return self._getitem_from_offset(*args).item()

    @property
    def inplace_version(self):
        """
        The inplace version of current Tensor.
        The version number is incremented whenever the current Tensor is modified through an inplace operation.

        **Notes: This is a read-only property**

        Examples:
          .. code-block:: python

            import paddle
            var = paddle.ones(shape=[4, 2, 3], dtype="float32")
            print(var.inplace_version)  # 0

            var[1] = 2.2
            print(var.inplace_version)  # 1

        """
        return self._inplace_version()

    def __str__(self):
        """
        Convert a VarBase object to a readable string.

        Returns(str): A readable string.

        Examples:
            .. code-block:: python

                import paddle
                x = paddle.rand([2, 5])
                print(x)

                # Tensor(shape=[2, 5], dtype=float32, place=CPUPlace,
                #        [[0.30574632, 0.55739117, 0.30902600, 0.39413780, 0.44830436],
                #         [0.79010487, 0.53972793, 0.09495186, 0.44267157, 0.72112119]])
        """
        if core._in_eager_mode():
            from paddle.tensor.to_string import tensor_to_string
            return tensor_to_string(self)
        else:
            from paddle.tensor.to_string import to_string
            return to_string(self)

    def __deepcopy__(self, memo):
        """
        Deep copy Tensor, it will always performs Tensor copy.

        Examples:
            .. code-block:: python

                import paddle
                import copy
                x = paddle.to_tensor(2.)
                y = copy.deepcopy(x)

                print(x)
                # Tensor(shape=[1], dtype=float32, place=CPUPlace, stop_gradient=True,
                #        [2.])

                print(y)
                # Tensor(shape=[1], dtype=float32, place=CPUPlace, stop_gradient=True,
                #        [2.])

        """
        if not self.is_leaf:
            raise RuntimeError(
                "Only leaf Tensors support deepcopy at the moment; non-leaf Tensors contain graph information that doesn't support deepcopy"
            )
        if core._in_eager_mode():
            new_varbase = core.eager.Tensor()
        else:
            new_varbase = core.VarBase()
        new_varbase.name = self.name + unique_name.generate("_deepcopy")
        memo[id(self)] = new_varbase
        new_varbase.copy_(self, True)
        return new_varbase

    @property
    def block(self):
        return framework.default_main_program().global_block()

    def __nonzero__(self):
        numel = np.prod(self.shape)
        assert numel == 1, "When Variable is used as the condition of if/while, Variable can only contain one element."
        if core._in_eager_mode():
            assert self._is_initialized(), "tensor not initialized"
            return bool(np.all(self.numpy() > 0))
        else:
            tensor = self.value().get_tensor()
            assert tensor._is_initialized(), "tensor not initialized"
            return bool(np.all(tensor.__array__() > 0))

    def __bool__(self):
        return self.__nonzero__()

    def __array__(self, dtype=None):
        """
        Returns a numpy array that shows the value of the current Tensor.

        Returns:
            ndarray: The numpy value of current Tensor.

        Return type:
            ndarray: dtype is same as current Tensor

        Examples:
            .. code-block:: python

                import paddle
                import numpy as np
                x = paddle.randn([2, 2])
                x_array = np.array(x)

                print(type(x_array))      #<class 'numpy.ndarray'>
                print(x_array.shape)      #(2, 2)
        """
        array = self.numpy()
        if dtype:
            array = array.astype(dtype)
        return array

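    # Helper for __getitem__: returns True when the indexing item (or any slice bound
    # inside it) contains a Variable/Tensor, which forces the Python _getitem_impl_ path.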
    def contain_tensor(item):
        if not isinstance(item, (tuple, list)):
            item = [item]

        for slice_item in item:
            if isinstance(slice_item, slice):
                if isinstance(slice_item.start, Variable)  \
                    or isinstance(slice_item.stop, Variable) \
                        or isinstance(slice_item.step, Variable):
                    return True
            else:
                if isinstance(
                        slice_item,
                    (Variable, np.ndarray)) and Variable.dtype != paddle.bool:
                    return True
        return False

    def __getitem__(self, item):
        def is_list_tuple(index, contain_type):
            def _is_list_tuple(item):
                if isinstance(item, (tuple, list)):
                    for s in item:
                        if not _is_list_tuple(s):
                            return False
                else:
                    if type(item) != contain_type:
                        return False
                return True

            if not isinstance(index, (tuple, list)):
                return False
            for s in index:
                if not _is_list_tuple(s):
                    return False
            return True

        if contain_tensor(item) or is_list_tuple(item, int):
            # 1. Call _getitem_impl_ when item contains tensor.
            # Why not call a c++ function? Because item can't be parsed when it contains tensor.
            return _getitem_impl_(self, item)

        else:
            # 2. Call c++ func getitem_index_not_tensor to speedup.
            return self._getitem_index_not_tensor(item)

    def __setitem__(self, item, value):
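        # Helper: returns True when the assignment index contains a Python list or a
        # Variable/Tensor, in which case the Python _setitem_impl_ path is used.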
        def contain_tensor_or_list(item):
            if not isinstance(item, tuple):
                item = [item]

            for slice_item in item:
                if isinstance(slice_item, list):
                    return True
                elif isinstance(slice_item, Variable):
                    return True

            return False

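        # Helper: returns True when `item` mixes index kinds (e.g. slices with Tensors)
        # or mixes Tensors of different dtypes; such combined indices skip
        # _setitem_impl_ and fall through to the setitem path below.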
        def is_combine_index(item):
            var_type = None
            item_type = None
            if isinstance(item, (tuple, list)):
                for slice_item in item:
                    if item_type is None:
                        item_type = type(slice_item)
                    else:
                        if type(slice_item) != item_type:
                            return True

                    if isinstance(slice_item, Variable):
                        if var_type is None:
                            var_type = slice_item.dtype
                        else:
                            if var_type != slice_item.dtype:
                                return True
                return False

            return False

        if contain_tensor_or_list(item) and not is_combine_index(item):
            # To reuse code with static graph,
            # Call _setitem_impl_ when item contains tensor or list.
            return _setitem_impl_(self, item, value)

        else:
            if core._in_eager_mode():
                return self.__setitem_eager_tensor__(item, value)
            else:
                # Call c++ func __setitem_varbase__ to speedup.
                return self.__setitem_varbase__(item, value)

    @framework.dygraph_only
    def _grad_ivar(self):
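        # Eager-mode stand-in for VarBase._grad_ivar(): expose the grad Tensor only
        # when it has actually been initialized.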
        if self.grad is not None:
            if self.grad._is_initialized():
                return self.grad
        return None

    @framework.dygraph_only
    def _set_grad_ivar(self, value):
        if isinstance(self, EagerParamBase):
            self.grad = value
        else:
            raise TypeError(
                "_set_grad_ivar is only supported for Parameter Tensor")

    @framework.dygraph_only
    def clone(self):
        return _C_ops.assign(self)

    @framework.dygraph_only
    def value(self):
        return self

    @framework.dygraph_only
    def _slice(self, begin_idx, end_idx):
        return core.eager.Tensor(self.get_tensor()._slice(begin_idx, end_idx))

    @framework.dygraph_only
    def _numel(self):
        return self.get_tensor()._numel()

    @framework.dygraph_only
    def cpu(self):
        if self.place.is_cpu_place():
            return self
        else:
            res = self._copy_to(core.CPUPlace(), True)
            res.stop_gradient = self.stop_gradient
            res.persistable = self.persistable
            return res

    @framework.dygraph_only
    def cuda(self, device_id, blocking):
        if self.place.is_gpu_place():
            return self
        else:
            res = self._copy_to(core.CUDAPlace(device_id), True)
            res.stop_gradient = self.stop_gradient
            res.persistable = self.persistable
            return res

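    # Nothing to patch if eager mode is requested but the core.eager module is unavailable.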
    if core._in_eager_mode() and not hasattr(core, "eager"):
        return

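    # Patch the methods defined above onto the Tensor class for the active mode
    # (core.eager.Tensor in eager mode, otherwise core.VarBase); mode-specific
    # extras are patched right after this loop.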
    for method_name, method in (
        ("__bool__", __bool__), ("__nonzero__", __nonzero__),
        ("_to_static_var", _to_static_var), ("set_value", set_value),
        ("block", block), ("backward", backward), ("clear_grad", clear_grad),
        ("inplace_version", inplace_version), ("gradient", gradient),
        ("register_hook", register_hook), ("__str__", __str__),
        ("__repr__", __str__), ("__deepcopy__", __deepcopy__),
        ("__module__", "paddle"), ("__array__", __array__),
        ("__getitem__", __getitem__), ("item", item),
        ("__setitem__", __setitem__), ("_to", _to)):
        if core._in_eager_mode():
            setattr(core.eager.Tensor, method_name, method)
        else:
            setattr(core.VarBase, method_name, method)

    if core._in_eager_mode():
        setattr(core.eager.Tensor, "_grad_ivar", _grad_ivar)
        setattr(core.eager.Tensor, "_set_grad_ivar", _set_grad_ivar)
        setattr(core.eager.Tensor, "clone", clone)
        setattr(core.eager.Tensor, "value", value)
        setattr(core.eager.Tensor, "cpu", cpu)
        setattr(core.eager.Tensor, "cuda", cuda)
        setattr(core.eager.Tensor, "_slice", _slice)
        setattr(core.eager.Tensor, "_numel", _numel)
    else:
        setattr(core.VarBase, "__name__", "Tensor")
        setattr(core.VarBase, "grad", grad)

    global _already_patch_repr
    if not _already_patch_repr:
        # NOTE(zhiqiu): pybind11 will set a default __str__ method of enum class.
        # So, we need to overwrite it to a more readable one.
        # See details in https://github.com/pybind/pybind11/issues/2537.
        origin = getattr(core.VarDesc.VarType, "__repr__")

        def dtype_str(dtype):
            if dtype in _PADDLE_DTYPE_2_NUMPY_DTYPE:
                prefix = 'paddle.'
                return prefix + _PADDLE_DTYPE_2_NUMPY_DTYPE[dtype]
            else:
                # for example, paddle.fluid.core.VarDesc.VarType.LOD_TENSOR
                return origin(dtype)

        setattr(core.VarDesc.VarType, "__repr__", dtype_str)
        _already_patch_repr = True

    # patch math methods for varbase
    monkey_patch_math_varbase()