# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..wrapped_decorator import signature_safe_contextmanager, wrap_decorator
import decorator
import contextlib
import functools
import inspect
import sys
import numpy as np
from paddle.fluid import core
from paddle.fluid import framework
from paddle.fluid.multiprocess_utils import CleanupFuncRegistrar
from .tracer import Tracer
import logging
from ..data_feeder import convert_dtype
import warnings
from ..framework import (
    _get_paddle_place,
    _in_legacy_dygraph,
    _in_eager_without_dygraph_check,
)
import paddle

__all__ = [
    'no_grad',
    'no_grad_',
    'grad',
    'guard',
    'enable_dygraph',
    'disable_dygraph',
    'enabled',
    'to_variable',
]

# Flag that indicates whether the code is running under `@to_static`
_in_declarative_mode_ = False


def in_declarative_mode():
    """
    Return a bool value that indicates whether the code is running under `@to_static`.

    """
    return _in_declarative_mode_


def declarative_unsupport_argument_warning(
    func_name, input_names, inputs, support_values
):
    """
    Warn if the given inputs are not element-wise equal to their supported
    values. This is a utility function for dy2static, used when a dygraph
    interface has more inputs than its static counterpart, e.g. paddle.grad.
    """
    for name, inp, sup in zip(input_names, inputs, support_values):
        if inp != sup:
            warnings.warn(
                f"{func_name} has unsupported parameter in jit: "
                + f"{name}, jit will discard it"
            )
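
# A minimal usage sketch (illustrative; it mirrors the call made in `grad`
# further below): warn that a dygraph-only argument is discarded in jit:
#
#     declarative_unsupport_argument_warning(
#         "paddle.grad", ["retain_graph"], [retain_graph], [None]
#     )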


def _switch_to_static_graph_(func):
    def __impl__(*args, **kwargs):
        with framework._dygraph_guard(None):
            return func(*args, **kwargs)

    return __impl__


switch_to_static_graph = wrap_decorator(_switch_to_static_graph_)
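
# Usage sketch (illustrative only): a function decorated with
# `switch_to_static_graph` runs with the dygraph tracer disabled, e.g.:
#
#     @switch_to_static_graph
#     def _build_program():
#         return framework.default_main_program()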


@signature_safe_contextmanager
def _switch_declarative_mode_guard_(is_declarative=True):
    global _in_declarative_mode_
    original_val = _in_declarative_mode_
    _in_declarative_mode_ = is_declarative
    try:
        yield
    finally:
        _in_declarative_mode_ = original_val


@signature_safe_contextmanager
def program_desc_tracing_guard(enable):
    tracer = framework._dygraph_tracer()
    if tracer:
        original_val = tracer._enable_program_desc_tracing
        tracer._enable_program_desc_tracing = enable
    try:
        yield
    finally:
        if tracer:
            tracer._enable_program_desc_tracing = original_val
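
# Usage sketch (illustrative; `run_forward` is a hypothetical user function):
#
#     with program_desc_tracing_guard(True):
#         run_forward()  # ops run here are also recorded into the ProgramDesc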


_functional_dygraph_context_manager = None


@signature_safe_contextmanager
def param_guard(parameters):
    # Note: parameters is a reference of self._parameters or self._buffers
    if (
        in_declarative_mode()
        and not framework._non_static_mode()
        and parameters
    ):
        origin_parameters = parameters.copy()
        for name, var_base in parameters.items():
            if isinstance(var_base, list):
                new_var = [_convert_into_variable(var) for var in var_base]
            else:
                new_var = _convert_into_variable(var_base)
            parameters[name] = new_var
        try:
            yield
        finally:
            parameters.update(origin_parameters)
    else:
        yield
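
# Intended use (illustrative): during dy2stat tracing, a Layer's parameters
# and buffers are temporarily swapped for static-graph Variables, e.g.:
#
#     with param_guard(layer._parameters), param_guard(layer._buffers):
#         out = layer.forward(x)  # hypothetical traced forward call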


def _convert_into_variable(tensor):
    """
    Convert a VarBase into a Variable.
    """
    if isinstance(tensor, (core.eager.Tensor, core.VarBase)):
        # Check whether the variable has been created before.
        new_var = tensor.block._find_var_recursive(tensor.name)
        if new_var is not None:
            assert isinstance(new_var, framework.Variable)
        # Convert ParamBase into Parameter with same attributes in dy2stat.
        elif isinstance(
            tensor, (framework.EagerParamBase, framework.ParamBase)
        ):
            new_var = tensor._to_static_var(to_parameter=True)
        else:
            # Note(Aurelius84): Convert a VarBase in self._buffers into a Variable
            # with the same attributes and set persistable=True so that the var
            # can be saved. Users can create a VarBase in `__init__`, such as a
            # `mask` Tensor or `hidden_0` in RNN layers, which acts like a
            # Parameter and may be needed for inference; it will be pruned if
            # inference does not need it.

            # But if its shape is empty and it was created from `create_variable()`,
            # we consider this buffer non-persistable. See the case of `drop_state`
            # in the lstm api.
            is_persistable = len(tensor.shape) > 0

            new_var = tensor._to_static_var(
                to_parameter=False, persistable=is_persistable
            )
        return new_var
    else:
        return tensor


def enabled():
    """
    This function checks whether the program runs in dynamic graph mode or not.
    You can enter dynamic graph mode with :ref:`api_fluid_dygraph_guard` api,
    or enable and disable dynamic graph mode with :ref:`api_fluid_dygraph_enable_dygraph`
    and :ref:`api_fluid_dygraph_disable_dygraph` api .

    **Note**:
        ``fluid.dygraph.enabled`` is the alias of ``fluid.in_dygraph_mode``, and
        using ``fluid.in_dygraph_mode`` is recommended for now.

    Returns:
        bool: Whether the program is running in dynamic graph mode.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            fluid.enable_dygraph()  # Now we are in dygraph mode
            print(fluid.dygraph.enabled())  # True
            fluid.disable_dygraph()
            print(fluid.dygraph.enabled())  # False
    """
    # TODO(jiabin): Make this check as in_dygraph_mode when we support default eager mode.
    return framework._non_static_mode()


def enable_dygraph(place=None):
    """

    .. note::
        Dynamic graph mode is turned ON by default since paddle 2.0.0

    This API turns OFF static graph mode. You can turn ON static graph mode by `enable_static <./disable_dygraph_en.html>`_ .

    Parameters:
        place(paddle.CPUPlace|paddle.CUDAPlace|str, optional): Place to run dynamic graph. Default: None, which means the
            running place will be determined according to the way paddle was compiled. If ``place`` is a string, it can be
            ``cpu`` or ``gpu:x``, where ``x`` is the index of the GPU.

    Returns:
        None

    Examples:
        .. code-block:: python

            import paddle
            print(paddle.in_dynamic_mode())  # True, dynamic mode is turned ON by default since paddle 2.0.0

            paddle.enable_static()
            print(paddle.in_dynamic_mode())  # False, now we are in static mode

            paddle.disable_static()
            print(paddle.in_dynamic_mode())  # True, now we are in dynamic mode

    """
    global _functional_dygraph_context_manager
    if _functional_dygraph_context_manager is None:
        _functional_dygraph_context_manager = guard(
            place=_get_paddle_place(place)
        )
        _functional_dygraph_context_manager.__enter__()

        # call disable_dygraph when Python exits
        CleanupFuncRegistrar.register(disable_dygraph)


def disable_dygraph():
    """

    .. note::
        Dynamic graph mode is turned ON by default since paddle 2.0.0

    This API turns ON static graph mode. You can turn OFF static graph mode by `disable_static <./enable_dygraph_en.html>`_ .

    Returns:
        None

    Examples:
        .. code-block:: python

            import paddle
            print(paddle.in_dynamic_mode())  # True, dynamic mode is turned ON by default since paddle 2.0.0

            paddle.enable_static()
            print(paddle.in_dynamic_mode())  # False, now we are in static mode

            paddle.disable_static()
            print(paddle.in_dynamic_mode())  # True, now we are in dynamic mode

    """
    global _functional_dygraph_context_manager
    if _functional_dygraph_context_manager is not None:
        _functional_dygraph_context_manager.__exit__(*sys.exc_info())
        _functional_dygraph_context_manager = None


@signature_safe_contextmanager
def _switch_tracer_mode_guard_(is_train=True):
    tracer = framework._dygraph_tracer()
    if tracer:
        has_grad = tracer._has_grad
        tracer._has_grad = is_train
        try:
            yield
        finally:
            tracer._has_grad = has_grad
    else:
        yield
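
# Minimal sketch (illustrative): `no_grad` below builds on this guard; while
# `tracer._has_grad` is False, results of traced ops get stop_gradient=True.
#
#     with _switch_tracer_mode_guard_(is_train=False):
#         y = layer(x)  # hypothetical; y.stop_gradient would be True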


def no_grad(func=None):
    """
    :api_attr: imperative

    Create a context which disables dygraph gradient calculation.
    In this mode, the result of every computation will have `stop_gradient=True`.

    Also functions as a decorator. (Make sure to use it without parentheses.)

    Examples:

     .. code-block:: python

        import numpy as np
        import paddle.fluid as fluid

        # use as context manager

        data = np.array([[2, 3], [4, 5]]).astype('float32')
        with fluid.dygraph.guard():
            l0 = fluid.Linear(2, 2)  # l0.weight.gradient() is None
            l1 = fluid.Linear(2, 2)
            with fluid.dygraph.no_grad():
                # l1.weight.stop_gradient is False
                tmp = l1.weight * 2  # tmp.stop_gradient is True
            x = fluid.dygraph.to_variable(data)
            y = l0(x) + tmp
            o = l1(y)
            o.backward()
            print(tmp.gradient() is None)  # True
            print(l0.weight.gradient() is None)  # False

        # use as decorator

        @fluid.dygraph.no_grad
        def test_layer():
            with fluid.dygraph.guard():
                inp = np.ones([3, 1024], dtype='float32')
                t = fluid.dygraph.base.to_variable(inp)
                linear1 = fluid.Linear(1024, 4, bias_attr=False)
                linear2 = fluid.Linear(4, 4)
                ret = linear1(t)
                dy_ret = linear2(ret)

        test_layer()

    """
    if in_declarative_mode():
        warnings.warn(
            "paddle.no_grad is only supported for inference model, and not supported for training under @to_static."
        )
    if func is None:
        return _switch_tracer_mode_guard_(is_train=False)
    else:

        @decorator.decorator
        def __impl__(func, *args, **kwargs):
            with _switch_tracer_mode_guard_(is_train=False):
                return func(*args, **kwargs)

        return __impl__(func)


class no_grad_:
    """
    :api_attr: imperative

    Create a context which disables dygraph gradient calculation.
    In this mode, the result of every computation will have `stop_gradient` set
    to `True`.

    Also functions as a decorator. (Make sure to use an instance.)

    Examples:

     .. code-block:: python

        import numpy as np
        import paddle

        # use as context manager

        data = np.array([[2, 3], [4, 5]]).astype('float32')
        l0 = paddle.nn.Linear(2, 2)  # l0.weight.gradient() is None
        l1 = paddle.nn.Linear(2, 2)
        with paddle.no_grad():
            # l1.weight.stop_gradient is False
            tmp = l1.weight * 2  # tmp.stop_gradient is True
        x = paddle.to_tensor(data)
        y = l0(x) + tmp
        o = l1(y)
        o.backward()
        print(tmp.gradient() is None)  # True
        print(l0.weight.gradient() is None)  # False

        # use as decorator

        @paddle.no_grad()
        def test_layer():
            inp = np.ones([3, 1024], dtype='float32')
            t = paddle.to_tensor(inp)
            linear1 = paddle.nn.Linear(1024, 4, bias_attr=False)
            linear2 = paddle.nn.Linear(4, 4)
            ret = linear1(t)
            dy_ret = linear2(ret)

        test_layer()
    """

    def __call__(self, func):
        @decorator.decorator
        def _decorate_function(func, *args, **kwargs):
            with self:
                return func(*args, **kwargs)

        @decorator.decorator
        def _decorate_generator(func, *args, **kwargs):
            gen = func(*args, **kwargs)
            with self:
                for x in gen:
                    yield x

        if inspect.isgeneratorfunction(func):
            return _decorate_generator(func)
        else:
            return _decorate_function(func)

    def __enter__(self):
        tracer = framework._dygraph_tracer()
        if tracer:
            self.orig = tracer._has_grad
            tracer._has_grad = False

    def __exit__(self, *args):
        tracer = framework._dygraph_tracer()
        if tracer:
            tracer._has_grad = self.orig
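
# Illustrative note: unlike the function form above, this class also supports
# decorating generator functions, keeping gradients disabled across each
# resumption, e.g.:
#
#     @paddle.no_grad()
#     def batched_eval(xs):  # hypothetical generator
#         for x in xs:
#             yield linear(x)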


@signature_safe_contextmanager
def guard(place=None):
    """
    :api_attr: imperative

    This context manager creates a dygraph context for dygraph code to run in, used via the Python ``with`` statement.

    Parameters:
        place(fluid.CPUPlace|fluid.CUDAPlace|str, optional): Place to execute dygraph.
            If None, the running place will be determined according to the way paddle was compiled.
            If ``place`` is a string, it can be ``cpu``, ``gpu:x`` or ``xpu:x``, where ``x`` is the
            index of the GPU or XPU. Default: None

    Returns:
        None

    Examples:

     .. code-block:: python

        import numpy as np
        import paddle.fluid as fluid

        with fluid.dygraph.guard():
            inp = np.ones([3, 1024], dtype='float32')
            t = fluid.dygraph.base.to_variable(inp)
            linear1 = fluid.Linear(1024, 4, bias_attr=False)
            linear2 = fluid.Linear(4, 4)
            ret = linear1(t)
            dy_ret = linear2(ret)

    """
    train = framework.Program()
    startup = framework.Program()
    tracer = Tracer()
    VarBase = core.VarBase

    if place is not None:
        expected_place = _get_paddle_place(place)
    else:
        expected_place = framework._current_expected_place()

    with framework.program_guard(train, startup):
        with framework.unique_name.guard():
            with framework._dygraph_guard(tracer):
                with framework._dygraph_place_guard(expected_place):
                    yield


@framework.non_static_only
def grad(
    outputs,
    inputs,
    grad_outputs=None,
    retain_graph=None,
    create_graph=False,
    only_inputs=True,
    allow_unused=False,
    no_grad_vars=None,
):
    '''
    .. note::
        **This API is ONLY available in imperative mode.**

    This API computes the sum of gradients of `outputs` with respect to each `inputs` .

    Parameters:
        outputs (Tensor|list(Tensor)|tuple(Tensor)): the output Tensor or
            Tensor list/tuple of the graph to compute gradients.
        inputs (Tensor|list(Tensor)|tuple(Tensor)): the input Tensor or
            Tensor list/tuple of the graph to compute gradients. The returned
            values of this API are the gradients of `inputs` .
        grad_outputs (Tensor|list(Tensor|None)|tuple(Tensor|None), optional):
            initial gradient values of `outputs` . If `grad_outputs` is None,
            the initial gradient values of `outputs` would be Tensors filled with 1;
            if `grad_outputs` is not None, it must have the same length as `outputs` ,
            and in this case, the initial gradient value of the i-th `outputs` would
            be: (1) a Tensor filled with 1 when the i-th element of `grad_outputs`
            is None; (2) the i-th element of `grad_outputs` when the i-th element of
            `grad_outputs` is a Tensor. Default None.
        retain_graph (bool, optional): whether to retain the forward graph which
            is used to calculate the gradient. When it is True, the graph would
            be retained, in which way users can calculate backward twice for the
            same graph. When it is False, the graph would be freed. Default None,
            which means it is equal to `create_graph` .
        create_graph (bool, optional): whether to create the gradient graphs of
            the computing process. When it is True, higher order derivatives are
            supported to compute; when it is False, the gradient graphs of the
            computing process would be discarded. Default False.
        only_inputs (bool, optional): whether to only compute the gradients of
            `inputs` . If it is False, the gradients of all remaining leaf
            Tensors in the graph would be also computed and accumulated.
            If it is True, only the gradients of `inputs` would be computed.
            Default True. only_inputs=False is under development, and it is
            not supported yet.
        allow_unused (bool, optional): whether to raise error or return None if some
            Tensors of `inputs` are unreachable in the graph. If some Tensors of
            `inputs` are unreachable in the graph (i.e., their gradients are None),
            error would be raised if allow_unused=False, or None would be returned as
            their gradients if allow_unused=True. Default False.
        no_grad_vars (Tensor|list(Tensor)|tuple(Tensor)|set(Tensor), optional):
            the Tensors whose gradients are not needed to compute. Default None.

    Returns:
        list: a list of Tensors, whose length is the same as the Tensor number
        inside `inputs`, and the i-th returned Tensor is the sum of gradients of
        `outputs` with respect to the i-th `inputs`.

    Examples:
        .. code-block:: python
            :name: code-example-1

            import paddle

            def test_dygraph_grad(create_graph):
                x = paddle.ones(shape=[1], dtype='float32')
                x.stop_gradient = False
                y = x * x

                # Since y = x * x, dx = 2 * x
                dx = paddle.grad(
                        outputs=[y],
                        inputs=[x],
                        create_graph=create_graph,
                        retain_graph=True)[0]

                z = y + dx

                # If create_graph = False, the gradient of dx
                # would not be backpropagated. Therefore,
                # z = x * x + dx, and x.gradient() = 2 * x = 2.0

                # If create_graph = True, the gradient of dx
                # would be backpropagated. Therefore,
                # z = x * x + dx = x * x + 2 * x, and
                # x.gradient() = 2 * x + 2 = 4.0

                z.backward()
                return x.gradient()

            print(test_dygraph_grad(create_graph=False)) # [2.]
            print(test_dygraph_grad(create_graph=True)) # [4.]

        .. code-block:: python
            :name: code-example-2

            import paddle

            def test_dygraph_grad(grad_outputs=None):
                x = paddle.to_tensor(2.0)
                x.stop_gradient = False

                y1 = x * x
                y2 = x * 3

                # If grad_outputs=None, dy1 = [1], dy2 = [1].
                # If grad_outputs=[g1, g2], then:
                #    - dy1 = [1] if g1 is None else g1
                #    - dy2 = [1] if g2 is None else g2

                # Since y1 = x * x, dx = 2 * x * dy1.
                # Since y2 = x * 3, dx = 3 * dy2.
                # Therefore, the final result would be:
                # dx = 2 * x * dy1 + 3 * dy2 = 4 * dy1 + 3 * dy2.

583
                dx = paddle.grad(
584
                    outputs=[y1, y2],
Z
                    grad_outputs=grad_outputs)[0]

                return dx.numpy()

            grad_value = paddle.to_tensor(4.0)
            # dy1 = [1], dy2 = [1]
            print(test_dygraph_grad(None)) # [7.]

            # dy1 = [1], dy2 = [4]
            print(test_dygraph_grad([None, grad_value])) # [16.]

            # dy1 = [4], dy2 = [1]
            print(test_dygraph_grad([grad_value, None])) # [19.]

            # dy1 = [3], dy2 = [4]
            grad_y1 = paddle.to_tensor(3.0)
            print(test_dygraph_grad([grad_y1, grad_value])) # [24.]
    '''
    if in_declarative_mode():
        # In dy2static context, we call static interface `gradients`
        # to calculate grads.
        from paddle.static import gradients

        declarative_unsupport_argument_warning(
            "paddle.grad",
            ["retain_graph", "create_graph", "only_inputs", "allow_unused"],
            [retain_graph, create_graph, only_inputs, allow_unused],
            [None, False, True, False],
        )
        return gradients(outputs, inputs, grad_outputs, no_grad_vars)

    def check_in_out(in_out_list, name):
        assert in_out_list is not None, "{} should not be None".format(name)

        if isinstance(in_out_list, (list, tuple)):
            assert len(in_out_list) > 0, "{} cannot be empty".format(name)
            for each_var in in_out_list:
                if _in_eager_without_dygraph_check():
                    assert isinstance(
                        each_var, core.eager.Tensor
                    ), "Elements of {} must be Tensor".format(name)
                else:
                    assert isinstance(
                        each_var, core.VarBase
                    ), "Elements of {} must be Variable".format(name)
            return in_out_list
        else:
            if _in_eager_without_dygraph_check():
                assert isinstance(
                    in_out_list, core.eager.Tensor
                ), "{} must be Tensor or list of Tensor".format(name)
            else:
                assert isinstance(
                    in_out_list, core.VarBase
                ), "{} must be Variable or list of Variable".format(name)
            return [in_out_list]

    outputs = check_in_out(outputs, 'outputs')
    inputs = check_in_out(inputs, 'inputs')

    if grad_outputs is not None:
        if not isinstance(grad_outputs, (list, tuple)):
            grad_outputs = [grad_outputs]

        for each_var in grad_outputs:
            if each_var is not None:
                if _in_eager_without_dygraph_check():
                    assert isinstance(
                        each_var, core.eager.Tensor
                    ), "grad_outputs must be None, a Variable or a list containing None or Variables"
                else:
                    assert isinstance(
                        each_var, core.VarBase
                    ), "grad_outputs must be None, a Variable or a list containing None or Variables"
    else:
        grad_outputs = []

    if len(grad_outputs) > 0:
        assert len(grad_outputs) == len(
            outputs
        ), "The length of grad_outputs must be equal to outputs"

    if no_grad_vars is None:
        no_grad_vars = []
    elif isinstance(no_grad_vars, (core.VarBase, core.eager.Tensor)):
        no_grad_vars = [no_grad_vars]
    elif isinstance(no_grad_vars, (list, tuple, set)):
        no_grad_vars = list(no_grad_vars)
        for var in no_grad_vars:
            if _in_eager_without_dygraph_check():
                assert isinstance(
                    var, core.eager.Tensor
                ), "no_grad_vars can only contain Tensors"
            else:
                assert isinstance(
                    var, core.VarBase
                ), "no_grad_vars can only contain Variables"
    else:
        if _in_eager_without_dygraph_check():
            raise AssertionError(
                "no_grad_vars must be None, Tensor or list/tuple/set of Tensors"
            )
        else:
            raise AssertionError(
                "no_grad_vars must be None, Variable or list/tuple/set of Variables"
            )

    assert isinstance(create_graph, bool), "create_graph must be True or False"

    if retain_graph is None:
        retain_graph = create_graph

    assert isinstance(
        retain_graph, bool
    ), "retain_graph must be None, True or False"

    assert isinstance(allow_unused, bool), "allow_unused must be True or False"

    assert isinstance(only_inputs, bool), "only_inputs must be True or False"
    assert only_inputs, "only_inputs=False is not supported yet"

    if _in_eager_without_dygraph_check():
        return core.eager.run_partial_grad(
            outputs,
            inputs,
            grad_outputs,
            retain_graph,
            create_graph,
            only_inputs,
            allow_unused,
            no_grad_vars,
        )
    else:
        place = core.Place()
        place.set_place(framework._current_expected_place())
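        # Note (descriptive): the legacy entry point below takes inputs before
        # outputs, the reverse of the eager entry point above; both return the
        # same list of gradients.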
        return core.dygraph_partial_grad(
            inputs,
            outputs,
            grad_outputs,
            no_grad_vars,
            place,
            create_graph,
            retain_graph,
            allow_unused,
            only_inputs,
        )


@framework.dygraph_only
def to_variable(value, name=None, zero_copy=None, dtype=None):
    r"""
    :api_attr: imperative

    The API will create a ``Variable`` object from
    a tuple, list, numpy\.ndarray or Variable object.

    Parameters:
        value(tuple|list|ndarray|Variable|Tensor): Initial data.
            Can be a list, tuple, NumPy ndarray, Variable, Tensor.
            The shape can be multi-dimensional. The data type is one of
            numpy\.{float16, float32, float64, int16, int32, int64,
            uint8, uint16, complex64, complex128}.
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name` .
        zero_copy(bool, optional): Whether to share memory with the input numpy
            array. This parameter only works with CPUPlace and will be set to
            True when it is None. Default: None. (Note: zero_copy is temporarily disabled; see the notes in the implementation below.)
        dtype(str, optional): The desired data type of returned ``Variable`` .
            Can be 'bool' , 'float16' , 'float32' , 'float64' , 'int8' , 'int16' ,
            'int32' , 'int64' , 'uint8' . Default: None.

    Returns:
        Variable : If ``value`` is a tuple/list/numpy\.ndarray object,
            return the ``Tensor`` created from the corresponding numpy\.ndarray object, which has
            the same data type and shape as ``value``.


    Examples:

     .. code-block:: python

        import numpy as np
        import paddle.fluid as fluid

        with fluid.dygraph.guard(fluid.CPUPlace()):
            x = np.ones([2, 2], np.float32)
            y = fluid.dygraph.to_variable(x, zero_copy=False)
            x[0][0] = -1
            y[0][0].numpy()  # array([1.], dtype=float32)
            y = fluid.dygraph.to_variable(x)
            x[0][0] = 0
            y[0][0].numpy()  # array([0.], dtype=float32)
            c = np.array([2+1j, 2])
            z = fluid.dygraph.to_variable(c)
            z.numpy() # array([2.+1.j, 2.+0.j])
            z.dtype # 'complex128'

            y = fluid.dygraph.to_variable([[0.1, 1.2], [2.2, 3.1], [4.9, 5.2]])
            y.shape     # [3L, 2L]

            y = fluid.dygraph.to_variable(((0.1, 1.2), (2.2, 3.1), (4.9, 5.2)), dtype='int32')
            y.shape     # [3L, 2L]

    """
    support_type = (
        list,
        tuple,
        np.ndarray,
        core.eager.Tensor,
        core.VarBase,
        framework.Variable,
        core.Tensor,
        core.LoDTensor,
    )
    if not isinstance(value, support_type):
        raise TypeError(
            "The type of 'value' in fluid.dygraph.to_variable must be %s, but received %s."
            % (support_type, type(value))
        )
    if isinstance(value, (core.eager.Tensor, core.VarBase, framework.Variable)):
        return value
    elif isinstance(value, (core.Tensor, core.LoDTensor)):
        return core.VarBase(value)
    else:
        if isinstance(
            framework._current_expected_place(), framework.core.CPUPlace
        ):
            # TODO(zhiqiu): we found two problems when enable zero_copy on CPUPlace.
            # (1): eigen requires 16-byte alignment, but the data of a numpy array may not satisfy it.
            # Details: https://eigen.tuxfamily.org/dox/group__TopicUnalignedArrayAssert.html
            # (2): when used in flask framework, it may result in hang.
            # Details: https://github.com/PaddlePaddle/Paddle/issues/26635
            # So, we temporarily disable the zero_copy strategy.
            if zero_copy == True:
                warnings.warn(
                    "Currently, zero_copy is not supported, and it will be discarded."
                )
                zero_copy = False
        else:
            assert (
                not zero_copy
            ), "zero_copy mode can only be used with CPUPlace"

        if not isinstance(value, np.ndarray):
            value = np.array(value)

        if dtype is not None:
            dtype = convert_dtype(dtype)
            if value.dtype != dtype:
                value = value.astype(dtype)

        if _in_eager_without_dygraph_check():
            return core.eager.Tensor(
                value,
                framework._current_expected_place(),
                False,
                zero_copy,
                name if name else None,
                True,
            )
        else:
            py_var = core.VarBase(
                value=value,
                place=framework._current_expected_place(),
                persistable=False,
                zero_copy=zero_copy,
                name=name if name else '',
            )
            return py_var