# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ..wrapped_decorator import signature_safe_contextmanager, wrap_decorator
import decorator
import contextlib
import functools
import inspect
import sys
import numpy as np
from paddle.fluid import core
from paddle.fluid import framework
from paddle.fluid.framework import global_var
from paddle.fluid.multiprocess_utils import CleanupFuncRegistrar
from .tracer import Tracer
import logging
from ..data_feeder import convert_dtype
import warnings
from ..framework import (
    _get_paddle_place,
    _in_eager_without_dygraph_check,
)
import paddle

__all__ = [
    'no_grad',
    'no_grad_',
    'grad',
    'guard',
    'enable_dygraph',
    'disable_dygraph',
    'enabled',
    'to_variable',
]

# Flag that indicates whether the code is running under `@to_static`


def in_declarative_mode():
    """
    Return a bool value that indicates whether the code is running under `@to_static`.

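    Examples:
        .. code-block:: python

            # Minimal illustrative sketch (assumes this module is importable as
            # ``paddle.fluid.dygraph.base``). The flag is managed by the
            # dy2static machinery, so plain dygraph code sees False here.
            from paddle.fluid.dygraph.base import in_declarative_mode

            print(in_declarative_mode())  # False in ordinary dygraph code
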
    """
    return global_var._in_declarative_mode_


def declarative_unsupport_argument_warning(
    func_name, input_names, inputs, support_values
):
    """
    Warn if any input does not equal its corresponding value in support_values.
    It's a utility function for dy2static, used when a dygraph interface has
    more inputs than its static counterpart, such as paddle.grad.

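    Examples:
        .. code-block:: python

            # Hedged sketch (assumes this module is importable as
            # ``paddle.fluid.dygraph.base``): a warning is emitted because the
            # passed retain_graph value (True) differs from the supported
            # value (None).
            from paddle.fluid.dygraph.base import (
                declarative_unsupport_argument_warning,
            )

            declarative_unsupport_argument_warning(
                "paddle.grad",
                ["retain_graph"],
                [True],
                [None],
            )
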
    """
    for name, inp, sup in zip(input_names, inputs, support_values):
        if inp != sup:
            warnings.warn(
                f"{func_name} has unsupported parameter in jit: "
                + f"{name}, jit will discard it"
            )


def _switch_to_static_graph_(func):
    def __impl__(*args, **kwargs):
        with framework._dygraph_guard(None):
            return func(*args, **kwargs)

    return __impl__


switch_to_static_graph = wrap_decorator(_switch_to_static_graph_)

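# Usage sketch (illustrative only; the helper name below is hypothetical):
# decorating a function with @switch_to_static_graph runs its body with the
# dygraph tracer switched off, so static-graph-only construction logic can be
# called from dygraph code, e.g.
#
#     @switch_to_static_graph
#     def _build_static_program():
#         ...  # executed under static graph semantics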

@signature_safe_contextmanager
def _switch_declarative_mode_guard_(is_declarative=True):

    global global_var
    original_val = global_var._in_declarative_mode_
    global_var._in_declarative_mode_ = is_declarative
    yield
    global_var._in_declarative_mode_ = original_val


@signature_safe_contextmanager
def program_desc_tracing_guard(enable):
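    # Temporarily set the current dygraph tracer's `_enable_program_desc_tracing`
    # flag to `enable`, restoring the original value when the context exits.
    # If there is no active tracer, the guard is a no-op.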
    tracer = framework._dygraph_tracer()
    if tracer:
        original_val = tracer._enable_program_desc_tracing
        tracer._enable_program_desc_tracing = enable
    try:
        yield
    finally:
        if tracer:
            tracer._enable_program_desc_tracing = original_val


@signature_safe_contextmanager
def param_guard(parameters):
    # Note: parameters is a reference of self._parameters or self._buffers
    if in_declarative_mode() and not framework.in_dygraph_mode() and parameters:
        origin_parameters = parameters.copy()
        for name, var_base in parameters.items():
            if isinstance(var_base, list):
                new_var = [_convert_into_variable(var) for var in var_base]
            else:
                new_var = _convert_into_variable(var_base)
            parameters[name] = new_var
        yield
        parameters.update(origin_parameters)
    else:
        yield


def _convert_into_variable(tensor):
    """
    Convert a VarBase (or eager Tensor) into a static graph Variable.
    """
    if isinstance(tensor, (core.eager.Tensor, core.VarBase)):
        # Check whether has been created before.
        new_var = tensor.block._find_var_recursive(tensor.name)
        if new_var is not None:
            assert isinstance(new_var, framework.Variable)
        # Convert ParamBase into Parameter with same attributes in dy2stat.
        elif isinstance(
            tensor, (framework.EagerParamBase, framework.ParamBase)
        ):
            new_var = tensor._to_static_var(to_parameter=True)
        else:
            # Note(Aurelius84): Convert VarBase in self._buffers into Variable with
            # same attributes and set persistable=True to allow saving this var.
            # Because users can create a VarBase in `__init__`  like a
            # `mask` Tensor or `hidden_0` in RNN layers, which is equivalent to a Parameter
            # and necessary for inferring. It will be pruned if it's not necessary for inferring.

            # But if its shape is empty while created from `create_variable()`, we consider this buffer
            # non-persistable. See case of `drop_state` in lstm api.
            is_persistable = len(tensor.shape) > 0

            new_var = tensor._to_static_var(
                to_parameter=False, persistable=is_persistable
            )
        # add param into parameter recorder to collect all the params used in this program.
        if new_var.persistable is True:
            # TODO(@xiongkun): 0d-tensor may be affected at present,
            # but there is no particularly good method to identify whether 0d-tensor
            # is used as buffer or "drop_out_state" in LSTM buffer variable.
            from paddle.jit.dy2static.program_translator import (
                ProgramTranslator,
            )

            ProgramTranslator.get_instance()._params_recorder.add(
                tensor.block.program, tensor
            )
        return new_var
    else:
        return tensor


def enabled():
    """
    This function checks whether the program runs in dynamic graph mode or not.
    You can enter dynamic graph mode with :ref:`api_fluid_dygraph_guard` API,
    or enable and disable dynamic graph mode with the :ref:`api_fluid_dygraph_enable_dygraph`
    and :ref:`api_fluid_dygraph_disable_dygraph` APIs.

    **Note**:
        ``fluid.dygraph.enabled`` is an alias of ``fluid.in_dygraph_mode``, and
        ``fluid.in_dygraph_mode`` is the recommended one to use for now.

    Returns:
        bool: Whether the program is running in dynamic graph mode.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            fluid.enable_dygraph()  # Now we are in dygraph mode
            print(fluid.dygraph.enabled())  # True
            fluid.disable_dygraph()
            print(fluid.dygraph.enabled())  # False
    """
    # TODO(jiabin): Make this check as in_dygraph_mode when we support default eager mode.
    return framework.in_dygraph_mode()


def enable_dygraph(place=None):
    """

    .. note::
        Dynamic graph mode is turned ON by default since paddle 2.0.0

    This API turns OFF static graph mode. You can turn static graph mode back ON with `enable_static <./disable_dygraph_en.html>`_ .

    Parameters:
        place(paddle.CPUPlace|paddle.CUDAPlace|str, optional): Place to run dynamic graph. Default: None, which means the running place will be
            determined according to the way paddle was compiled. If ``place`` is a string, it can be ``cpu`` or ``gpu:x``, where ``x`` is the
            index of the GPU.

    Returns:
        None

    Examples:
        .. code-block:: python

            import paddle
            print(paddle.in_dynamic_mode())  # True, dynamic mode is turned ON by default since paddle 2.0.0

            paddle.enable_static()
            print(paddle.in_dynamic_mode())  # False, now we are in static graph mode

            paddle.disable_static()
            print(paddle.in_dynamic_mode())  # True, now we are in dynamic graph mode

    """
    global global_var
    if global_var._functional_dygraph_context_manager is None:
        global_var._functional_dygraph_context_manager = guard(
            place=_get_paddle_place(place)
        )
        global_var._functional_dygraph_context_manager.__enter__()

        # call disable_dygraph when Python exit
        CleanupFuncRegistrar.register(disable_dygraph)


def disable_dygraph():
    """

    .. note::
        Dynamic graph mode is turned ON by default since paddle 2.0.0

    This API turns ON static graph mode. You can turn static graph mode back OFF with `disable_static <./enable_dygraph_en.html>`_ .

    Returns:
        None

    Examples:
        .. code-block:: python

            import paddle
            print(paddle.in_dynamic_mode())  # True, dynamic mode is turned ON by default since paddle 2.0.0

            paddle.enable_static()
            print(paddle.in_dynamic_mode())  # False, now we are in static graph mode

            paddle.disable_static()
            print(paddle.in_dynamic_mode())  # True, now we are in dynamic graph mode

    """
    global global_var
    if global_var._functional_dygraph_context_manager is not None:
        global_var._functional_dygraph_context_manager.__exit__(*sys.exc_info())
        global_var._functional_dygraph_context_manager = None


@signature_safe_contextmanager
def _switch_tracer_mode_guard_(is_train=True):
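    # Temporarily toggle gradient recording on the current dygraph tracer:
    # `tracer._has_grad` is set to `is_train` and restored on exit. If there
    # is no active tracer, the guard is a no-op.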
    tracer = framework._dygraph_tracer()
    if tracer:
        has_grad = tracer._has_grad
        tracer._has_grad = is_train
        try:
            yield
        finally:
            tracer._has_grad = has_grad
    else:
        yield


def no_grad(func=None):
    """
    :api_attr: imperative

    Create a context which disables dygraph gradient calculation.
    In this mode, the result of every computation will have `stop_gradient=True`.

    Also functions as a decorator. (Make sure to use it without parentheses.)

    Examples:

     .. code-block:: python

        import numpy as np
        import paddle.fluid as fluid

        # use as context manager

        data = np.array([[2, 3], [4, 5]]).astype('float32')
        with fluid.dygraph.guard():
            l0 = fluid.Linear(2, 2)  # l0.weight.gradient() is None
            l1 = fluid.Linear(2, 2)
            with fluid.dygraph.no_grad():
                # l1.weight.stop_gradient is False
                tmp = l1.weight * 2  # tmp.stop_gradient is True
            x = fluid.dygraph.to_variable(data)
            y = l0(x) + tmp
            o = l1(y)
            o.backward()
            print(tmp.gradient() is None)  # True
            print(l0.weight.gradient() is None)  # False

        # use as decorator

        @fluid.dygraph.no_grad
        def test_layer():
            with fluid.dygraph.guard():
                inp = np.ones([3, 1024], dtype='float32')
                t = fluid.dygraph.base.to_variable(inp)
                linear1 = fluid.Linear(1024, 4, bias_attr=False)
                linear2 = fluid.Linear(4, 4)
                ret = linear1(t)
                dy_ret = linear2(ret)

        test_layer()

    """
    if in_declarative_mode():
        warnings.warn(
            "paddle.no_grad is only supported for inference model, and not supported for training under @to_static."
        )
    if func is None:
        return _switch_tracer_mode_guard_(is_train=False)
    else:

        @decorator.decorator
        def __impl__(func, *args, **kwargs):
            with _switch_tracer_mode_guard_(is_train=False):
                return func(*args, **kwargs)

        return __impl__(func)


class no_grad_:
    """
    :api_attr: imperative

    Create a context which disables dygraph gradient calculation.
    In this mode, the result of every computation will have `stop_gradient` set
    to `True`.

    Also functions as a decorator. (Make sure to use an instance.)

    Examples:

     .. code-block:: python

        import numpy as np
        import paddle

        # use as context manager

        data = np.array([[2, 3], [4, 5]]).astype('float32')
        l0 = paddle.nn.Linear(2, 2)  # l0.weight.gradient() is None
        l1 = paddle.nn.Linear(2, 2)
        with paddle.no_grad():
            # l1.weight.stop_gradient is False
            tmp = l1.weight * 2  # tmp.stop_gradient is True
        x = paddle.to_tensor(data)
        y = l0(x) + tmp
        o = l1(y)
        o.backward()
        print(tmp.gradient() is None)  # True
        print(l0.weight.gradient() is None)  # False

        # use as decorator

        @paddle.no_grad()
        def test_layer():
            inp = np.ones([3, 1024], dtype='float32')
            t = paddle.to_tensor(inp)
            linear1 = paddle.nn.Linear(1024, 4, bias_attr=False)
            linear2 = paddle.nn.Linear(4, 4)
            ret = linear1(t)
            dy_ret = linear2(ret)

        test_layer()
    """

    def __call__(self, func):
        @decorator.decorator
        def _decorate_function(func, *args, **kwargs):
            with self:
                return func(*args, **kwargs)

        @decorator.decorator
        def _decorate_generator(func, *args, **kwargs):
            gen = func(*args, **kwargs)
            with self:
                for x in gen:
                    yield x

        if inspect.isgeneratorfunction(func):
            return _decorate_generator(func)
        else:
            return _decorate_function(func)

    def __enter__(self):
        tracer = framework._dygraph_tracer()
        if tracer:
            self.orig = tracer._has_grad
            tracer._has_grad = False

    def __exit__(self, *args):
        tracer = framework._dygraph_tracer()
        if tracer:
            tracer._has_grad = self.orig


@signature_safe_contextmanager
def guard(place=None):
    """
    :api_attr: imperative

    This context manager creates a dygraph context to run dygraph code in, used together with the Python ``with`` statement.

    Parameters:
        place(fluid.CPUPlace|fluid.CUDAPlace|str, optional): Place to execute dygraph.
            If None, the running place will be determined according to the way paddle was compiled.
            If ``place`` is a string, it can be ``cpu``, ``gpu:x`` or ``xpu:x``, where ``x`` is the
            index of the GPU or XPU. Default: None

    Returns:
        None

    Examples:

     .. code-block:: python

        import numpy as np
        import paddle.fluid as fluid

        with fluid.dygraph.guard():
            inp = np.ones([3, 1024], dtype='float32')
            t = fluid.dygraph.base.to_variable(inp)
            linear1 = fluid.Linear(1024, 4, bias_attr=False)
            linear2 = fluid.Linear(4, 4)
            ret = linear1(t)
            dy_ret = linear2(ret)

    """
    train = framework.Program()
    startup = framework.Program()
    tracer = Tracer()
    VarBase = core.VarBase

    if place is not None:
        expected_place = _get_paddle_place(place)
    else:
        expected_place = framework._current_expected_place()

    with framework.program_guard(train, startup):
        with framework.unique_name.guard():
            with framework._dygraph_guard(tracer):
                with framework._dygraph_place_guard(expected_place):
                    yield


@framework.non_static_only
def grad(
    outputs,
    inputs,
    grad_outputs=None,
    retain_graph=None,
    create_graph=False,
    only_inputs=True,
    allow_unused=False,
    no_grad_vars=None,
):
    '''
    .. note::
        **This API is ONLY available in imperative mode.**

    This API computes the sum of gradients of `outputs` with respect to each `inputs` .

    Parameters:
        outputs (Tensor|list(Tensor)|tuple(Tensor)): the output Tensor or
            Tensor list/tuple of the graph to compute gradients.
        inputs (Tensor|list(Tensor)|tuple(Tensor)): the input Tensor or
            Tensor list/tuple of the graph to compute gradients. The returned
            values of this API are the gradients of `inputs` .
        grad_outputs (Tensor|list(Tensor|None)|tuple(Tensor|None), optional):
            initial gradient values of `outputs` . If `grad_outputs` is None,
            the initial gradient values of `outputs` would be Tensors filled with 1;
            if `grad_outputs` is not None, it must have the same length as `outputs` ,
            and in this case, the initial gradient value of the i-th `outputs` would
            be: (1) a Tensor filled with 1 when the i-th element of `grad_outputs`
            is None; (2) the i-th element of `grad_outputs` when the i-th element of
            `grad_outputs` is a Tensor. Default None.
        retain_graph (bool, optional): whether to retain the forward graph which
            is used to calculate the gradient. When it is True, the graph would
            be retained, in which way users can calculate backward twice for the
            same graph. When it is False, the graph would be freed. Default None,
            which means it is equal to `create_graph` .
        create_graph (bool, optional): whether to create the gradient graphs of
            the computing process. When it is True, higher order derivatives are
            supported to compute; when it is False, the gradient graphs of the
            computing process would be discarded. Default False.
        only_inputs (bool, optional): whether to only compute the gradients of
            `inputs` . If it is False, the gradients of all remaining leaf
            Tensors in the graph would be also computed and accumulated.
            If it is True, only the gradients of `inputs` would be computed.
            Default True. only_inputs=False is under development, and it is
            not supported yet.
        allow_unused (bool, optional): whether to raise error or return None if some
            Tensors of `inputs` are unreachable in the graph. If some Tensors of
            `inputs` are unreachable in the graph (i.e., their gradients are None),
            error would be raised if allow_unused=False, or None would be returned as
            their gradients if allow_unused=True. Default False.
        no_grad_vars (Tensor|list(Tensor)|tuple(Tensor)|set(Tensor), optional):
            the Tensors whose gradients are not needed to compute. Default None.

    Returns:
        list: a list of Tensors, whose length is the same as the Tensor number
        inside `inputs`, and the i-th returned Tensor is the sum of gradients of
        `outputs` with respect to the i-th `inputs`.

    Examples:
        .. code-block:: python
            :name: code-example-1

            import paddle

            def test_dygraph_grad(create_graph):
                x = paddle.ones(shape=[1], dtype='float32')
                x.stop_gradient = False
                y = x * x

                # Since y = x * x, dx = 2 * x
                dx = paddle.grad(
                        outputs=[y],
                        inputs=[x],
                        create_graph=create_graph,
                        retain_graph=True)[0]

                z = y + dx

                # If create_graph = False, the gradient of dx
                # would not be backpropagated. Therefore,
                # z = x * x + dx, and x.gradient() = 2 * x = 2.0

                # If create_graph = True, the gradient of dx
                # would be backpropagated. Therefore,
                # z = x * x + dx = x * x + 2 * x, and
                # x.gradient() = 2 * x + 2 = 4.0

                z.backward()
                return x.gradient()

            print(test_dygraph_grad(create_graph=False)) # [2.]
            print(test_dygraph_grad(create_graph=True)) # [4.]

        .. code-block:: python
            :name: code-example-2

            import paddle

            def test_dygraph_grad(grad_outputs=None):
                x = paddle.to_tensor(2.0)
                x.stop_gradient = False

                y1 = x * x
                y2 = x * 3

                # If grad_outputs=None, dy1 = [1], dy2 = [1].
                # If grad_outputs=[g1, g2], then:
                #    - dy1 = [1] if g1 is None else g1
                #    - dy2 = [1] if g2 is None else g2

                # Since y1 = x * x, dx = 2 * x * dy1.
                # Since y2 = x * 3, dx = 3 * dy2.
                # Therefore, the final result would be:
                # dx = 2 * x * dy1 + 3 * dy2 = 4 * dy1 + 3 * dy2.

                dx = paddle.grad(
                    outputs=[y1, y2],
                    inputs=[x],
                    grad_outputs=grad_outputs)[0]

                return dx.numpy()

            grad_value = paddle.to_tensor(4.0)
            # dy1 = [1], dy2 = [1]
            print(test_dygraph_grad(None)) # [7.]

            # dy1 = [1], dy2 = [4]
            print(test_dygraph_grad([None, grad_value])) # [16.]

            # dy1 = [4], dy2 = [1]
            print(test_dygraph_grad([grad_value, None])) # [19.]

            # dy1 = [3], dy2 = [4]
            grad_y1 = paddle.to_tensor(3.0)
            print(test_dygraph_grad([grad_y1, grad_value])) # [24.]
    '''
    if in_declarative_mode():
        # In dy2static context, we call static interface `gradients`
        # to calculate grads.
        from paddle.static import gradients

        declarative_unsupport_argument_warning(
            "paddle.grad",
            ["retain_graph", "create_graph", "only_inputs", "allow_unused"],
            [retain_graph, create_graph, only_inputs, allow_unused],
            [None, False, True, False],
        )
        return gradients(outputs, inputs, grad_outputs, no_grad_vars)

    def check_in_out(in_out_list, name):
        assert in_out_list is not None, "{} should not be None".format(name)

        if isinstance(in_out_list, (list, tuple)):
            assert len(in_out_list) > 0, "{} cannot be empty".format(name)
            for each_var in in_out_list:
                if _in_eager_without_dygraph_check():
                    assert isinstance(
                        each_var, core.eager.Tensor
                    ), "Elements of {} must be Tensor".format(name)
                else:
                    assert isinstance(
                        each_var, core.VarBase
                    ), "Elements of {} must be Variable".format(name)
            return in_out_list
        else:
            if _in_eager_without_dygraph_check():
                assert isinstance(
                    in_out_list, core.eager.Tensor
                ), "{} must be Tensor or list of Tensor".format(name)
            else:
                assert isinstance(
                    in_out_list, core.VarBase
                ), "{} must be Variable or list of Variable".format(name)
            return [in_out_list]

    outputs = check_in_out(outputs, 'outputs')
    inputs = check_in_out(inputs, 'inputs')

    if grad_outputs is not None:
        if not isinstance(grad_outputs, (list, tuple)):
            grad_outputs = [grad_outputs]

        for each_var in grad_outputs:
            if each_var is not None:
                if _in_eager_without_dygraph_check():
                    assert isinstance(
                        each_var, core.eager.Tensor
                    ), "grad_outputs must be None, a Variable or a list containing None or Variables"
                else:
                    assert isinstance(
                        each_var, core.VarBase
                    ), "grad_outputs must be None, a Variable or a list containing None or Variables"
    else:
        grad_outputs = []

    if len(grad_outputs) > 0:
        assert len(grad_outputs) == len(
            outputs
        ), "The length of grad_outputs must be equal to outputs"

    if no_grad_vars is None:
        no_grad_vars = []
    elif isinstance(no_grad_vars, (core.VarBase, core.eager.Tensor)):
        no_grad_vars = [no_grad_vars]
    elif isinstance(no_grad_vars, core.eager.Tensor):
        no_grad_vars = [no_grad_vars]
    elif isinstance(no_grad_vars, (list, tuple, set)):
        no_grad_vars = list(no_grad_vars)
        for var in no_grad_vars:
            if _in_eager_without_dygraph_check():
                assert isinstance(
                    var, core.eager.Tensor
                ), "no_grad_vars can only contain Tensors"
            else:
                assert isinstance(
                    var, core.VarBase
                ), "no_grad_vars can only contain Variables"
    else:
        if _in_eager_without_dygraph_check():
            raise AssertionError(
                "no_grad_vars must be None, Tensor or list/tuple/set of Tensors"
            )
        else:
            raise AssertionError(
                "no_grad_vars must be None, Variable or list/tuple/set of Variables"
            )

    assert isinstance(create_graph, bool), "create_graph must be True or False"

    if retain_graph is None:
        retain_graph = create_graph

    assert isinstance(
        retain_graph, bool
    ), "retain_graph must be None, True or False"

    assert isinstance(allow_unused, bool), "allow_unused must be True or False"

    assert isinstance(only_inputs, bool), "only_inputs must be True or False"
    assert only_inputs, "only_inputs=False is not supported yet"

    if _in_eager_without_dygraph_check():
        return core.eager.run_partial_grad(
            outputs,
            inputs,
            grad_outputs,
            retain_graph,
            create_graph,
            only_inputs,
            allow_unused,
            no_grad_vars,
        )
    else:
        place = core.Place()
        place.set_place(framework._current_expected_place())
        return core.dygraph_partial_grad(
            inputs,
            outputs,
            grad_outputs,
            no_grad_vars,
            place,
            create_graph,
            retain_graph,
            allow_unused,
            only_inputs,
        )


@framework.dygraph_only
def to_variable(value, name=None, zero_copy=None, dtype=None):
    r"""
    :api_attr: imperative

    The API will create a ``Variable`` object from
    tuple, list, numpy\.ndarray or Variable object.

    Parameters:
        value(tuple|list|ndarray|Variable|Tensor): Initial data.
            Can be a list, tuple, NumPy ndarray, Variable, Tensor.
            The shape can be multi-dimensional. The data type is one of
            numpy\.{float16, float32, float64, int16, int32, int64,
            uint8, uint16, complex64, complex128}.
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name` .
        zero_copy(bool, optional): Whether to share memory with the input numpy
            array. This parameter only works with CPUPlace and will be set to
            True when it is None. Default: None. (Note: zero_copy is temporarily unsupported and will be discarded.)
        dtype(str, optional): The desired data type of returned ``Variable`` .
            Can be 'bool' , 'float16' , 'float32' , 'float64' , 'int8' , 'int16' ,
            'int32' , 'int64' , 'uint8' . Default: None.

    Returns:
        Variable : If ``value`` is a tuple/list/numpy\.ndarray object,
            return a ``Tensor`` created from the corresponding numpy\.ndarray object, which has
            the same data type and shape as ``value``.


    Examples:

     .. code-block:: python

        import numpy as np
        import paddle.fluid as fluid

        with fluid.dygraph.guard(fluid.CPUPlace()):
            x = np.ones([2, 2], np.float32)
            y = fluid.dygraph.to_variable(x, zero_copy=False)
            x[0][0] = -1
            y[0][0].numpy()  # array([1.], dtype=float32)
            y = fluid.dygraph.to_variable(x)
            x[0][0] = 0
            y[0][0].numpy()  # array([0.], dtype=float32)
            c = np.array([2+1j, 2])
            z = fluid.dygraph.to_variable(c)
            z.numpy() # array([2.+1.j, 2.+0.j])
            z.dtype # 'complex128'

            y = fluid.dygraph.to_variable([[0.1, 1.2], [2.2, 3.1], [4.9, 5.2]])
            y.shape     # [3L, 2L]

            y = fluid.dygraph.to_variable(((0.1, 1.2), (2.2, 3.1), (4.9, 5.2)), dtype='int32')
            y.shape     # [3L, 2L]

    """
    support_type = (
        list,
        tuple,
        np.ndarray,
        core.eager.Tensor,
        core.VarBase,
        framework.Variable,
        core.Tensor,
        core.LoDTensor,
    )
    if not isinstance(value, support_type):
        raise TypeError(
            "The type of 'value' in fluid.dygraph.to_variable must be %s, but received %s."
            % (support_type, type(value))
        )
    if isinstance(value, (core.eager.Tensor, core.VarBase, framework.Variable)):
        return value
    elif isinstance(value, (core.Tensor, core.LoDTensor)):
        return core.VarBase(value)
    else:
        if isinstance(
            framework._current_expected_place(), framework.core.CPUPlace
        ):
            # TODO(zhiqiu): we found two problems when enabling zero_copy on CPUPlace.
            # (1): eigen requires 16-byte alignment, but the data of the numpy array may not satisfy it.
            # Details: https://eigen.tuxfamily.org/dox/group__TopicUnalignedArrayAssert.html
            # (2): when used in flask framework, it may result in hang.
            # Details: https://github.com/PaddlePaddle/Paddle/issues/26635
            # So, we temporarily disable the zero_copy strategy.
            if zero_copy == True:
                warnings.warn(
                    "Currently, zero_copy is not supported, and it will be discarded."
                )
                zero_copy = False
        else:
            assert (
                not zero_copy
            ), "zero_copy mode can only be used with CPUPlace"

        if not isinstance(value, np.ndarray):
            value = np.array(value)

        if dtype is not None:
            dtype = convert_dtype(dtype)
            if value.dtype != dtype:
                value = value.astype(dtype)

        if _in_eager_without_dygraph_check():
            return core.eager.Tensor(
                value,
                framework._current_expected_place(),
                False,
                zero_copy,
                name if name else None,
                True,
            )
        else:
            py_var = core.VarBase(
                value=value,
                place=framework._current_expected_place(),
                persistable=False,
                zero_copy=zero_copy,
                name=name if name else '',
            )
            return py_var