From dd0a07f23620f7a859c410ed8be3963f0ea6d647 Mon Sep 17 00:00:00 2001
From: Xiaoxu Chen
Date: Mon, 18 Jul 2022 21:23:27 +0800
Subject: [PATCH] fix new autodiff api docs (#44341)

---
 python/paddle/incubate/autograd/primapi.py | 26 +++++++++++++---------
 1 file changed, 15 insertions(+), 11 deletions(-)

diff --git a/python/paddle/incubate/autograd/primapi.py b/python/paddle/incubate/autograd/primapi.py
index a319874e25..ba7a2537df 100644
--- a/python/paddle/incubate/autograd/primapi.py
+++ b/python/paddle/incubate/autograd/primapi.py
@@ -26,14 +26,14 @@ def forward_grad(outputs, inputs, grad_inputs=None):
     **ONLY available in the static mode and primitive operators.**
 
     Args:
-        outputs: The output tensor or tensors
-        inputs: The input tensor or tensors
-        grad_inputs: The gradient Tensor or Tensors of inputs which has
-            the same shape with inputs, Defaults to None, in this case is
-            equivalent to all ones .
+        outputs(Tensor|Sequence[Tensor]): The output tensor or tensors.
+        inputs(Tensor|Sequence[Tensor]): The input tensor or tensors.
+        grad_inputs(Tensor|Sequence[Tensor]): Optional, the gradient Tensor or
+            Tensors of inputs which has the same shape with inputs, Defaults to
+            None, in this case is equivalent to all ones.
 
     Returns:
-        grad_outputs (Tensor|Sequence[Tensor]): The gradients for outputs.
+        grad_outputs(Tensor|Sequence[Tensor]): The gradients for outputs.
 
     Examples:
 
@@ -99,14 +99,14 @@ def grad(outputs, inputs, grad_outputs=None):
     **ONLY available in the static mode and primitive operators**
 
     Args:
-        outputs (Tensor|Sequence[Tensor]): The output Tensor or Tensors.
-        inputs (Tensor|Sequence[Tensor]): The input Tensor or Tensors.
-        grad_outputs (Tensor|Sequence[Tensor]): The gradient Tensor or
+        outputs(Tensor|Sequence[Tensor]): The output Tensor or Tensors.
+        inputs(Tensor|Sequence[Tensor]): The input Tensor or Tensors.
+        grad_outputs(Tensor|Sequence[Tensor]): Optional, the gradient Tensor or
             Tensors of outputs which has the same shape with outputs, Defaults
-            to None, in this case is equivalent to all ones .
+            to None, in this case is equivalent to all ones.
 
     Returns:
-        grad_inputs (Tensor|Tensors): The gradients for inputs.
+        grad_inputs(Tensor|Tensors): The gradients for inputs.
 
     Examples:
 
@@ -114,8 +114,10 @@ def grad(outputs, inputs, grad_outputs=None):
 
             import numpy as np
             import paddle
+
             paddle.enable_static()
             paddle.incubate.autograd.enable_prim()
+
             startup_program = paddle.static.Program()
             main_program = paddle.static.Program()
             with paddle.static.program_guard(main_program, startup_program):
@@ -124,11 +126,13 @@ def grad(outputs, inputs, grad_outputs=None):
                 y = x * x
                 x_grad = paddle.incubate.autograd.grad(y, x)
                 paddle.incubate.autograd.prim2orig()
+
             exe = paddle.static.Executor()
             exe.run(startup_program)
             x_grad = exe.run(main_program, feed={'x': np.array([2.]).astype('float32')}, fetch_list=[x_grad])
             print(x_grad)
             # [array([4.], dtype=float32)]
+
             paddle.incubate.autograd.disable_prim()
             paddle.disable_static()
     """
-- 
GitLab
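
Note: the hunks above update the Args/Returns documentation of both forward_grad and grad, but only the grad usage example falls inside the displayed context. The sketch below shows how the forward_grad API documented in the first hunk could be exercised, modeled directly on the grad example in the diff; the variable names and the expected output are illustrative assumptions, not lines taken from primapi.py.

    import numpy as np
    import paddle

    # forward_grad is only available in static mode with primitive operators enabled.
    paddle.enable_static()
    paddle.incubate.autograd.enable_prim()

    startup_program = paddle.static.Program()
    main_program = paddle.static.Program()
    with paddle.static.program_guard(main_program, startup_program):
        x = paddle.static.data('x', shape=[1], dtype='float32')
        x.stop_gradient = False
        y = x * x
        # Forward-mode gradient of y w.r.t. x; grad_inputs defaults to None,
        # which the docstring says is equivalent to all ones.
        y_grad = paddle.incubate.autograd.forward_grad(y, x)
        # Lower primitive operators back to original operators before execution.
        paddle.incubate.autograd.prim2orig()

    exe = paddle.static.Executor()
    exe.run(startup_program)
    y_grad = exe.run(main_program,
                     feed={'x': np.array([2.]).astype('float32')},
                     fetch_list=[y_grad])
    print(y_grad)  # expected: [array([4.], dtype=float32)] for y = x * x at x = 2

    paddle.incubate.autograd.disable_prim()
    paddle.disable_static()

The flow mirrors the grad example: build the program under program_guard, call the autodiff API, lower with prim2orig, then execute with a static Executor.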