未验证 提交 dd0a07f2 编写于 作者: X Xiaoxu Chen 提交者: GitHub

fix new autodiff api docs (#44341)

上级 3f70b1d3
......@@ -26,14 +26,14 @@ def forward_grad(outputs, inputs, grad_inputs=None):
**ONLY available in the static mode and primitive operators.**
Args:
outputs: The output tensor or tensors
inputs: The input tensor or tensors
grad_inputs: The gradient Tensor or Tensors of inputs which has
the same shape with inputs, Defaults to None, in this case is
equivalent to all ones .
outputs(Tensor|Sequence[Tensor]): The output tensor or tensors.
inputs(Tensor|Sequence[Tensor]): The input tensor or tensors.
grad_inputs(Tensor|Sequence[Tensor]): Optional, the gradient Tensor or
Tensors of inputs which have the same shape as inputs. Defaults to
None, which is equivalent to all ones.
Returns:
grad_outputs (Tensor|Sequence[Tensor]): The gradients for outputs.
grad_outputs(Tensor|Sequence[Tensor]): The gradients for outputs.
Examples:
......@@ -99,14 +99,14 @@ def grad(outputs, inputs, grad_outputs=None):
**ONLY available in the static mode and primitive operators**
Args:
outputs (Tensor|Sequence[Tensor]): The output Tensor or Tensors.
inputs (Tensor|Sequence[Tensor]): The input Tensor or Tensors.
grad_outputs (Tensor|Sequence[Tensor]): The gradient Tensor or
outputs(Tensor|Sequence[Tensor]): The output Tensor or Tensors.
inputs(Tensor|Sequence[Tensor]): The input Tensor or Tensors.
grad_outputs(Tensor|Sequence[Tensor]): Optional, the gradient Tensor or
Tensors of outputs which have the same shape as outputs. Defaults
to None, in this case is equivalent to all ones .
to None, which is equivalent to all ones.
Returns:
grad_inputs (Tensor|Tensors): The gradients for inputs.
grad_inputs(Tensor|Sequence[Tensor]): The gradients for inputs.
Examples:
......@@ -114,8 +114,10 @@ def grad(outputs, inputs, grad_outputs=None):
import numpy as np
import paddle
paddle.enable_static()
paddle.incubate.autograd.enable_prim()
startup_program = paddle.static.Program()
main_program = paddle.static.Program()
with paddle.static.program_guard(main_program, startup_program):
......@@ -124,11 +126,13 @@ def grad(outputs, inputs, grad_outputs=None):
y = x * x
x_grad = paddle.incubate.autograd.grad(y, x)
paddle.incubate.autograd.prim2orig()
exe = paddle.static.Executor()
exe.run(startup_program)
x_grad = exe.run(main_program, feed={'x': np.array([2.]).astype('float32')}, fetch_list=[x_grad])
print(x_grad)
# [array([4.], dtype=float32)]
paddle.incubate.autograd.disable_prim()
paddle.disable_static()
"""
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册