Unverified commit cf433221, authored by Jack Zhou, committed by GitHub

fix nll_loss doc;test=document_fix; (#29247)

* fix nll_loss doc;test=document_fix;

* remove numpy and set_device;test=document_fix;

* remove numpy;test=document_fix;
Parent b9f1f434
@@ -767,23 +767,20 @@ def nll_loss(input,
     Examples:
         .. code-block:: python
 
             import paddle
-            import numpy as np
             from paddle.nn.functional import nll_loss
 
             log_softmax = paddle.nn.LogSoftmax(axis=1)
 
-            input_np = np.array([[0.88103855, 0.9908683 , 0.6226845 ],
-                                 [0.53331435, 0.07999352, 0.8549948 ],
-                                 [0.25879037, 0.39530203, 0.698465  ],
-                                 [0.73427284, 0.63575995, 0.18827209],
-                                 [0.05689114, 0.0862954 , 0.6325046 ]]).astype(np.float32)
-            label_np = np.array([0, 2, 1, 1, 0]).astype(np.int64)
-            input = paddle.to_tensor(input_np)
+            input = paddle.to_tensor([[0.88103855, 0.9908683 , 0.6226845 ],
+                                      [0.53331435, 0.07999352, 0.8549948 ],
+                                      [0.25879037, 0.39530203, 0.698465  ],
+                                      [0.73427284, 0.63575995, 0.18827209],
+                                      [0.05689114, 0.0862954 , 0.6325046 ]], "float32")
             log_out = log_softmax(input)
-            label = paddle.to_tensor(label_np)
+            label = paddle.to_tensor([0, 2, 1, 1, 0], "int64")
             result = nll_loss(log_out, label)
-            print(result) # [1.0720209]
+            print(result) # Tensor(shape=[1], dtype=float32, place=CPUPlace, stop_gradient=True, [1.07202101])
     """
     if reduction not in ['sum', 'mean', 'none']:
         raise ValueError(
...
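For quick verification, the new (right-hand) side of the hunk above assembles into the following runnable snippet. This is a minimal sketch: the printed Tensor repr (place, number of digits) can vary across Paddle versions and devices.

    import paddle
    from paddle.nn.functional import nll_loss

    log_softmax = paddle.nn.LogSoftmax(axis=1)

    # paddle.to_tensor accepts a nested Python list plus a dtype string,
    # so the numpy round-trip removed by this commit is not needed.
    input = paddle.to_tensor([[0.88103855, 0.9908683 , 0.6226845 ],
                              [0.53331435, 0.07999352, 0.8549948 ],
                              [0.25879037, 0.39530203, 0.698465  ],
                              [0.73427284, 0.63575995, 0.18827209],
                              [0.05689114, 0.0862954 , 0.6325046 ]], "float32")

    # nll_loss expects log-probabilities, hence LogSoftmax rather than Softmax.
    log_out = log_softmax(input)
    label = paddle.to_tensor([0, 2, 1, 1, 0], "int64")
    result = nll_loss(log_out, label)
    print(result)  # mean reduction by default -> single-element Tensor, ~1.072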
@@ -690,25 +690,19 @@ class NLLLoss(fluid.dygraph.Layer):
         .. code-block:: python
 
             import paddle
-            import numpy as np
 
-            nll_loss = paddle.nn.layer.NLLLoss()
+            nll_loss = paddle.nn.loss.NLLLoss()
             log_softmax = paddle.nn.LogSoftmax(axis=1)
 
-            input_np = np.array([[0.88103855, 0.9908683 , 0.6226845 ],
-                                 [0.53331435, 0.07999352, 0.8549948 ],
-                                 [0.25879037, 0.39530203, 0.698465  ],
-                                 [0.73427284, 0.63575995, 0.18827209],
-                                 [0.05689114, 0.0862954 , 0.6325046 ]]).astype(np.float32)
-            label_np = np.array([0, 2, 1, 1, 0]).astype(np.int64)
-            place = paddle.CPUPlace()
-            paddle.disable_static(place)
-            input = paddle.to_tensor(input_np)
+            input = paddle.to_tensor([[0.88103855, 0.9908683 , 0.6226845 ],
+                                      [0.53331435, 0.07999352, 0.8549948 ],
+                                      [0.25879037, 0.39530203, 0.698465  ],
+                                      [0.73427284, 0.63575995, 0.18827209],
+                                      [0.05689114, 0.0862954 , 0.6325046 ]], "float32")
             log_out = log_softmax(input)
-            label = paddle.to_tensor(label_np)
+            label = paddle.to_tensor([0, 2, 1, 1, 0], "int64")
             result = nll_loss(log_out, label)
-            print(result.numpy()) # [1.0720209]
+            print(result) # Tensor(shape=[1], dtype=float32, place=CPUPlace, stop_gradient=True, [1.07202101])
     """
...
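The layer-based example differs only in constructing an NLLLoss object and calling it. Assembled from the new side of the hunk above as another minimal sketch (output repr may vary by version; dynamic-graph mode is the default in Paddle 2.x, which is why the CPUPlace/disable_static setup could be dropped):

    import paddle

    nll_loss = paddle.nn.loss.NLLLoss()  # equivalent to paddle.nn.NLLLoss()
    log_softmax = paddle.nn.LogSoftmax(axis=1)

    input = paddle.to_tensor([[0.88103855, 0.9908683 , 0.6226845 ],
                              [0.53331435, 0.07999352, 0.8549948 ],
                              [0.25879037, 0.39530203, 0.698465  ],
                              [0.73427284, 0.63575995, 0.18827209],
                              [0.05689114, 0.0862954 , 0.6325046 ]], "float32")
    log_out = log_softmax(input)
    label = paddle.to_tensor([0, 2, 1, 1, 0], "int64")
    result = nll_loss(log_out, label)  # the layer object is callable
    print(result)  # same value as the functional form, ~1.072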