From 34df32d6f5769cb32b8e71cced2f019df8137323 Mon Sep 17 00:00:00 2001
From: Jack Zhou
Date: Thu, 3 Dec 2020 10:21:08 +0800
Subject: [PATCH] fix nll_loss doc;test=document_fix; (#29247) (#29311)

fix nll_loss doc;test=document_fix
---
 python/paddle/nn/functional/loss.py | 19 ++++++++-----------
 python/paddle/nn/layer/loss.py      | 22 ++++++++--------------
 2 files changed, 16 insertions(+), 25 deletions(-)

diff --git a/python/paddle/nn/functional/loss.py b/python/paddle/nn/functional/loss.py
index cc1010772c2..df83b174b8a 100755
--- a/python/paddle/nn/functional/loss.py
+++ b/python/paddle/nn/functional/loss.py
@@ -767,23 +767,20 @@ def nll_loss(input,
     Examples:
         .. code-block:: python
+
             import paddle
-            import numpy as np
             from paddle.nn.functional import nll_loss
 
             log_softmax = paddle.nn.LogSoftmax(axis=1)
-            input_np = np.array([[0.88103855, 0.9908683 , 0.6226845 ],
-                                 [0.53331435, 0.07999352, 0.8549948 ],
-                                 [0.25879037, 0.39530203, 0.698465  ],
-                                 [0.73427284, 0.63575995, 0.18827209],
-                                 [0.05689114, 0.0862954 , 0.6325046 ]]).astype(np.float32)
-            label_np = np.array([0, 2, 1, 1, 0]).astype(np.int64)
-
-            input = paddle.to_tensor(input_np)
+            input = paddle.to_tensor([[0.88103855, 0.9908683 , 0.6226845 ],
+                                      [0.53331435, 0.07999352, 0.8549948 ],
+                                      [0.25879037, 0.39530203, 0.698465  ],
+                                      [0.73427284, 0.63575995, 0.18827209],
+                                      [0.05689114, 0.0862954 , 0.6325046 ]], "float32")
             log_out = log_softmax(input)
-            label = paddle.to_tensor(label_np)
+            label = paddle.to_tensor([0, 2, 1, 1, 0], "int64")
             result = nll_loss(log_out, label)
-            print(result) # [1.0720209]
+            print(result) # Tensor(shape=[1], dtype=float32, place=CPUPlace, stop_gradient=True, [1.07202101])
 
     """
     if reduction not in ['sum', 'mean', 'none']:
         raise ValueError(
diff --git a/python/paddle/nn/layer/loss.py b/python/paddle/nn/layer/loss.py
index ae5f730f2df..e8687af063e 100644
--- a/python/paddle/nn/layer/loss.py
+++ b/python/paddle/nn/layer/loss.py
@@ -690,25 +690,19 @@ class NLLLoss(fluid.dygraph.Layer):
         .. code-block:: python
 
             import paddle
-            import numpy as np
 
-            nll_loss = paddle.nn.layer.NLLLoss()
+            nll_loss = paddle.nn.loss.NLLLoss()
             log_softmax = paddle.nn.LogSoftmax(axis=1)
 
-            input_np = np.array([[0.88103855, 0.9908683 , 0.6226845 ],
-                                 [0.53331435, 0.07999352, 0.8549948 ],
-                                 [0.25879037, 0.39530203, 0.698465  ],
-                                 [0.73427284, 0.63575995, 0.18827209],
-                                 [0.05689114, 0.0862954 , 0.6325046 ]]).astype(np.float32)
-            label_np = np.array([0, 2, 1, 1, 0]).astype(np.int64)
-
-            place = paddle.CPUPlace()
-            paddle.disable_static(place)
-            input = paddle.to_tensor(input_np)
+            input = paddle.to_tensor([[0.88103855, 0.9908683 , 0.6226845 ],
+                                      [0.53331435, 0.07999352, 0.8549948 ],
+                                      [0.25879037, 0.39530203, 0.698465  ],
+                                      [0.73427284, 0.63575995, 0.18827209],
+                                      [0.05689114, 0.0862954 , 0.6325046 ]], "float32")
             log_out = log_softmax(input)
-            label = paddle.to_tensor(label_np)
+            label = paddle.to_tensor([0, 2, 1, 1, 0], "int64")
             result = nll_loss(log_out, label)
-            print(result.numpy()) # [1.0720209]
+            print(result) # Tensor(shape=[1], dtype=float32, place=CPUPlace, stop_gradient=True, [1.07202101])
 
     """
 
-- 
GitLab
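
Reviewer note: a minimal standalone sketch that exercises both updated docstring examples in one script. It assumes PaddlePaddle 2.0+, where dygraph mode is the default (which is why the patch drops the explicit paddle.CPUPlace()/paddle.disable_static() calls) and printing a Tensor directly shows the full Tensor repr (which is why print(result.numpy()) became print(result)). The expected values come from the patch above.

    import paddle
    from paddle.nn.functional import nll_loss

    # Raw scores for 5 samples over 3 classes (same data as the docstring
    # examples). nll_loss expects log-probabilities, so apply LogSoftmax
    # along the class axis first.
    log_softmax = paddle.nn.LogSoftmax(axis=1)
    input = paddle.to_tensor([[0.88103855, 0.9908683 , 0.6226845 ],
                              [0.53331435, 0.07999352, 0.8549948 ],
                              [0.25879037, 0.39530203, 0.698465  ],
                              [0.73427284, 0.63575995, 0.18827209],
                              [0.05689114, 0.0862954 , 0.6325046 ]], "float32")
    label = paddle.to_tensor([0, 2, 1, 1, 0], "int64")
    log_out = log_softmax(input)

    # Functional form (python/paddle/nn/functional/loss.py).
    print(nll_loss(log_out, label))  # mean NLL, ~1.07202101

    # Layer form (python/paddle/nn/layer/loss.py); should print the same value.
    loss_layer = paddle.nn.loss.NLLLoss()
    print(loss_layer(log_out, label))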