Unverified · Commit 9f642ed8 authored by lijianshe02, committed by GitHub

fix English doc for dice_loss, log_loss, unfold and NLLLoss API test=document_fix (#28739)

* fix English doc for dice_loss, log_loss, unfold and NLLLoss API test=document_fix
Parent 4e00c095
@@ -7068,9 +7068,6 @@ def roi_align(input,
 def dice_loss(input, label, epsilon=0.00001, name=None):
     """
-    :alias_main: paddle.nn.functional.dice_loss
-    :alias: paddle.nn.functional.dice_loss,paddle.nn.functional.loss.dice_loss
-    :old_api: paddle.fluid.layers.dice_loss
 
     Dice loss for comparing the similarity between the input predictions and the label.
     This implementation is for binary classification, where the input is sigmoid
@@ -7106,7 +7103,6 @@ def dice_loss(input, label, epsilon=0.00001, name=None):
             import paddle
             import paddle.nn.functional as F
-            paddle.disable_static()
             x = paddle.randn((3,224,224,2))
             label = paddle.randint(high=2, shape=(3,224,224,1))
             predictions = F.softmax(x)
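For reference, Dice loss is 1 - 2·|A∩B| / (|A| + |B|) over the prediction and label masks, with epsilon guarding against an empty union. A minimal NumPy sketch of that textbook formula (the reduction axes in Paddle's actual kernel may differ):

    import numpy as np

    def dice_loss_ref(pred, label_onehot, epsilon=1e-5):
        # pred: probabilities (e.g. softmax output); label_onehot: same shape as pred
        intersection = (pred * label_onehot).sum()
        total = pred.sum() + label_onehot.sum()
        return 1.0 - 2.0 * intersection / (total + epsilon)

    pred = np.array([[0.9, 0.1], [0.2, 0.8]])
    label = np.array([[1.0, 0.0], [0.0, 1.0]])
    print(dice_loss_ref(pred, label))  # ~0.15, small because pred matches label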
@@ -13039,9 +13035,6 @@ def grid_sampler(x, grid, name=None):
 def log_loss(input, label, epsilon=1e-4, name=None):
     """
-    :alias_main: paddle.nn.functional.log_loss
-    :alias: paddle.nn.functional.log_loss,paddle.nn.functional.loss.log_loss
-    :old_api: paddle.fluid.layers.log_loss
 
     **Negative Log Loss Layer**
@@ -13073,7 +13066,6 @@ def log_loss(input, label, epsilon=1e-4, name=None):
             import paddle
             import paddle.nn.functional as F
-            paddle.disable_static()
             label = paddle.randn((10,1))
             prob = paddle.randn((10,1))
             cost = F.log_loss(input=prob, label=label)
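The layer computes the elementwise binary cross-entropy its docstring documents, Out = -label·log(input + epsilon) - (1 - label)·log(1 - input + epsilon). A minimal NumPy sketch of that formula, assuming input holds probabilities in [0, 1]:

    import numpy as np

    def log_loss_ref(prob, label, epsilon=1e-4):
        # Elementwise negative log loss; epsilon keeps log() away from zero.
        return (-label * np.log(prob + epsilon)
                - (1.0 - label) * np.log(1.0 - prob + epsilon))

    prob = np.array([[0.9], [0.3]], dtype=np.float32)   # predicted probabilities
    label = np.array([[1.0], [0.0]], dtype=np.float32)  # binary ground truth
    print(log_loss_ref(prob, label))  # ~[[0.105], [0.356]]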
@@ -14462,9 +14454,6 @@ def deformable_conv(input,
 def unfold(x, kernel_sizes, strides=1, paddings=0, dilations=1, name=None):
     """
-    :alias_main: paddle.nn.functional.unfold
-    :alias: paddle.nn.functional.unfold,paddle.nn.functional.common.unfold
-    :old_api: paddle.fluid.layers.unfold
 
     This op returns a col buffer of sliding local blocks of input x, also known
     as im2col for batched 2D image tensors. For each block under the convolution filter,
@@ -14490,7 +14479,7 @@ def unfold(x, kernel_sizes, strides=1, paddings=0, dilations=1, name=None):
     Parameters:
-        x(Varaible): 4-D Tensor, input tensor of format [N, C, H, W],
+        x(Tensor): 4-D Tensor, input tensor of format [N, C, H, W],
             data type can be float32 or float64
         kernel_sizes(int|list): The size of convolution kernel, should be [k_h, k_w]
             or an integer k treated as [k, k].
@@ -14513,22 +14502,24 @@ def unfold(x, kernel_sizes, strides=1, paddings=0, dilations=1, name=None):
     Returns:
-        The tensor variable corresponding to the sliding local blocks.
+        The tensor corresponding to the sliding local blocks.
         The output shape is [N, Cout, Lout] as described above.
         Cout is the total number of values within each block,
         and Lout is the total number of such blocks.
         The data type of output is the same as the input :math:`x`
 
     Return Type:
-        Variable
+        Tensor
 
     Examples:
 
         .. code-block:: python
 
-            import paddle.fluid as fluid
-            x = fluid.data(name = 'data', shape = [100, 3, 224, 224], dtype = 'float32')
-            y = fluid.layers.unfold(x, [3, 3], 1, 1, 1)
+            import paddle
+            import paddle.nn.functional as F
+
+            x = paddle.randn((100,3,224,224))
+            y = F.unfold(x, [3, 3], 1, 1, 1)
     """
     helper = LayerHelper("unfold", **locals())
...
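As a sanity check on the [N, Cout, Lout] output shape: for the example above (kernel [3, 3], stride 1, padding 1, dilation 1 on a [100, 3, 224, 224] input), Cout = C·k_h·k_w = 27 and Lout = h_out·w_out = 50176. A small sketch of the standard im2col shape arithmetic (an illustrative helper, not Paddle's internal code):

    def unfold_output_shape(n, c, h, w, kernel=(3, 3), stride=1, padding=1, dilation=1):
        # h_out = (h + 2*padding - dilation*(k_h - 1) - 1) // stride + 1, same for w_out
        kh, kw = kernel
        h_out = (h + 2 * padding - dilation * (kh - 1) - 1) // stride + 1
        w_out = (w + 2 * padding - dilation * (kw - 1) - 1) // stride + 1
        return n, c * kh * kw, h_out * w_out  # [N, Cout, Lout]

    print(unfold_output_shape(100, 3, 224, 224))  # (100, 27, 50176)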
@@ -780,13 +780,11 @@ def nll_loss(input,
                              [0.05689114, 0.0862954 , 0.6325046 ]]).astype(np.float32)
         label_np = np.array([0, 2, 1, 1, 0]).astype(np.int64)
 
-        place = paddle.CPUPlace()
-        paddle.disable_static(place)
         input = paddle.to_tensor(input_np)
         log_out = log_softmax(input)
         label = paddle.to_tensor(label_np)
         result = nll_loss(log_out, label)
-        print(result.numpy()) # [1.0720209]
+        print(result) # [1.0720209]
     """
     if reduction not in ['sum', 'mean', 'none']:
         raise ValueError(
...
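The printed value follows directly from the definition: with the default reduction='mean' and no class weights, NLL loss is the mean of -log_prob[i, label[i]] over the batch. A minimal NumPy sketch of that definition:

    import numpy as np

    def nll_loss_ref(log_probs, labels):
        # Average negative log-probability of each sample's true class
        # (reduction='mean', no class weights).
        n = log_probs.shape[0]
        return -log_probs[np.arange(n), labels].mean()

    log_probs = np.log(np.array([[0.7, 0.2, 0.1],
                                 [0.1, 0.3, 0.6]]))
    labels = np.array([0, 2])
    print(nll_loss_ref(log_probs, labels))  # ~0.4337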
@@ -625,8 +625,6 @@ class BCELoss(fluid.dygraph.Layer):
 class NLLLoss(fluid.dygraph.Layer):
     """
-    :alias_main: paddle.nn.NLLLoss
-    :alias: paddle.nn.NLLLoss,paddle.nn.layer.NLLLoss,paddle.nn.layer.loss.NLLLoss
 
     This class accepts input and target label and returns negative log likelihood
     cross error. It is useful to train a classification problem with C classes.
@@ -693,7 +691,7 @@ class NLLLoss(fluid.dygraph.Layer):
             import paddle
             import numpy as np
 
-            nll_loss = paddle.nn.layer.NLLLoss()
+            nll_loss = paddle.nn.NLLLoss()
             log_softmax = paddle.nn.LogSoftmax(axis=1)
 
             input_np = np.array([[0.88103855, 0.9908683 , 0.6226845 ],
@@ -703,13 +701,11 @@ class NLLLoss(fluid.dygraph.Layer):
                                  [0.05689114, 0.0862954 , 0.6325046 ]]).astype(np.float32)
             label_np = np.array([0, 2, 1, 1, 0]).astype(np.int64)
 
-            place = paddle.CPUPlace()
-            paddle.disable_static(place)
             input = paddle.to_tensor(input_np)
             log_out = log_softmax(input)
             label = paddle.to_tensor(label_np)
             result = nll_loss(log_out, label)
-            print(result.numpy()) # [1.0720209]
+            print(result) # [1.0720209]
     """
...