Unverified commit 1f6df878, authored by Zhong Hui, committed by GitHub

fix doc, use to_tensor

fix doc, use to_tensor for the loss ops
Parent 7ee70a47
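The substantive change repeated across these docstrings replaces `paddle.to_variable(np.array(...))` with `paddle.to_tensor`, which accepts nested Python lists and numpy arrays directly and takes an explicit `dtype`. A minimal sketch of the two styles (values are illustrative only):

    import numpy as np
    import paddle

    # old style: wrap a pre-built numpy array
    #   x = paddle.to_variable(np.array([[1, 2], [3, 4]]).astype('float32'))

    # new style: to_tensor takes nested lists or numpy arrays plus a dtype
    x = paddle.to_tensor([[1, 2], [3, 4]], dtype='float32')
    y = paddle.to_tensor(np.array([[1, 2], [3, 4]]).astype('float32'))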
......@@ -147,7 +147,6 @@ def binary_cross_entropy(input, label, weight=None, reduction='mean',
label = paddle.to_tensor(label_data)
output = paddle.nn.functional.binary_cross_entropy(input, label)
print(output.numpy()) # [0.65537095]
- paddle.enable_static()
"""
if reduction not in ['sum', 'mean', 'none']:
......@@ -165,8 +164,7 @@ def binary_cross_entropy(input, label, weight=None, reduction='mean',
return core.ops.reduce_sum(out, 'dim', [0], 'keep_dim', False,
"reduce_all", True)
elif reduction == 'mean':
- return core.ops.reduce_mean(out, 'dim', [0], 'keep_dim', False,
- "reduce_all", True)
+ return core.ops.mean(out)
else:
return out
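Besides the docstring fixes, this hunk simplifies the mean reduction: the old call passed 'dim', [0] but also 'reduce_all', True, and with reduce_all set the reduction covers every element regardless of dim, so a single full mean is equivalent. A numpy sketch of that equivalence (illustrative values, assuming reduce_all overrides dim as in the fluid reduce ops):

    import numpy as np

    out = np.array([[0.3, 0.7], [0.1, 0.9]], dtype='float32')  # per-element losses

    per_column = out.mean(axis=0)  # what 'dim': [0] alone would give: [0.2, 0.8]
    full_mean = out.mean()         # what reduce_all=True actually computes: 0.5
    # core.ops.mean(out) yields the full mean directly, hence the one-liner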
......@@ -467,14 +465,12 @@ def margin_ranking_loss(input,
.. code-block:: python
import numpy as np
import paddle
paddle.disable_static()
- input = paddle.to_variable(np.array([[1, 2], [3, 4]]).astype('float32'))
- other = paddle.to_variable(np.array([[2, 1], [2, 4]]).astype('float32'))
- label = paddle.to_variable(np.array([[1, -1], [-1, -1]]).astype('float32'))
+ input = paddle.to_tensor([[1, 2], [3, 4]], dtype='float32')
+ other = paddle.to_tensor([[2, 1], [2, 4]], dtype='float32')
+ label = paddle.to_tensor([[1, -1], [-1, -1]], dtype='float32')
loss = paddle.nn.functional.margin_ranking_loss(input, other, label)
print(loss.numpy()) # [0.75]
"""
......@@ -578,8 +574,8 @@ def l1_loss(input, label, reduction='mean', name=None):
paddle.disable_static()
input_data = np.array([[1.5, 0.8], [0.2, 1.3]]).astype("float32")
label_data = np.array([[1.7, 1], [0.4, 0.5]]).astype("float32")
- input = paddle.to_variable(input_data)
- label = paddle.to_variable(label_data)
+ input = paddle.to_tensor(input_data)
+ label = paddle.to_tensor(label_data)
l1_loss = paddle.nn.functional.l1_loss(input, label)
print(l1_loss.numpy())
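The example prints no expected value; with the default reduction='mean', L1 loss is the mean absolute difference, which works out to 0.35 for these inputs (numpy sketch):

    import numpy as np

    input_data = np.array([[1.5, 0.8], [0.2, 1.3]], dtype='float32')
    label_data = np.array([[1.7, 1.0], [0.4, 0.5]], dtype='float32')

    # |input - label| = [[0.2, 0.2], [0.2, 0.8]]; mean = 0.35
    print(np.abs(input_data - label_data).mean())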
......@@ -675,9 +671,9 @@ def nll_loss(input,
place = paddle.CPUPlace()
paddle.disable_static(place)
- input = paddle.to_variable(input_np)
+ input = paddle.to_tensor(input_np)
log_out = log_softmax(input)
- label = paddle.to_variable(label_np)
+ label = paddle.to_tensor(label_np)
result = nll_loss(log_out, label)
print(result.numpy()) # [1.0720209]
"""
......
......@@ -44,10 +44,10 @@ class PairwiseDistance(layers.Layer):
For more information, please refer to :ref:`api_guide_Name`.
Shape:
- x: :math:`(N, D)` where `D` is the dimension of vector, available dtype
+ x: :math:`[N, D]` where `D` is the dimension of vector, available dtype
is float32, float64.
- y: :math:`(N, D)`, y have the same shape and dtype as x.
- out: :math:`(N)`. If :attr:`keepdim` is ``True``, the out shape is :math:`(N, 1)`.
+ y: :math:`[N, D]`, y has the same shape and dtype as x.
+ out: :math:`[N]`. If :attr:`keepdim` is ``True``, the out shape is :math:`[N, 1]`.
The same dtype as input tensor.
Examples:
......@@ -58,8 +58,8 @@ class PairwiseDistance(layers.Layer):
paddle.disable_static()
x_np = np.array([[1., 3.], [3., 5.]]).astype(np.float64)
y_np = np.array([[5., 6.], [7., 8.]]).astype(np.float64)
- x = paddle.to_variable(x_np)
- y = paddle.to_variable(y_np)
+ x = paddle.to_tensor(x_np)
+ y = paddle.to_tensor(y_np)
dist = paddle.nn.PairwiseDistance()
distance = dist(x, y)
print(distance.numpy()) # [5. 5.]
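With the default p=2, PairwiseDistance is the row-wise Euclidean norm of x - y, so the printed [5. 5.] can be reproduced with numpy alone:

    import numpy as np

    x_np = np.array([[1., 3.], [3., 5.]])
    y_np = np.array([[5., 6.], [7., 8.]])

    # sqrt((1-5)^2 + (3-6)^2) = 5 and sqrt((3-7)^2 + (5-8)^2) = 5
    print(np.linalg.norm(x_np - y_np, axis=1))  # [5. 5.]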
......
......@@ -376,8 +376,8 @@ class L1Loss(fluid.dygraph.Layer):
paddle.disable_static()
input_data = np.array([[1.5, 0.8], [0.2, 1.3]]).astype("float32")
label_data = np.array([[1.7, 1], [0.4, 0.5]]).astype("float32")
- input = paddle.to_variable(input_data)
- label = paddle.to_variable(label_data)
+ input = paddle.to_tensor(input_data)
+ label = paddle.to_tensor(label_data)
l1_loss = paddle.nn.loss.L1Loss()
output = l1_loss(input, label)
......@@ -455,7 +455,7 @@ class BCELoss(fluid.dygraph.Layer):
For more information, please refer to :ref:`api_guide_Name`.
Shape:
- input (Tensor): 2-D tensor with shape: (N, *), N is batch_size, `*` means
+ input (Tensor): 2-D tensor with shape: [N, *], N is batch_size, `*` means
number of additional dimensions. The input ``input`` should always
be the output of sigmoid. Available dtype is float32, float64.
label (Tensor): 2-D tensor with the same shape as ``input``. The target
......@@ -476,12 +476,11 @@ class BCELoss(fluid.dygraph.Layer):
label_data = np.array([1.0, 0.0, 1.0]).astype("float32")
paddle.disable_static()
- input = paddle.to_variable(input_data)
- label = paddle.to_variable(label_data)
+ input = paddle.to_tensor(input_data)
+ label = paddle.to_tensor(label_data)
bce_loss = paddle.nn.loss.BCELoss()
output = bce_loss(input, label)
print(output.numpy()) # [0.65537095]
- paddle.enable_static()
"""
......@@ -584,9 +583,9 @@ class NLLLoss(fluid.dygraph.Layer):
place = paddle.CPUPlace()
paddle.disable_static(place)
- input = paddle.to_variable(input_np)
+ input = paddle.to_tensor(input_np)
log_out = log_softmax(input)
- label = paddle.to_variable(label_np)
+ label = paddle.to_tensor(label_np)
result = nll_loss(log_out, label)
print(result.numpy()) # [1.0720209]
......@@ -729,14 +728,12 @@ class MarginRankingLoss(fluid.dygraph.Layer):
.. code-block:: python
import numpy as np
import paddle
paddle.disable_static()
- input = paddle.to_variable(np.array([[1, 2], [3, 4]]).astype("float32"))
- other = paddle.to_variable(np.array([[2, 1], [2, 4]]).astype("float32"))
- label = paddle.to_variable(np.array([[1, -1], [-1, -1]]).astype("float32"))
+ input = paddle.to_tensor([[1, 2], [3, 4]], dtype="float32")
+ other = paddle.to_tensor([[2, 1], [2, 4]], dtype="float32")
+ label = paddle.to_tensor([[1, -1], [-1, -1]], dtype="float32")
margin_rank_loss = paddle.nn.MarginRankingLoss()
loss = margin_rank_loss(input, other, label)
print(loss.numpy()) # [0.75]
......