diff --git a/python/paddle/nn/functional/loss.py b/python/paddle/nn/functional/loss.py index f1509143f3c933db12fc4ab6afd1a00b291f38f4..d2ddee654f4d04de152d15130ba53c424af3e5b2 100644 --- a/python/paddle/nn/functional/loss.py +++ b/python/paddle/nn/functional/loss.py @@ -147,7 +147,6 @@ def binary_cross_entropy(input, label, weight=None, reduction='mean', label = paddle.to_tensor(label_data) output = paddle.nn.functional.binary_cross_entropy(input, label) print(output.numpy()) # [0.65537095] - paddle.enable_static() """ if reduction not in ['sum', 'mean', 'none']: @@ -165,8 +164,7 @@ def binary_cross_entropy(input, label, weight=None, reduction='mean', return core.ops.reduce_sum(out, 'dim', [0], 'keep_dim', False, "reduce_all", True) elif reduction == 'mean': - return core.ops.reduce_mean(out, 'dim', [0], 'keep_dim', False, - "reduce_all", True) + return core.ops.mean(out) else: return out @@ -467,14 +465,12 @@ def margin_ranking_loss(input, .. code-block:: python - import numpy as np import paddle - paddle.disable_static() - input = paddle.to_variable(np.array([[1, 2], [3, 4]]).astype('float32')) - other = paddle.to_variable(np.array([[2, 1], [2, 4]]).astype('float32')) - label = paddle.to_variable(np.array([[1, -1], [-1, -1]]).astype('float32')) + input = paddle.to_tensor([[1, 2], [3, 4]], dtype='float32') + other = paddle.to_tensor([[2, 1], [2, 4]], dtype='float32') + label = paddle.to_tensor([[1, -1], [-1, -1]], dtype='float32') loss = paddle.nn.functional.margin_ranking_loss(input, other, label) print(loss.numpy()) # [0.75] """ @@ -578,8 +574,8 @@ def l1_loss(input, label, reduction='mean', name=None): paddle.disable_static() input_data = np.array([[1.5, 0.8], [0.2, 1.3]]).astype("float32") label_data = np.array([[1.7, 1], [0.4, 0.5]]).astype("float32") - input = paddle.to_variable(input_data) - label = paddle.to_variable(label_data) + input = paddle.to_tensor(input_data) + label = paddle.to_tensor(label_data) l1_loss = paddle.nn.functional.l1_loss(input, 
label) print(l1_loss.numpy()) @@ -675,9 +671,9 @@ def nll_loss(input, place = paddle.CPUPlace() paddle.disable_static(place) - input = paddle.to_variable(input_np) + input = paddle.to_tensor(input_np) log_out = log_softmax(input) - label = paddle.to_variable(label_np) + label = paddle.to_tensor(label_np) result = nll_loss(log_out, label) print(result.numpy()) # [1.0720209] """ diff --git a/python/paddle/nn/layer/distance.py b/python/paddle/nn/layer/distance.py index b0917441de3fea640204a3891ed03e9a451e3f0f..334b71151b563f9f68fc4e7e1c89d83697e1fb4c 100644 --- a/python/paddle/nn/layer/distance.py +++ b/python/paddle/nn/layer/distance.py @@ -44,10 +44,10 @@ class PairwiseDistance(layers.Layer): For more information, please refer to :ref:`api_guide_Name`. Shape: - x: :math:`(N, D)` where `D` is the dimension of vector, available dtype + x: :math:`[N, D]` where `D` is the dimension of vector, available dtype is float32, float64. - y: :math:`(N, D)`, y have the same shape and dtype as x. - out: :math:`(N)`. If :attr:`keepdim` is ``True``, the out shape is :math:`(N, 1)`. + y: :math:`[N, D]`, y have the same shape and dtype as x. + out: :math:`[N]`. If :attr:`keepdim` is ``True``, the out shape is :math:`[N, 1]`. The same dtype as input tensor. Examples: @@ -58,8 +58,8 @@ class PairwiseDistance(layers.Layer): paddle.disable_static() x_np = np.array([[1., 3.], [3., 5.]]).astype(np.float64) y_np = np.array([[5., 6.], [7., 8.]]).astype(np.float64) - x = paddle.to_variable(x_np) - y = paddle.to_variable(y_np) + x = paddle.to_tensor(x_np) + y = paddle.to_tensor(y_np) dist = paddle.nn.PairwiseDistance() distance = dist(x, y) print(distance.numpy()) # [5. 5.] 
diff --git a/python/paddle/nn/layer/loss.py b/python/paddle/nn/layer/loss.py index a1c7d28a85e762ebb381c5f0075df1c7b00396f7..a60e615d5064bf4ef2229dd67193774030383888 100644 --- a/python/paddle/nn/layer/loss.py +++ b/python/paddle/nn/layer/loss.py @@ -376,8 +376,8 @@ class L1Loss(fluid.dygraph.Layer): paddle.disable_static() input_data = np.array([[1.5, 0.8], [0.2, 1.3]]).astype("float32") label_data = np.array([[1.7, 1], [0.4, 0.5]]).astype("float32") - input = paddle.to_variable(input_data) - label = paddle.to_variable(label_data) + input = paddle.to_tensor(input_data) + label = paddle.to_tensor(label_data) l1_loss = paddle.nn.loss.L1Loss() output = l1_loss(input, label) @@ -455,7 +455,7 @@ class BCELoss(fluid.dygraph.Layer): For more information, please refer to :ref:`api_guide_Name`. Shape: - input (Tensor): 2-D tensor with shape: (N, *), N is batch_size, `*` means + input (Tensor): 2-D tensor with shape: [N, *], N is batch_size, `*` means number of additional dimensions. The input ``input`` should always be the output of sigmod. Available dtype is float32, float64. label (Tensor): 2-D tensor with the same shape as ``input``. 
The target @@ -476,12 +476,11 @@ class BCELoss(fluid.dygraph.Layer): label_data = np.array([1.0, 0.0, 1.0]).astype("float32") paddle.disable_static() - input = paddle.to_variable(input_data) - label = paddle.to_variable(label_data) + input = paddle.to_tensor(input_data) + label = paddle.to_tensor(label_data) bce_loss = paddle.nn.loss.BCELoss() output = bce_loss(input, label) print(output.numpy()) # [0.65537095] - paddle.enable_static() """ @@ -584,9 +583,9 @@ class NLLLoss(fluid.dygraph.Layer): place = paddle.CPUPlace() paddle.disable_static(place) - input = paddle.to_variable(input_np) + input = paddle.to_tensor(input_np) log_out = log_softmax(input) - label = paddle.to_variable(label_np) + label = paddle.to_tensor(label_np) result = nll_loss(log_out, label) print(result.numpy()) # [1.0720209] @@ -729,14 +728,12 @@ class MarginRankingLoss(fluid.dygraph.Layer): .. code-block:: python - import numpy as np import paddle - paddle.disable_static() - input = paddle.to_variable(np.array([[1, 2], [3, 4]]).astype("float32")) - other = paddle.to_variable(np.array([[2, 1], [2, 4]]).astype("float32")) - label = paddle.to_variable(np.array([[1, -1], [-1, -1]]).astype("float32")) + input = paddle.to_tensor([[1, 2], [3, 4]], dtype="float32") + other = paddle.to_tensor([[2, 1], [2, 4]], dtype="float32") + label = paddle.to_tensor([[1, -1], [-1, -1]], dtype="float32") margin_rank_loss = paddle.nn.MarginRankingLoss() loss = margin_rank_loss(input, other, label) print(loss.numpy()) # [0.75]