Unverified commit ac8a4b16, authored by yuehuayingxueluo and committed by GitHub

clear fluid api: kldiv_loss, npair_loss, mse_loss (#48147)

Parent 4952f344
@@ -41,9 +41,6 @@ __all__ = [
     'nce',
     'softmax_with_cross_entropy',
     'sigmoid_cross_entropy_with_logits',
-    'kldiv_loss',
-    'npair_loss',
-    'mse_loss',
 ]

 kIgnoreIndex = -100
@@ -826,142 +823,3 @@ def sigmoid_cross_entropy_with_logits(
         outputs={"Out": out},
     )
     return out
-
-
-@deprecated(since="2.0.0", update_to="paddle.nn.functional.kl_div")
-@templatedoc()
-def kldiv_loss(x, target, reduction='mean', name=None):
-    """
-    ${comment}
-
-    Args:
-        x (Tensor): ${x_comment}
-        target (Tensor): ${target_comment}
-        reduction (Tensor): ${reduction_comment}
-        name(str, optional): For detailed information, please refer
-            to :ref:`api_guide_Name`. Usually name is no need to set and
-            None by default.
-
-    Returns:
-        Tensor: The KL divergence loss. The data type is same as input tensor
-
-    Examples:
-        .. code-block:: python
-
-            import paddle
-            import paddle.fluid as fluid
-
-            x = paddle.rand(shape=[3,4,2,2], dtype='float32')
-            target = paddle.rand(shape=[3,4,2,2], dtype='float32')
-
-            # 'batchmean' reduction, loss shape will be [1]
-            loss = fluid.layers.kldiv_loss(x=x, target=target, reduction='batchmean')
-            print(loss.shape) # shape=[1]
-
-            # 'mean' reduction, loss shape will be [1]
-            loss = fluid.layers.kldiv_loss(x=x, target=target, reduction='mean')
-            print(loss.shape) # shape=[1]
-
-            # 'sum' reduction, loss shape will be [1]
-            loss = fluid.layers.kldiv_loss(x=x, target=target, reduction='sum')
-            print(loss.shape) # shape=[1]
-
-            # 'none' reduction, loss shape is same with X shape
-            loss = fluid.layers.kldiv_loss(x=x, target=target, reduction='none')
-            print(loss.shape) # shape=[3, 4, 2, 2]
-    """
-    helper = LayerHelper('kldiv_loss', **locals())
-    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'kldiv_loss')
-    check_variable_and_dtype(
-        target, 'target', ['float32', 'float64'], 'kldiv_loss'
-    )
-    check_type(reduction, 'reduction', str, 'kldiv_loss')
-    loss = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(
-        type='kldiv_loss',
-        inputs={'X': x, 'Target': target},
-        outputs={'Loss': loss},
-        attrs={'reduction': reduction},
-    )
-    return loss
-
-
-from .control_flow import equal
-
-
-def npair_loss(anchor, positive, labels, l2_reg=0.002):
-    """
-    Npair loss requires paired data. Npair loss has two parts: the first part is L2
-    regularizer on the embedding vector; the second part is cross entropy loss which
-    takes the similarity matrix of anchor and positive as logits.
-
-    For more information, please refer to:
-    `Improved Deep Metric Learning with Multi class N pair Loss Objective <http://www.nec-labs.com/uploads/images/Department-Images/MediaAnalytics/papers/nips16_npairmetriclearning.pdf>`_
-
-    Args:
-        anchor(Tensor): embedding vector for the anchor image. shape=[batch_size, embedding_dims],
-            the data type is float32 or float64.
-        positive(Tensor): embedding vector for the positive image. shape=[batch_size, embedding_dims],
-            the data type is float32 or float64.
-        labels(Tensor): 1-D tensor. shape=[batch_size], the data type is float32 or float64 or int64.
-        l2_reg(float32): L2 regularization term on embedding vector, default: 0.002.
-
-    Returns:
-        A Tensor representing the npair loss, the data type is the same as anchor, the shape is [1].
-
-    Examples:
-        .. code-block:: python
-
-            import paddle
-
-            DATATYPE = "float32"
-
-            anchor = paddle.rand(shape=(18, 6), dtype=DATATYPE)
-            positive = paddle.rand(shape=(18, 6), dtype=DATATYPE)
-            labels = paddle.rand(shape=(18,), dtype=DATATYPE)
-
-            npair_loss = paddle.nn.functional.npair_loss(anchor, positive, labels, l2_reg = 0.002)
-            print(npair_loss)
-    """
-    return paddle.nn.functional.npair_loss(anchor, positive, labels, l2_reg)
-
-
-def mse_loss(input, label):
-    """
-    This op accepts input predications and target label and returns the mean square error.
-
-    The loss can be described as:
-
-    .. math::
-
-        Out = MEAN((input - label)^2)
-
-    Parameters:
-        input (Tensor): Input tensor, the data type should be float32.
-        label (Tensor): Label tensor, the data type should be float32.
-
-    Returns:
-        Tensor: The tensor storing the mean square error difference of input and label.
-
-    Return type: Tensor.
-
-    Examples:
-        .. code-block:: python
-
-            import paddle
-            input = paddle.to_tensor([1.1, 1.9])
-            label = paddle.to_tensor([1.0, 2.0])
-            output = paddle.fluid.layers.mse_loss(input, label)
-            print(output.numpy())
-            # [0.01]
-    """
-    check_variable_and_dtype(input, "input", ['float32', 'float64'], 'mse_loss')
-    check_variable_and_dtype(label, "label", ['float32', 'float64'], 'mse_loss')
-    return nn.reduce_mean(square_error_cost(input, label))
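For readers tracking the migration, the removed fluid entry points map directly onto their `paddle.nn.functional` replacements. A minimal sketch of the three substitutions, assuming dynamic-graph mode, with shapes and values borrowed from the removed docstrings:

```python
import paddle
import paddle.nn.functional as F

# fluid.layers.kldiv_loss(x, target, reduction=...) -> F.kl_div(input, label, reduction=...)
x = paddle.rand([3, 4, 2, 2], dtype='float32')
target = paddle.rand([3, 4, 2, 2], dtype='float32')
kl = F.kl_div(x, target, reduction='batchmean')

# fluid.layers.mse_loss(input, label) -> F.mse_loss(input, label)
pred = paddle.to_tensor([1.1, 1.9])
true = paddle.to_tensor([1.0, 2.0])
mse = F.mse_loss(pred, true)  # mean((pred - true)^2) = 0.01

# fluid.layers.npair_loss(...) -> F.npair_loss(...), keyword arguments unchanged
anchor = paddle.rand([18, 6], dtype='float32')
positive = paddle.rand([18, 6], dtype='float32')
labels = paddle.rand([18], dtype='float32')
npair = F.npair_loss(anchor, positive, labels, l2_reg=0.002)
```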
@@ -238,7 +238,7 @@ class TestWithoutIdentityLoss2(TestBase):
 class TestWithoutIdentityLoss3(TestBase):
     def set_op_attrs(self):
-        self.loss_op = partial(paddle.fluid.layers.kldiv_loss, reduction="none")
+        self.loss_op = partial(paddle.nn.functional.kl_div, reduction="none")

     def set_data_feed(self):
         self.data = paddle.uniform((8, 3, 10, 10), dtype='float32')
......
@@ -57,7 +57,7 @@ class TestBase(IPUOpTest):
         target = paddle.static.data(
             name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32'
         )
-        out = paddle.fluid.layers.kldiv_loss(x, target, **self.attrs)
+        out = paddle.nn.functional.kl_div(x, target, **self.attrs)
         self.fetch_list = [out.name]

     def run_model(self, exec_mode):
......
@@ -3731,7 +3731,9 @@ class TestBook(LayerTest):
                 dtype="float32",
                 append_batch_size=False,
             )
-            loss = layers.kldiv_loss(x=x, target=target, reduction='batchmean')
+            loss = paddle.nn.functional.kl_div(
+                input=x, label=target, reduction='batchmean'
+            )
             return loss

     def make_temporal_shift(self):
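Note the keyword rename visible in this hunk: the old `kldiv_loss` took `x` and `target`, while `paddle.nn.functional.kl_div` takes `input` and `label`. A minimal sketch of the reduction behavior, with output shapes as stated in the removed docstring:

```python
import paddle
import paddle.nn.functional as F

x = paddle.rand([3, 4, 2, 2], dtype='float32')
target = paddle.rand([3, 4, 2, 2], dtype='float32')

# 'batchmean', 'mean', and 'sum' reduce to a single-element loss;
# 'none' keeps the elementwise loss with the same shape as the input.
loss = F.kl_div(input=x, label=target, reduction='batchmean')
elementwise = F.kl_div(input=x, label=target, reduction='none')
print(loss.shape, elementwise.shape)  # [1] and [3, 4, 2, 2] per the old docstring
```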
@@ -3773,7 +3775,7 @@ class TestBook(LayerTest):
         ):
             x = self._get_data(name="X", shape=[1], dtype="float32")
             y = self._get_data(name="Y", shape=[1], dtype="float32")
-            out = layers.mse_loss(input=x, label=y)
+            out = paddle.nn.functional.mse_loss(input=x, label=y)
             return out

     def make_square_error_cost(self):
......
@@ -17,7 +17,6 @@ import numpy as np

 import paddle
 import paddle.fluid.core as core
 import paddle.fluid as fluid
-import paddle.fluid.layers as layers
 from paddle.fluid.executor import Executor
@@ -32,7 +31,7 @@ class TestMseLoss(unittest.TestCase):
         input_var = fluid.data(name="input", shape=[-1, 3], dtype="float32")
         label_var = fluid.data(name="label", shape=[-1, 3], dtype="float32")
-        output = layers.mse_loss(input=input_var, label=label_var)
+        output = paddle.nn.functional.mse_loss(input=input_var, label=label_var)

         for use_cuda in (
             [False, True] if core.is_compiled_with_cuda() else [False]
         ):
@@ -52,14 +51,14 @@ class TestMseInvalidInput(unittest.TestCase):
         def test_invalid_input():
             input = [256, 3]
             label = fluid.data(name='label1', shape=[None, 3], dtype='float32')
-            loss = fluid.layers.mse_loss(input, label)
+            loss = paddle.nn.functional.mse_loss(input, label)

         self.assertRaises(TypeError, test_invalid_input)

         def test_invalid_label():
             input = fluid.data(name='input1', shape=[None, 3], dtype='float32')
             label = [256, 3]
-            loss = fluid.layers.mse_loss(input, label)
+            loss = paddle.nn.functional.mse_loss(input, label)

         self.assertRaises(TypeError, test_invalid_label)
......
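The TestMseInvalidInput changes above keep asserting that non-Tensor inputs are rejected with a TypeError. For the happy path, a minimal dynamic-graph sketch matching the removed docstring example:

```python
import paddle
import paddle.nn.functional as F

pred = paddle.to_tensor([1.1, 1.9])
true = paddle.to_tensor([1.0, 2.0])

# Mean of squared differences: ((0.1)^2 + (-0.1)^2) / 2 = 0.01.
out = F.mse_loss(pred, true)
print(out.numpy())  # [0.01]
```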
@@ -14,6 +14,7 @@
 import unittest
 import paddle.fluid as fluid
+import paddle
 import paddle.fluid.core as core
 import numpy as np
 from paddle.fluid import Program, program_guard
@@ -99,7 +100,7 @@ class TestNpairLossOp(unittest.TestCase):
             append_batch_size=False,
         )

-        npair_loss_op = fluid.layers.npair_loss(
+        npair_loss_op = paddle.nn.functional.npair_loss(
             anchor=anc, positive=pos, labels=lab, l2_reg=reg_lambda
         )
         out_tensor = exe.run(
@@ -140,19 +141,19 @@ class TestNpairLossOpError(unittest.TestCase):
         def test_anchor_Variable():
             # the anchor type must be Variable
-            fluid.layers.npair_loss(
+            paddle.nn.functional.npair_loss(
                 anchor=anchor_np, positive=positive_data, labels=labels_data
             )

         def test_positive_Variable():
             # the positive type must be Variable
-            fluid.layers.npair_loss(
+            paddle.nn.functional.npair_loss(
                 anchor=anchor_data, positive=positive_np, labels=labels_data
             )

         def test_labels_Variable():
             # the labels type must be Variable
-            fluid.layers.npair_loss(
+            paddle.nn.functional.npair_loss(
                 anchor=anchor_data, positive=positive_data, labels=labels_np
             )
@@ -165,7 +166,7 @@ class TestNpairLossOpError(unittest.TestCase):
             anchor_data1 = fluid.data(
                 name='anchor1', shape=[2, 4], dtype='int32'
             )
-            fluid.layers.npair_loss(
+            paddle.nn.functional.npair_loss(
                 anchor=anchor_data, positive=positive_data, labels=labels_np
             )
@@ -174,7 +175,7 @@ class TestNpairLossOpError(unittest.TestCase):
             positive_data1 = fluid.data(
                 name='positive1', shape=[2, 4], dtype='int32'
             )
-            fluid.layers.npair_loss(
+            paddle.nn.functional.npair_loss(
                 anchor=anchor_data,
                 positive=positive_data1,
                 labels=labels_np,
@@ -185,7 +186,7 @@ class TestNpairLossOpError(unittest.TestCase):
             labels_data1 = fluid.data(
                 name='labels1', shape=[2], dtype='int32'
             )
-            fluid.layers.npair_loss(
+            paddle.nn.functional.npair_loss(
                 anchor=anchor_data,
                 positive=positive_data,
                 labels=labels_data1,
......
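For completeness, a minimal sketch of the replacement call that these error tests exercise; per the removed docstring, `anchor` and `positive` are [batch_size, embedding_dims] float32/float64 tensors and `labels` is a 1-D tensor of length batch_size:

```python
import paddle
import paddle.nn.functional as F

anchor = paddle.rand([18, 6], dtype='float32')
positive = paddle.rand([18, 6], dtype='float32')
labels = paddle.rand([18], dtype='float32')

# L2 regularization on the embeddings plus cross entropy over the
# anchor-positive similarity matrix.
loss = F.npair_loss(anchor, positive, labels, l2_reg=0.002)
print(loss)
```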