Unverified commit e5985675, authored by Yang Zhang, committed by GitHub

[Cherry-pick Release 2.0] Add `paddle.nn.loss.MSELoss` (#23981)

* Add `paddle.nn.loss.MSELoss`

test=develop

* Move to `nn/layer/loss.py`

test=develop

* Fix dygraph

test=develop

* Add test

test=develop

* Increase numel in test

test=develop

* Add test for input with different dimensions

test=develop
Parent 7cc47e90
@@ -16,7 +16,7 @@ from __future__ import print_function
import unittest
import numpy as np
-import sys
+import paddle
import paddle.fluid.core as core
import paddle.fluid as fluid
import paddle.fluid.layers as layers
@@ -64,5 +64,118 @@ class TestMseInvalidInput(unittest.TestCase):
        self.assertRaises(TypeError, test_invalid_label)
class TestNNMseLoss(unittest.TestCase):
    def test_NNMseLoss_mean(self):
        for dim in [[10, 10], [2, 10, 10], [3, 3, 10, 10]]:
            input_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
            label_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
            prog = fluid.Program()
            startup_prog = fluid.Program()
            place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
            ) else fluid.CPUPlace()
            with fluid.program_guard(prog, startup_prog):
                input = fluid.layers.data(
                    name='input', shape=dim, dtype='float32')
                label = fluid.layers.data(
                    name='label', shape=dim, dtype='float32')
                mse_loss = paddle.nn.loss.MSELoss()
                ret = mse_loss(input, label)

                exe = fluid.Executor(place)
                static_result = exe.run(
                    prog,
                    feed={"input": input_np,
                          "label": label_np},
                    fetch_list=[ret])

            with fluid.dygraph.guard():
                mse_loss = paddle.nn.loss.MSELoss()
                dy_ret = mse_loss(
                    fluid.dygraph.to_variable(input_np),
                    fluid.dygraph.to_variable(label_np))
                dy_result = dy_ret.numpy()

            sub = input_np - label_np
            expected = np.mean(sub * sub)
            self.assertTrue(np.allclose(static_result, expected))
            self.assertTrue(np.allclose(static_result, dy_result))
            self.assertTrue(np.allclose(dy_result, expected))
            self.assertTrue(dy_result.shape, [1])

    def test_NNMseLoss_sum(self):
        for dim in [[10, 10], [2, 10, 10], [3, 3, 10, 10]]:
            input_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
            label_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
            prog = fluid.Program()
            startup_prog = fluid.Program()
            place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
            ) else fluid.CPUPlace()
            with fluid.program_guard(prog, startup_prog):
                input = fluid.layers.data(
                    name='input', shape=dim, dtype='float32')
                label = fluid.layers.data(
                    name='label', shape=dim, dtype='float32')
                mse_loss = paddle.nn.loss.MSELoss(reduction='sum')
                ret = mse_loss(input, label)

                exe = fluid.Executor(place)
                static_result = exe.run(
                    prog,
                    feed={"input": input_np,
                          "label": label_np},
                    fetch_list=[ret])

            with fluid.dygraph.guard():
                mse_loss = paddle.nn.loss.MSELoss(reduction='sum')
                dy_ret = mse_loss(
                    fluid.dygraph.to_variable(input_np),
                    fluid.dygraph.to_variable(label_np))
                dy_result = dy_ret.numpy()

            sub = input_np - label_np
            expected = np.sum(sub * sub)
            self.assertTrue(np.allclose(static_result, expected))
            self.assertTrue(np.allclose(static_result, dy_result))
            self.assertTrue(np.allclose(dy_result, expected))
            self.assertTrue(dy_result.shape, [1])

    def test_NNMseLoss_none(self):
        for dim in [[10, 10], [2, 10, 10], [3, 3, 10, 10]]:
            input_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
            label_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
            prog = fluid.Program()
            startup_prog = fluid.Program()
            place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
            ) else fluid.CPUPlace()
            with fluid.program_guard(prog, startup_prog):
                input = fluid.layers.data(
                    name='input', shape=dim, dtype='float32')
                label = fluid.layers.data(
                    name='label', shape=dim, dtype='float32')
                mse_loss = paddle.nn.loss.MSELoss(reduction='none')
                ret = mse_loss(input, label)

                exe = fluid.Executor(place)
                static_result = exe.run(
                    prog,
                    feed={"input": input_np,
                          "label": label_np},
                    fetch_list=[ret])

            with fluid.dygraph.guard():
                mse_loss = paddle.nn.loss.MSELoss(reduction='none')
                dy_ret = mse_loss(
                    fluid.dygraph.to_variable(input_np),
                    fluid.dygraph.to_variable(label_np))
                dy_result = dy_ret.numpy()

            sub = input_np - label_np
            expected = (sub * sub)
            self.assertTrue(np.allclose(static_result, expected))
            self.assertTrue(np.allclose(static_result, dy_result))
            self.assertTrue(np.allclose(dy_result, expected))
            self.assertTrue(dy_result.shape, [1])
if __name__ == "__main__":
    unittest.main()
@@ -17,7 +17,7 @@ import paddle.fluid as fluid
__all__ = [
    #'NCELoss',
    'CrossEntropyLoss',
-    # 'MSELoss',
+    'MSELoss',
    'L1Loss',
    # 'NLLLoss',
    'BCELoss'
@@ -135,6 +135,97 @@ class CrossEntropyLoss(fluid.dygraph.Layer):
        return out
class MSELoss(fluid.dygraph.layers.Layer):
    """
    **Mean Square Error Loss**
    Computes the mean square error (squared L2 norm) of given input and label.

    If :attr:`reduction` is set to ``'none'``, loss is calculated as:

    .. math::
        Out = (input - label)^2

    If :attr:`reduction` is set to ``'mean'``, loss is calculated as:

    .. math::
        Out = \operatorname{mean}((input - label)^2)

    If :attr:`reduction` is set to ``'sum'``, loss is calculated as:

    .. math::
        Out = \operatorname{sum}((input - label)^2)

    where `input` and `label` are `float32` tensors of arbitrary shapes.

    Parameters:
        reduction (string, optional): The reduction method for the output,
            could be 'none' | 'mean' | 'sum'.
            'none': no reduction will be applied
            'mean': the output will be averaged
            'sum': the output will be summed

    Examples:
        .. code-block:: python

            import numpy as np
            import paddle
            from paddle import fluid
            import paddle.fluid.dygraph as dg

            mse_loss = paddle.nn.loss.MSELoss()
            input = fluid.data(name="input", shape=[1])
            label = fluid.data(name="label", shape=[1])
            place = fluid.CPUPlace()
            input_data = np.array([1.5]).astype("float32")
            label_data = np.array([1.7]).astype("float32")

            # declarative mode
            output = mse_loss(input, label)
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())
            output_data = exe.run(
                fluid.default_main_program(),
                feed={"input": input_data, "label": label_data},
                fetch_list=[output],
                return_numpy=True)
            print(output_data)
            # [array([0.04000002], dtype=float32)]

            # imperative mode
            with dg.guard(place) as g:
                input = dg.to_variable(input_data)
                label = dg.to_variable(label_data)
                output = mse_loss(input, label)
                print(output.numpy())
                # [0.04000002]
    """

    def __init__(self, reduction='mean'):
        super(MSELoss, self).__init__()
        if reduction not in ['sum', 'mean', 'none']:
            raise ValueError(
                "'reduction' in 'MSELoss' should be 'sum', 'mean' or 'none', "
                "but received {}.".format(reduction))
        self.reduction = reduction

    def forward(self, input, label):
        if not fluid.framework.in_dygraph_mode():
            fluid.data_feeder.check_variable_and_dtype(input, 'input',
                                                       ['float32'], 'MSELoss')
            fluid.data_feeder.check_variable_and_dtype(label, 'label',
                                                       ['float32'], 'MSELoss')

        square_out = fluid.layers.square(
            fluid.layers.elementwise_sub(input, label))
        if self.reduction == 'none':
            return square_out

        reduce_op = 'reduce_mean'
        if self.reduction == 'sum':
            reduce_op = 'reduce_sum'

        return getattr(fluid.layers, reduce_op)(square_out)
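As a side note (not part of this commit's diff): the `forward` above computes the elementwise squared difference and then applies `reduce_mean` or `reduce_sum` depending on `reduction`. A minimal sketch, assuming this layer is importable as `paddle.nn.loss.MSELoss`, that checks the three documented reduction modes against a NumPy reference in dygraph mode:

# Illustrative sketch only -- not part of this commit's diff.
import numpy as np
import paddle
import paddle.fluid as fluid

x_np = np.array([1.0, 2.0, 3.0]).astype("float32")
y_np = np.array([1.5, 2.5, 2.0]).astype("float32")
sq = (x_np - y_np) ** 2  # elementwise squared difference

with fluid.dygraph.guard():
    x = fluid.dygraph.to_variable(x_np)
    y = fluid.dygraph.to_variable(y_np)
    # 'none' keeps the elementwise result; 'mean' and 'sum' reduce it.
    for reduction, ref in [('none', sq), ('mean', sq.mean()), ('sum', sq.sum())]:
        loss = paddle.nn.loss.MSELoss(reduction=reduction)(x, y)
        assert np.allclose(loss.numpy(), ref)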
class L1Loss(fluid.dygraph.Layer):
    """
    This interface is used to construct a callable object of the ``L1Loss`` class.
    ...