Unverified commit 22b06db3, authored by Bai Yifan, committed by GitHub

add paddle.nn.functional.mse_loss (#26089)

* add paddle.nn.functional.mse_loss

* add name

* fix conflict
Parent 29367bfe
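
This commit introduces a functional counterpart to paddle.nn.loss.MSELoss. A minimal usage sketch of the new API, based on the docstring example added in this diff (it assumes the Paddle 2.0-beta dynamic-graph API, where paddle.disable_static() and paddle.to_variable() behave as shown):

import numpy as np
import paddle

paddle.disable_static()  # dynamic graph mode
input = paddle.to_variable(np.array([1.5], dtype="float32"))
label = paddle.to_variable(np.array([1.7], dtype="float32"))
loss = paddle.nn.functional.mse_loss(input, label, reduction='mean')
print(loss.numpy())  # [0.04000002] == mean((1.5 - 1.7)^2)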
@@ -69,6 +69,7 @@ class TestNNMseLoss(unittest.TestCase):
         for dim in [[10, 10], [2, 10, 10], [3, 3, 10, 10]]:
             input_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
             label_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
+            paddle.enable_static()
             prog = fluid.Program()
             startup_prog = fluid.Program()
             place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
@@ -106,6 +107,7 @@ class TestNNMseLoss(unittest.TestCase):
         for dim in [[10, 10], [2, 10, 10], [3, 3, 10, 10]]:
             input_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
             label_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
+            paddle.enable_static()
             prog = fluid.Program()
             startup_prog = fluid.Program()
             place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
@@ -143,6 +145,7 @@ class TestNNMseLoss(unittest.TestCase):
         for dim in [[10, 10], [2, 10, 10], [3, 3, 10, 10]]:
             input_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
             label_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
+            paddle.enable_static()
             prog = fluid.Program()
             startup_prog = fluid.Program()
             place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
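
The three hunks above each insert paddle.enable_static() because these tests later call paddle.disable_static() for their dynamic-graph checks; without re-enabling static mode, the next loop iteration would build its fluid.Program objects while still in dynamic mode. A minimal sketch of the toggle pattern (the loop body here is illustrative, not from the diff):

import paddle

for _ in range(3):
    paddle.enable_static()   # build and run the static graph for this case
    # ... static-graph assertions ...
    paddle.disable_static()  # switch to dynamic mode for the eager checks
    # ... dynamic-graph assertions ...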
@@ -177,5 +180,112 @@ class TestNNMseLoss(unittest.TestCase):
             self.assertTrue(dy_result.shape, [1])
 
 
+class TestNNFunctionalMseLoss(unittest.TestCase):
+    def test_NNFunctionalMseLoss_mean(self):
+        for dim in [[10, 10], [2, 10, 10], [3, 3, 10, 10]]:
+            input_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
+            target_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
+            paddle.enable_static()
+            prog = paddle.static.Program()
+            startup_prog = paddle.static.Program()
+            place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda(
+            ) else paddle.CPUPlace()
+            with paddle.static.program_guard(prog, startup_prog):
+                input = paddle.data(name='input', shape=dim, dtype='float32')
+                target = paddle.data(name='target', shape=dim, dtype='float32')
+                mse_loss = paddle.nn.functional.mse_loss(input, target, 'mean')
+
+            exe = paddle.static.Executor(place)
+            exe.run(startup_prog)
+            static_result = exe.run(
+                prog,
+                feed={"input": input_np,
+                      "target": target_np},
+                fetch_list=[mse_loss])
+
+            paddle.disable_static()
+            dy_ret = paddle.nn.functional.mse_loss(
+                paddle.to_variable(input_np),
+                paddle.to_variable(target_np), 'mean')
+            dy_result = dy_ret.numpy()
+
+            sub = input_np - target_np
+            expected = np.mean(sub * sub)
+            self.assertTrue(np.allclose(static_result, expected))
+            self.assertTrue(np.allclose(static_result, dy_result))
+            self.assertTrue(np.allclose(dy_result, expected))
+            self.assertTrue(dy_result.shape, [1])
+
+    def test_NNFunctionalMseLoss_sum(self):
+        for dim in [[10, 10], [2, 10, 10], [3, 3, 10, 10]]:
+            input_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
+            target_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
+            paddle.enable_static()
+            prog = paddle.static.Program()
+            startup_prog = paddle.static.Program()
+            place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda(
+            ) else paddle.CPUPlace()
+            with paddle.static.program_guard(prog, startup_prog):
+                input = paddle.data(name='input', shape=dim, dtype='float32')
+                target = paddle.data(name='target', shape=dim, dtype='float32')
+                mse_loss = paddle.nn.functional.mse_loss(input, target, 'sum')
+
+            exe = paddle.static.Executor(place)
+            exe.run(startup_prog)
+            static_result = exe.run(
+                prog,
+                feed={"input": input_np,
+                      "target": target_np},
+                fetch_list=[mse_loss])
+
+            paddle.disable_static()
+            dy_ret = paddle.nn.functional.mse_loss(
+                paddle.to_variable(input_np),
+                paddle.to_variable(target_np), 'sum')
+            dy_result = dy_ret.numpy()
+
+            sub = input_np - target_np
+            expected = np.sum(sub * sub)
+            self.assertTrue(np.allclose(static_result, expected))
+            self.assertTrue(np.allclose(static_result, dy_result))
+            self.assertTrue(np.allclose(dy_result, expected))
+            self.assertTrue(dy_result.shape, [1])
+
+    def test_NNFunctionalMseLoss_none(self):
+        for dim in [[10, 10], [2, 10, 10], [3, 3, 10, 10]]:
+            input_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
+            target_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
+            paddle.enable_static()
+            prog = paddle.static.Program()
+            startup_prog = paddle.static.Program()
+            place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda(
+            ) else paddle.CPUPlace()
+            with paddle.static.program_guard(prog, startup_prog):
+                input = paddle.data(name='input', shape=dim, dtype='float32')
+                target = paddle.data(name='target', shape=dim, dtype='float32')
+                mse_loss = paddle.nn.functional.mse_loss(input, target, 'none')
+
+            exe = paddle.static.Executor(place)
+            exe.run(startup_prog)
+            static_result = exe.run(
+                prog,
+                feed={"input": input_np,
+                      "target": target_np},
+                fetch_list=[mse_loss])
+
+            paddle.disable_static()
+            dy_ret = paddle.nn.functional.mse_loss(
+                paddle.to_variable(input_np),
+                paddle.to_variable(target_np), 'none')
+            dy_result = dy_ret.numpy()
+
+            sub = input_np - target_np
+            expected = sub * sub
+            self.assertTrue(np.allclose(static_result, expected))
+            self.assertTrue(np.allclose(static_result, dy_result))
+            self.assertTrue(np.allclose(dy_result, expected))
+            self.assertTrue(dy_result.shape, [1])
+
+
 if __name__ == "__main__":
     unittest.main()
@@ -12,6 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import paddle
+
 # TODO: define loss functions of neural network
 import numpy as np
 import paddle
@@ -25,7 +27,6 @@ from ...fluid.layers import dice_loss  #DEFINE_ALIAS
 from ...fluid.layers import iou_similarity  #DEFINE_ALIAS
 from ...fluid.layers import kldiv_loss  #DEFINE_ALIAS
 from ...fluid.layers import log_loss  #DEFINE_ALIAS
-from ...fluid.layers import mse_loss  #DEFINE_ALIAS
 from ...fluid.layers import npair_loss  #DEFINE_ALIAS
 from ...fluid.layers import rank_loss  #DEFINE_ALIAS
 from ...fluid.layers import reshape
@@ -371,3 +372,101 @@ def nll_loss(input,
         out = reshape(out, shape=out_shape)
 
     return out
+
+
+def mse_loss(input, label, reduction='mean', name=None):
+    """
+    This op accepts input predictions and labels, and returns the mean square error.
+
+    If :attr:`reduction` is set to ``'none'``, loss is calculated as:
+
+    .. math::
+        Out = (input - label)^2
+
+    If :attr:`reduction` is set to ``'mean'``, loss is calculated as:
+
+    .. math::
+        Out = \operatorname{mean}((input - label)^2)
+
+    If :attr:`reduction` is set to ``'sum'``, loss is calculated as:
+
+    .. math::
+        Out = \operatorname{sum}((input - label)^2)
+
+    Parameters:
+        input (Tensor): Input tensor, the data type should be float32 or float64.
+        label (Tensor): Label tensor, the data type should be float32 or float64.
+        reduction (string, optional): The reduction method for the output,
+            could be 'none' | 'mean' | 'sum'.
+            If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned.
+            If :attr:`reduction` is ``'sum'``, the reduced sum loss is returned.
+            If :attr:`reduction` is ``'none'``, the unreduced loss is returned.
+            Default is ``'mean'``.
+        name (str, optional): Name for the operation (optional, default is None).
+            For more information, please refer to :ref:`api_guide_Name`.
+
+    Returns:
+        Tensor: The tensor storing the mean square error difference of input and label.
+
+    Return type: Tensor.
+
+    Examples:
+        .. code-block:: python
+
+            import numpy as np
+            import paddle
+
+            # static graph mode
+            paddle.enable_static()
+            input = paddle.data(name="input", shape=[1])
+            label = paddle.data(name="label", shape=[1])
+            place = paddle.CPUPlace()
+            input_data = np.array([1.5]).astype("float32")
+            label_data = np.array([1.7]).astype("float32")
+            output = paddle.nn.functional.mse_loss(input, label)
+
+            exe = paddle.static.Executor(place)
+            exe.run(paddle.static.default_startup_program())
+            output_data = exe.run(
+                paddle.static.default_main_program(),
+                feed={"input": input_data, "label": label_data},
+                fetch_list=[output],
+                return_numpy=True)
+            print(output_data)
+            # [array([0.04000002], dtype=float32)]
+
+            # dynamic graph mode
+            paddle.disable_static()
+            input = paddle.to_variable(input_data)
+            label = paddle.to_variable(label_data)
+            output = paddle.nn.functional.mse_loss(input, label)
+            print(output.numpy())
+            # [0.04000002]
+
+    """
+    if reduction not in ['sum', 'mean', 'none']:
+        raise ValueError(
+            "'reduction' in 'mse_loss' should be 'sum', 'mean' or 'none', "
+            "but received {}.".format(reduction))
+
+    if not paddle.fluid.framework.in_dygraph_mode():
+        paddle.fluid.data_feeder.check_variable_and_dtype(
+            input, 'input', ['float32', 'float64'], 'mse_loss')
+        paddle.fluid.data_feeder.check_variable_and_dtype(
+            label, 'label', ['float32', 'float64'], 'mse_loss')
+
+    if reduction == 'none':
+        return paddle.fluid.layers.square(
+            paddle.fluid.layers.elementwise_sub(input, label), name=name)
+    elif reduction == 'mean':
+        return paddle.mean(
+            paddle.fluid.layers.square(
+                paddle.fluid.layers.elementwise_sub(input, label)),
+            name=name)
+    else:
+        return paddle.sum(
+            paddle.fluid.layers.square(
+                paddle.fluid.layers.elementwise_sub(input, label)),
+            name=name)
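
For reference, the three reduction modes differ only in the final aggregation step. A minimal NumPy sketch of the expected values, mirroring the reference computation used in the tests above (the array values here are illustrative):

import numpy as np

input_np = np.array([[1.0, 2.0], [3.0, 4.0]], dtype="float32")
label_np = np.array([[1.5, 1.5], [3.5, 3.5]], dtype="float32")

sq = (input_np - label_np) ** 2  # reduction='none': element-wise squared error
print(sq)         # [[0.25 0.25] [0.25 0.25]]
print(sq.mean())  # reduction='mean': 0.25
print(sq.sum())   # reduction='sum': 1.0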