Unverified commit d31c92a2 authored by R ruri, committed by GitHub

add mse_loss (#19759)

* add mse_loss op
Parent 85b398f1
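For context, the new mse_loss layer computes the mean of the element-wise squared difference between predictions and labels. A minimal NumPy sketch of the same computation (illustrative only, not part of this commit):

import numpy as np

def mse_reference(pred, label):
    # mean((pred - label) ** 2): the quantity mse_loss is expected to return
    diff = pred - label
    return np.mean(diff * diff)

pred = np.array([0.1, 0.4, 0.3], dtype="float32")
label = np.array([0.2, 0.4, 0.1], dtype="float32")
print(mse_reference(pred, label))  # approx 0.01667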
......@@ -297,6 +297,7 @@ paddle.fluid.layers.deformable_roi_pooling (ArgSpec(args=['input', 'rois', 'tran
paddle.fluid.layers.filter_by_instag (ArgSpec(args=['ins', 'ins_tag', 'filter_tag', 'is_lod'], varargs=None, keywords=None, defaults=None), ('document', '7703a2088af8de4128b143ff1164ca4a'))
paddle.fluid.layers.shard_index (ArgSpec(args=['input', 'index_num', 'nshards', 'shard_id', 'ignore_value'], varargs=None, keywords=None, defaults=(-1,)), ('document', '5786fdbba6753ecd6cbce5e6b0889924'))
paddle.fluid.layers.hard_swish (ArgSpec(args=['x', 'threshold', 'scale', 'offset', 'name'], varargs=None, keywords=None, defaults=(6.0, 6.0, 3.0, None)), ('document', '6a5152a7015c62cb8278fc24cb456459'))
paddle.fluid.layers.mse_loss (ArgSpec(args=['input', 'label'], varargs=None, keywords=None, defaults=None), ('document', 'd9ede6469288636e1b3233b461a165c9'))
paddle.fluid.layers.data (ArgSpec(args=['name', 'shape', 'append_batch_size', 'dtype', 'lod_level', 'type', 'stop_gradient'], varargs=None, keywords=None, defaults=(True, 'float32', 0, VarType.LOD_TENSOR, True)), ('document', '9d7806e31bdf727c1a23b8782a09b545'))
paddle.fluid.layers.read_file (ArgSpec(args=['reader'], varargs=None, keywords=None, defaults=None), ('document', '88367daf9a30c9ab83adc5d7221e23ef'))
paddle.fluid.layers.double_buffer (ArgSpec(args=['reader', 'place', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', '44724c493f41a124abc7531c2740e2e3'))
......
......@@ -220,6 +220,7 @@ __all__ = [
'filter_by_instag',
'shard_index',
'hard_swish',
'mse_loss',
]
kIgnoreIndex = -100
......@@ -14215,3 +14216,40 @@ def hard_swish(x, threshold=6.0, scale=6.0, offset=3.0, name=None):
'scale': scale,
'offset': offset})
return out
def mse_loss(input, label):
    """
    **Mean square error layer**

    This layer accepts input predictions and target labels, and returns the
    mean square error.

    The loss can be described as:

    .. math::

        Out = mean((X - Y)^2)

    In the above equation:

        * :math:`X`: Input predictions, a tensor.
        * :math:`Y`: Input labels, a tensor.
        * :math:`Out`: Output value, the mean square error, a scalar tensor.

    Args:
        input (Variable): Input tensor holding the predictions.
        label (Variable): Label tensor holding the target labels.

    Returns:
        Variable: The tensor variable storing the mean square error between input and label.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            y = fluid.layers.data(name='y', shape=[1], dtype='float32')
            y_predict = fluid.layers.data(name='y_predict', shape=[1], dtype='float32')
            mse = fluid.layers.mse_loss(input=y_predict, label=y)
    """
    return reduce_mean(square_error_cost(input, label))
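For reference, a minimal end-to-end sketch of running the new layer with the standard fluid feed/Executor workflow (variable names are illustrative; this assumes the fluid 1.x API used elsewhere in this diff):

import numpy as np
import paddle.fluid as fluid

# Build a program that computes the MSE between fed predictions and labels.
y = fluid.layers.data(name='y', shape=[1], dtype='float32')
y_predict = fluid.layers.data(name='y_predict', shape=[1], dtype='float32')
mse = fluid.layers.mse_loss(input=y_predict, label=y)

exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())

y_np = np.array([[1.0], [2.0]], dtype='float32')
pred_np = np.array([[1.5], [1.0]], dtype='float32')
out, = exe.run(fluid.default_main_program(),
               feed={'y': y_np, 'y_predict': pred_np},
               fetch_list=[mse])
print(out)  # mean((pred - y)^2) = (0.25 + 1.0) / 2, prints approx [0.625]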
......@@ -2046,6 +2046,14 @@ class TestBook(LayerTest):
        out = layers.pixel_shuffle(x, upscale_factor=3)
        return (out)

    def make_mse_loss(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            x = self._get_data(name="X", shape=[1], dtype="float32")
            y = self._get_data(name="Y", shape=[1], dtype="float32")
            out = layers.mse_loss(input=x, label=y)
            return (out)

    def make_square_error_cost(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
......
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import sys
import paddle.fluid.core as core
import paddle.fluid as fluid
import paddle.fluid.layers as layers
from paddle.fluid.executor import Executor
class TestMseLoss(unittest.TestCase):
    def test_mse_loss(self):
        input_val = np.random.uniform(0.1, 0.5, (2, 3)).astype("float32")
        label_val = np.random.uniform(0.1, 0.5, (2, 3)).astype("float32")

        # NumPy reference: mean of element-wise squared differences.
        sub = input_val - label_val
        np_result = np.mean(sub * sub)

        input_var = layers.create_tensor(dtype="float32", name="input")
        label_var = layers.create_tensor(dtype="float32", name="label")

        layers.assign(input=input_val, output=input_var)
        layers.assign(input=label_val, output=label_var)
        output = layers.mse_loss(input=input_var, label=label_var)
        for use_cuda in ([False, True]
                         if core.is_compiled_with_cuda() else [False]):
            place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
            exe = Executor(place)
            # Feed the numpy values (not the Variables) when running the program.
            result = exe.run(fluid.default_main_program(),
                             feed={"input": input_val,
                                   "label": label_val},
                             fetch_list=[output])
            self.assertTrue(np.isclose(np_result, result).all())


if __name__ == "__main__":
    unittest.main()