Commit 22b06db3 (unverified) in Crayon鑫 / Paddle (forked from PaddlePaddle / Paddle)
Authored by Bai Yifan on Aug 15, 2020; committed via GitHub on Aug 15, 2020
Parent commit: 29367bfe

add paddle.nn.functional.mse_loss (#26089)

* add paddle.nn.functional.mse_loss
* add name
* fix conflict
Showing 2 changed files with 210 additions and 1 deletion (+210 -1):

    python/paddle/fluid/tests/unittests/test_mse_loss.py    +110  -0
    python/paddle/nn/functional/loss.py                      +100  -1
python/paddle/fluid/tests/unittests/test_mse_loss.py  @ 22b06db3

@@ -69,6 +69,7 @@ class TestNNMseLoss(unittest.TestCase):
         for dim in [[10, 10], [2, 10, 10], [3, 3, 10, 10]]:
             input_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
             label_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
+            paddle.enable_static()
             prog = fluid.Program()
             startup_prog = fluid.Program()
             place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
@@ -106,6 +107,7 @@ class TestNNMseLoss(unittest.TestCase):
         for dim in [[10, 10], [2, 10, 10], [3, 3, 10, 10]]:
             input_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
             label_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
+            paddle.enable_static()
             prog = fluid.Program()
             startup_prog = fluid.Program()
             place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
@@ -143,6 +145,7 @@ class TestNNMseLoss(unittest.TestCase):
         for dim in [[10, 10], [2, 10, 10], [3, 3, 10, 10]]:
             input_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
             label_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
+            paddle.enable_static()
             prog = fluid.Program()
             startup_prog = fluid.Program()
             place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
@@ -177,5 +180,112 @@ class TestNNMseLoss(unittest.TestCase):
             self.assertTrue(dy_result.shape, [1])
 
 
+class TestNNFunctionalMseLoss(unittest.TestCase):
+    def test_NNFunctionalMseLoss_mean(self):
+        for dim in [[10, 10], [2, 10, 10], [3, 3, 10, 10]]:
+            input_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
+            target_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
+            paddle.enable_static()
+            prog = paddle.static.Program()
+            startup_prog = paddle.static.Program()
+            place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda(
+            ) else paddle.CPUPlace()
+            with paddle.static.program_guard(prog, startup_prog):
+                input = paddle.data(name='input', shape=dim, dtype='float32')
+                target = paddle.data(name='target', shape=dim, dtype='float32')
+                mse_loss = paddle.nn.functional.mse_loss(input, target, 'mean')
+
+            exe = paddle.static.Executor(place)
+            exe.run(startup_prog)
+            static_result = exe.run(prog,
+                                    feed={"input": input_np,
+                                          "target": target_np},
+                                    fetch_list=[mse_loss])
+
+            paddle.disable_static()
+            dy_ret = paddle.nn.functional.mse_loss(
+                paddle.to_variable(input_np),
+                paddle.to_variable(target_np), 'mean')
+            dy_result = dy_ret.numpy()
+
+            sub = input_np - target_np
+            expected = np.mean(sub * sub)
+            self.assertTrue(np.allclose(static_result, expected))
+            self.assertTrue(np.allclose(static_result, dy_result))
+            self.assertTrue(np.allclose(dy_result, expected))
+            self.assertTrue(dy_result.shape, [1])
+
+    def test_NNFunctionalMseLoss_sum(self):
+        for dim in [[10, 10], [2, 10, 10], [3, 3, 10, 10]]:
+            input_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
+            target_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
+            paddle.enable_static()
+            prog = paddle.static.Program()
+            startup_prog = paddle.static.Program()
+            place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda(
+            ) else paddle.CPUPlace()
+            with paddle.static.program_guard(prog, startup_prog):
+                input = paddle.data(name='input', shape=dim, dtype='float32')
+                target = paddle.data(name='target', shape=dim, dtype='float32')
+                mse_loss = paddle.nn.functional.mse_loss(input, target, 'sum')
+
+            exe = paddle.static.Executor(place)
+            exe.run(startup_prog)
+            static_result = exe.run(prog,
+                                    feed={"input": input_np,
+                                          "target": target_np},
+                                    fetch_list=[mse_loss])
+
+            paddle.disable_static()
+            dy_ret = paddle.nn.functional.mse_loss(
+                paddle.to_variable(input_np),
+                paddle.to_variable(target_np), 'sum')
+            dy_result = dy_ret.numpy()
+
+            sub = input_np - target_np
+            expected = np.sum(sub * sub)
+            self.assertTrue(np.allclose(static_result, expected))
+            self.assertTrue(np.allclose(static_result, dy_result))
+            self.assertTrue(np.allclose(dy_result, expected))
+            self.assertTrue(dy_result.shape, [1])
+
+    def test_NNFunctionalMseLoss_none(self):
+        for dim in [[10, 10], [2, 10, 10], [3, 3, 10, 10]]:
+            input_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
+            target_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
+            paddle.enable_static()
+            prog = paddle.static.Program()
+            startup_prog = paddle.static.Program()
+            place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda(
+            ) else paddle.CPUPlace()
+            with paddle.static.program_guard(prog, startup_prog):
+                input = paddle.data(name='input', shape=dim, dtype='float32')
+                target = paddle.data(name='target', shape=dim, dtype='float32')
+                mse_loss = paddle.nn.functional.mse_loss(input, target, 'none')
+
+            exe = paddle.static.Executor(place)
+            exe.run(startup_prog)
+            static_result = exe.run(prog,
+                                    feed={"input": input_np,
+                                          "target": target_np},
+                                    fetch_list=[mse_loss])
+
+            paddle.disable_static()
+            dy_ret = paddle.nn.functional.mse_loss(
+                paddle.to_variable(input_np),
+                paddle.to_variable(target_np), 'none')
+            dy_result = dy_ret.numpy()
+
+            sub = input_np - target_np
+            expected = sub * sub
+            self.assertTrue(np.allclose(static_result, expected))
+            self.assertTrue(np.allclose(static_result, dy_result))
+            self.assertTrue(np.allclose(dy_result, expected))
+            self.assertTrue(dy_result.shape, [1])
+
+
 if __name__ == "__main__":
     unittest.main()
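For reference, the expected values that the new TestNNFunctionalMseLoss checks compare against can be reproduced with plain NumPy. The following is a minimal sketch of that reference computation, not part of the diff; the shape is one of those used in the tests and the variable names mirror the test code.

import numpy as np

# Reference computation mirroring the assertions in TestNNFunctionalMseLoss:
# element-wise squared error, then the chosen reduction.
input_np = np.random.uniform(0.1, 0.5, [10, 10]).astype("float32")
target_np = np.random.uniform(0.1, 0.5, [10, 10]).astype("float32")

sub = input_np - target_np
expected_none = sub * sub           # reduction='none': per-element loss, same shape as input
expected_mean = np.mean(sub * sub)  # reduction='mean': scalar average
expected_sum = np.sum(sub * sub)    # reduction='sum': scalar total

print(expected_none.shape, float(expected_mean), float(expected_sum))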
python/paddle/nn/functional/loss.py  @ 22b06db3

@@ -12,6 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import paddle
+
 # TODO: define loss functions of neural network
 import numpy as np
 import paddle
@@ -25,7 +27,6 @@ from ...fluid.layers import dice_loss #DEFINE_ALIAS
 from ...fluid.layers import iou_similarity #DEFINE_ALIAS
 from ...fluid.layers import kldiv_loss #DEFINE_ALIAS
 from ...fluid.layers import log_loss #DEFINE_ALIAS
-from ...fluid.layers import mse_loss #DEFINE_ALIAS
 from ...fluid.layers import npair_loss #DEFINE_ALIAS
 from ...fluid.layers import rank_loss #DEFINE_ALIAS
 from ...fluid.layers import reshape
@@ -371,3 +372,101 @@ def nll_loss(input,
         out = reshape(out, shape=out_shape)
 
     return out
+
+
+def mse_loss(input, label, reduction='mean', name=None):
+    """
+    This op accepts input predications and label and returns the mean square error.
+
+    If :attr:`reduction` is set to ``'none'``, loss is calculated as:
+
+    .. math::
+        Out = (input - label)^2
+
+    If :attr:`reduction` is set to ``'mean'``, loss is calculated as:
+
+    .. math::
+        Out = \operatorname{mean}((input - label)^2)
+
+    If :attr:`reduction` is set to ``'sum'``, loss is calculated as:
+
+    .. math::
+        Out = \operatorname{sum}((input - label)^2)
+
+    Parameters:
+        input (Tensor): Input tensor, the data type should be float32 or float64.
+        label (Tensor): Label tensor, the data type should be float32 or float64.
+        reduction (string, optional): The reduction method for the output,
+            could be 'none' | 'mean' | 'sum'.
+            If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned.
+            If :attr:`reduction` is ``'sum'``, the reduced sum loss is returned.
+            If :attr:`reduction` is ``'none'``, the unreduced loss is returned.
+            Default is ``'mean'``.
+        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
+
+    Returns:
+        Tensor: The tensor tensor storing the mean square error difference of input and label.
+
+    Return type: Tensor.
+
+    Examples:
+        .. code-block:: python
+            import numpy as np
+            import paddle
+
+            # static graph mode
+            paddle.enable_static()
+            mse_loss = paddle.nn.loss.MSELoss()
+            input = paddle.data(name="input", shape=[1])
+            label = paddle.data(name="label", shape=[1])
+            place = paddle.CPUPlace()
+            input_data = np.array([1.5]).astype("float32")
+            label_data = np.array([1.7]).astype("float32")
+
+            output = mse_loss(input,label)
+            exe = paddle.static.Executor(place)
+            exe.run(paddle.static.default_startup_program())
+            output_data = exe.run(
+                paddle.static.default_main_program(),
+                feed={"input":input_data, "label":label_data},
+                fetch_list=[output],
+                return_numpy=True)
+            print(output_data)
+            # [array([0.04000002], dtype=float32)]
+
+            # dynamic graph mode
+            paddle.disable_static()
+            input = paddle.to_variable(input_data)
+            label = paddle.to_variable(label_data)
+            output = mse_loss(input, label)
+            print(output.numpy())
+            # [0.04000002]
+    """
+    if reduction not in ['sum', 'mean', 'none']:
+        raise ValueError(
+            "'reduction' in 'mse_loss' should be 'sum', 'mean' or 'none', "
+            "but received {}.".format(reduction))
+
+    if not paddle.fluid.framework.in_dygraph_mode():
+        paddle.fluid.data_feeder.check_variable_and_dtype(
+            input, 'input', ['float32', 'float64'], 'mse_loss')
+        paddle.fluid.data_feeder.check_variable_and_dtype(
+            label, 'label', ['float32', 'float64'], 'mse_loss')
+
+    if reduction == 'none':
+        return paddle.fluid.layers.square(
+            paddle.fluid.layers.elementwise_sub(input, label), name=name)
+    elif reduction == 'mean':
+        return paddle.mean(
+            paddle.fluid.layers.square(
+                paddle.fluid.layers.elementwise_sub(input, label)),
+            name=name)
+    else:
+        return paddle.sum(
+            paddle.fluid.layers.square(
+                paddle.fluid.layers.elementwise_sub(input, label)),
+            name=name)
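To illustrate the function added above, here is a minimal dynamic-graph usage sketch. It reuses the sample values from the docstring example (1.5 and 1.7) and assumes the Paddle 2.0-beta API that the rest of this diff relies on (paddle.disable_static and paddle.to_variable); it is not part of the commit itself.

import numpy as np
import paddle

paddle.disable_static()  # switch to dynamic graph (imperative) mode

input_data = np.array([1.5]).astype("float32")
label_data = np.array([1.7]).astype("float32")

# reduction defaults to 'mean'; 'sum' and 'none' are also accepted.
output = paddle.nn.functional.mse_loss(
    paddle.to_variable(input_data), paddle.to_variable(label_data))
print(output.numpy())  # [0.04000002]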