# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
import paddle
import paddle.fluid.core as core
import paddle.fluid as fluid
import paddle.fluid.layers as layers
from paddle.fluid.executor import Executor


class TestMseLoss(unittest.TestCase):

    def test_mse_loss(self):
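        # Compare the static-graph fluid.layers.mse_loss result against a
        # NumPy reference.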
        input_val = np.random.uniform(0.1, 0.5, (2, 3)).astype("float32")
        label_val = np.random.uniform(0.1, 0.5, (2, 3)).astype("float32")

        sub = input_val - label_val
        np_result = np.mean(sub * sub)

        input_var = layers.create_tensor(dtype="float32", name="input")
        label_var = layers.create_tensor(dtype="float32", name="label")

        output = layers.mse_loss(input=input_var, label=label_var)
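        # Run on CPU and, when Paddle is compiled with CUDA, on GPU as well.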
        for use_cuda in ([False, True]
                         if core.is_compiled_with_cuda() else [False]):
            place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
            exe = Executor(place)
            result, = exe.run(fluid.default_main_program(),
                              feed={
                                  "input": input_val,
                                  "label": label_val
                              },
                              fetch_list=[output])

            np.testing.assert_allclose(np_result, result, rtol=1e-05)


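# mse_loss should reject inputs that are not Variables with a TypeError.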
class TestMseInvalidInput(unittest.TestCase):

    def test_error(self):

        def test_invalid_input():
            input = [256, 3]
            label = fluid.data(name='label', shape=[None, 3], dtype='float32')
            loss = fluid.layers.mse_loss(input, label)

        self.assertRaises(TypeError, test_invalid_input)

        def test_invalid_label():
            input = fluid.data(name='input1', shape=[None, 3], dtype='float32')
            label = [256, 3]
            loss = fluid.layers.mse_loss(input, label)

        self.assertRaises(TypeError, test_invalid_label)


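# Checks paddle.nn.loss.MSELoss with 'mean', 'sum' and 'none' reductions in
# both static-graph and dygraph modes against a NumPy reference.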
class TestNNMseLoss(unittest.TestCase):

    def test_NNMseLoss_mean(self):
        for dim in [[10, 10], [2, 10, 10], [3, 3, 10, 10]]:
            input_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
            label_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
            paddle.enable_static()
            prog = fluid.Program()
            startup_prog = fluid.Program()
            place = fluid.CUDAPlace(
                0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace()
            with fluid.program_guard(prog, startup_prog):
                input = fluid.layers.data(name='input',
                                          shape=dim,
                                          dtype='float32')
                label = fluid.layers.data(name='label',
                                          shape=dim,
                                          dtype='float32')
                mse_loss = paddle.nn.loss.MSELoss()
                ret = mse_loss(input, label)

                exe = fluid.Executor(place)
                static_result, = exe.run(prog,
                                         feed={
                                             "input": input_np,
                                             "label": label_np
                                         },
                                         fetch_list=[ret])

            with fluid.dygraph.guard():
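                # Repeat the computation in dygraph mode for comparison.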
                mse_loss = paddle.nn.loss.MSELoss()
                dy_ret = mse_loss(fluid.dygraph.to_variable(input_np),
                                  fluid.dygraph.to_variable(label_np))
                dy_result = dy_ret.numpy()

            sub = input_np - label_np
            expected = np.mean(sub * sub)
            np.testing.assert_allclose(static_result, expected, rtol=1e-05)
            np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
            np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
            self.assertEqual(list(dy_result.shape), [1])

    def test_NNMseLoss_sum(self):
        for dim in [[10, 10], [2, 10, 10], [3, 3, 10, 10]]:
            input_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
            label_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
            paddle.enable_static()
            prog = fluid.Program()
            startup_prog = fluid.Program()
            place = fluid.CUDAPlace(
                0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace()
            with fluid.program_guard(prog, startup_prog):
                input = fluid.layers.data(name='input',
                                          shape=dim,
                                          dtype='float32')
                label = fluid.layers.data(name='label',
                                          shape=dim,
                                          dtype='float32')
                mse_loss = paddle.nn.loss.MSELoss(reduction='sum')
                ret = mse_loss(input, label)

                exe = fluid.Executor(place)
                static_result, = exe.run(prog,
                                         feed={
                                             "input": input_np,
                                             "label": label_np
                                         },
                                         fetch_list=[ret])

            with fluid.dygraph.guard():
                mse_loss = paddle.nn.loss.MSELoss(reduction='sum')
                dy_ret = mse_loss(fluid.dygraph.to_variable(input_np),
                                  fluid.dygraph.to_variable(label_np))
                dy_result = dy_ret.numpy()

            sub = input_np - label_np
            expected = np.sum(sub * sub)
            np.testing.assert_allclose(static_result, expected, rtol=1e-05)
            np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
            np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
            self.assertEqual(list(dy_result.shape), [1])

    def test_NNMseLoss_none(self):
        for dim in [[10, 10], [2, 10, 10], [3, 3, 10, 10]]:
            input_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
            label_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
            paddle.enable_static()
            prog = fluid.Program()
            startup_prog = fluid.Program()
            place = fluid.CUDAPlace(
                0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace()
            with fluid.program_guard(prog, startup_prog):
                input = fluid.layers.data(name='input',
                                          shape=dim,
                                          dtype='float32')
                label = fluid.layers.data(name='label',
                                          shape=dim,
                                          dtype='float32')
                mse_loss = paddle.nn.loss.MSELoss(reduction='none')
                ret = mse_loss(input, label)

                exe = fluid.Executor(place)
                static_result, = exe.run(prog,
                                         feed={
                                             "input": input_np,
                                             "label": label_np
                                         },
                                         fetch_list=[ret])

            with fluid.dygraph.guard():
                mse_loss = paddle.nn.loss.MSELoss(reduction='none')
                dy_ret = mse_loss(fluid.dygraph.to_variable(input_np),
                                  fluid.dygraph.to_variable(label_np))
                dy_result = dy_ret.numpy()

            sub = input_np - label_np
            expected = sub * sub
            np.testing.assert_allclose(static_result, expected, rtol=1e-05)
            np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
            np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
            self.assertEqual(list(dy_result.shape), dim)


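# Checks the functional API paddle.nn.functional.mse_loss with 'mean', 'sum'
# and 'none' reductions in static-graph and dygraph modes.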
class TestNNFunctionalMseLoss(unittest.TestCase):

    def test_NNFunctionalMseLoss_mean(self):
        for dim in [[10, 10], [2, 10, 10], [3, 3, 10, 10]]:
            input_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
            target_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
            paddle.enable_static()
            prog = paddle.static.Program()
            startup_prog = paddle.static.Program()
            place = paddle.CUDAPlace(
                0) if core.is_compiled_with_cuda() else paddle.CPUPlace()
            with paddle.static.program_guard(prog, startup_prog):
                input = paddle.fluid.data(name='input',
                                          shape=dim,
                                          dtype='float32')
                target = paddle.fluid.data(name='target',
                                           shape=dim,
                                           dtype='float32')
                mse_loss = paddle.nn.functional.mse_loss(input, target, 'mean')

            exe = paddle.static.Executor(place)
            exe.run(startup_prog)
            static_result, = exe.run(prog,
                                     feed={
                                         "input": input_np,
                                         "target": target_np
                                     },
                                     fetch_list=[mse_loss])

            paddle.disable_static()
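            # Dygraph computation for comparison with the static result.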
            dy_ret = paddle.nn.functional.mse_loss(paddle.to_tensor(input_np),
                                                   paddle.to_tensor(target_np),
                                                   'mean')
            dy_result = dy_ret.numpy()

            sub = input_np - target_np
            expected = np.mean(sub * sub)
            np.testing.assert_allclose(static_result, expected, rtol=1e-05)
            np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
            np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
            self.assertEqual(list(dy_result.shape), [1])

    def test_NNFunctionalMseLoss_sum(self):
        for dim in [[10, 10], [2, 10, 10], [3, 3, 10, 10]]:
            input_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
            target_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
            paddle.enable_static()
            prog = paddle.static.Program()
            startup_prog = paddle.static.Program()
            place = paddle.CUDAPlace(
                0) if core.is_compiled_with_cuda() else paddle.CPUPlace()
            with paddle.static.program_guard(prog, startup_prog):
                input = paddle.fluid.data(name='input',
                                          shape=dim,
                                          dtype='float32')
                target = paddle.fluid.data(name='target',
                                           shape=dim,
                                           dtype='float32')
                mse_loss = paddle.nn.functional.mse_loss(input, target, 'sum')

                exe = paddle.static.Executor(place)
                exe.run(startup_prog)
                static_result, = exe.run(prog,
                                         feed={
                                             "input": input_np,
                                             "target": target_np
                                         },
                                         fetch_list=[mse_loss])

            paddle.disable_static()
            dy_ret = paddle.nn.functional.mse_loss(paddle.to_tensor(input_np),
                                                   paddle.to_tensor(target_np),
                                                   'sum')
            dy_result = dy_ret.numpy()

            sub = input_np - target_np
            expected = np.sum(sub * sub)
            np.testing.assert_allclose(static_result, expected, rtol=1e-05)
            np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
            np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
            self.assertEqual(list(dy_result.shape), [1])

    def test_NNFunctionalMseLoss_none(self):
        for dim in [[10, 10], [2, 10, 10], [3, 3, 10, 10]]:
            input_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
            target_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
            paddle.enable_static()
            prog = paddle.static.Program()
            startup_prog = paddle.static.Program()
            place = paddle.CUDAPlace(
                0) if core.is_compiled_with_cuda() else paddle.CPUPlace()
            with paddle.static.program_guard(prog, startup_prog):
                input = paddle.fluid.data(name='input',
                                          shape=dim,
                                          dtype='float32')
                target = paddle.fluid.data(name='target',
                                           shape=dim,
                                           dtype='float32')
                mse_loss = paddle.nn.functional.mse_loss(input, target, 'none')

                exe = paddle.static.Executor(place)
                exe.run(startup_prog)
                static_result, = exe.run(prog,
                                         feed={
                                             "input": input_np,
                                             "target": target_np
                                         },
                                         fetch_list=[mse_loss])

            paddle.disable_static()
            dy_ret = paddle.nn.functional.mse_loss(paddle.to_tensor(input_np),
                                                   paddle.to_tensor(target_np),
                                                   'none')
            dy_result = dy_ret.numpy()

            sub = input_np - target_np
            expected = sub * sub
            np.testing.assert_allclose(static_result, expected, rtol=1e-05)
            np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
            np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
            self.assertEqual(list(dy_result.shape), dim)


if __name__ == "__main__":
    paddle.enable_static()
    unittest.main()