# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
import paddle
import paddle.fluid.core as core
import paddle.fluid as fluid
import paddle.fluid.layers as layers
from paddle.fluid.executor import Executor


class TestMseLoss(unittest.TestCase):
    """Check the legacy ``fluid.layers.mse_loss`` op against a NumPy reference."""

    def test_mse_loss(self):
        # Random inputs in (0.1, 0.5); reference result is mean((x - y)^2).
        input_val = np.random.uniform(0.1, 0.5, (2, 3)).astype("float32")
        label_val = np.random.uniform(0.1, 0.5, (2, 3)).astype("float32")
        diff = input_val - label_val
        np_result = np.mean(diff * diff)

        # Build the static graph once; run it on every available place.
        input_var = fluid.data(name="input", shape=[-1, 3], dtype="float32")
        label_var = fluid.data(name="label", shape=[-1, 3], dtype="float32")
        output = layers.mse_loss(input=input_var, label=label_var)

        use_cuda_options = [False]
        if core.is_compiled_with_cuda():
            use_cuda_options.append(True)
        for use_cuda in use_cuda_options:
            place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
            exe = Executor(place)
            result, = exe.run(fluid.default_main_program(),
                              feed={
                                  "input": input_val,
                                  "label": label_val
                              },
                              fetch_list=[output])
            np.testing.assert_allclose(np_result, result, rtol=1e-05)
class TestMseInvalidInput(unittest.TestCase):
    """mse_loss must raise TypeError when either operand is not a Variable."""

    def test_error(self):

        def test_invalid_input():
            # A plain Python list is not a Variable and must be rejected.
            input = [256, 3]
            label = fluid.data(name='label1', shape=[None, 3], dtype='float32')
            loss = fluid.layers.mse_loss(input, label)

        self.assertRaises(TypeError, test_invalid_input)

        def test_invalid_label():
            # Same check with the roles reversed: non-Variable label.
            input = fluid.data(name='input1', shape=[None, 3], dtype='float32')
            label = [256, 3]
            loss = fluid.layers.mse_loss(input, label)

        self.assertRaises(TypeError, test_invalid_label)
class TestNNMseLoss(unittest.TestCase):
    """Check paddle.nn.loss.MSELoss for all reductions, in static graph and
    dygraph modes, against a NumPy reference.

    The three public test methods were near-identical 50-line copies; the
    shared body now lives in ``_run_case``.  The original
    ``self.assertTrue(dy_result.shape, [1])`` was a vacuous assertion
    (the second argument of assertTrue is the failure *message*, so any
    non-empty shape tuple passed); it is replaced with real shape checks.
    """

    def _run_case(self, reduction, np_reduce):
        """Run one reduction mode over several input ranks.

        Args:
            reduction: 'mean', 'sum' or 'none', forwarded to MSELoss.
            np_reduce: callable applied to the squared error to build the
                NumPy reference (np.mean, np.sum, or identity).
        """
        for dim in [[10, 10], [2, 10, 10], [3, 3, 10, 10]]:
            input_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
            label_np = np.random.uniform(0.1, 0.5, dim).astype("float32")

            # --- static graph mode ---
            paddle.enable_static()
            prog = fluid.Program()
            startup_prog = fluid.Program()
            place = fluid.CUDAPlace(
                0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace()
            with fluid.program_guard(prog, startup_prog):
                input = fluid.layers.data(name='input',
                                          shape=dim,
                                          dtype='float32')
                label = fluid.layers.data(name='label',
                                          shape=dim,
                                          dtype='float32')
                mse_loss = paddle.nn.loss.MSELoss(reduction=reduction)
                ret = mse_loss(input, label)

                exe = fluid.Executor(place)
                static_result, = exe.run(prog,
                                         feed={
                                             "input": input_np,
                                             "label": label_np
                                         },
                                         fetch_list=[ret])

            # --- dygraph mode ---
            with fluid.dygraph.guard():
                mse_loss = paddle.nn.loss.MSELoss(reduction=reduction)
                dy_ret = mse_loss(fluid.dygraph.to_variable(input_np),
                                  fluid.dygraph.to_variable(label_np))
                dy_result = dy_ret.numpy()

            sub = input_np - label_np
            expected = np_reduce(sub * sub)
            np.testing.assert_allclose(static_result, expected, rtol=1e-05)
            np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
            np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
            if reduction == 'none':
                # Elementwise loss keeps the input shape.
                self.assertEqual(dy_result.shape, tuple(dim))
            else:
                # Reduced loss is shape (1,) in this (pre-0-D-tensor) API
                # generation — TODO confirm if upgrading paddle.
                self.assertEqual(dy_result.shape, (1, ))

    def test_NNMseLoss_mean(self):
        self._run_case('mean', np.mean)

    def test_NNMseLoss_sum(self):
        self._run_case('sum', np.sum)

    def test_NNMseLoss_none(self):
        self._run_case('none', lambda x: x)
class TestNNFunctionalMseLoss(unittest.TestCase):
    """Check paddle.nn.functional.mse_loss for all reductions, in static and
    dynamic modes, against a NumPy reference.

    The three public test methods were near-identical copies; the shared body
    now lives in ``_run_case``.  Two defects fixed: (1) the vacuous
    ``self.assertTrue(dy_result.shape, [1])`` (second argument is the failure
    message, so the check always passed) is replaced with real shape asserts;
    (2) the executor was created inside ``program_guard`` in the sum/none
    variants but outside it in the mean variant — now consistently outside.
    """

    def _run_case(self, reduction, np_reduce):
        """Run one reduction mode over several input ranks.

        Args:
            reduction: 'mean', 'sum' or 'none', forwarded to mse_loss.
            np_reduce: callable applied to the squared error to build the
                NumPy reference (np.mean, np.sum, or identity).
        """
        for dim in [[10, 10], [2, 10, 10], [3, 3, 10, 10]]:
            input_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
            target_np = np.random.uniform(0.1, 0.5, dim).astype("float32")

            # --- static graph mode ---
            paddle.enable_static()
            prog = paddle.static.Program()
            startup_prog = paddle.static.Program()
            place = paddle.CUDAPlace(
                0) if core.is_compiled_with_cuda() else paddle.CPUPlace()
            with paddle.static.program_guard(prog, startup_prog):
                input = paddle.fluid.data(name='input',
                                          shape=dim,
                                          dtype='float32')
                target = paddle.fluid.data(name='target',
                                           shape=dim,
                                           dtype='float32')
                mse_loss = paddle.nn.functional.mse_loss(
                    input, target, reduction)

            exe = paddle.static.Executor(place)
            exe.run(startup_prog)
            static_result, = exe.run(prog,
                                     feed={
                                         "input": input_np,
                                         "target": target_np
                                     },
                                     fetch_list=[mse_loss])

            # --- dynamic mode ---
            paddle.disable_static()
            dy_ret = paddle.nn.functional.mse_loss(paddle.to_tensor(input_np),
                                                   paddle.to_tensor(target_np),
                                                   reduction)
            dy_result = dy_ret.numpy()

            sub = input_np - target_np
            expected = np_reduce(sub * sub)
            np.testing.assert_allclose(static_result, expected, rtol=1e-05)
            np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
            np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
            if reduction == 'none':
                # Elementwise loss keeps the input shape.
                self.assertEqual(dy_result.shape, tuple(dim))
            else:
                # Reduced loss is shape (1,) in this (pre-0-D-tensor) API
                # generation — TODO confirm if upgrading paddle.
                self.assertEqual(dy_result.shape, (1, ))

    def test_NNFunctionalMseLoss_mean(self):
        self._run_case('mean', np.mean)

    def test_NNFunctionalMseLoss_sum(self):
        self._run_case('sum', np.sum)

    def test_NNFunctionalMseLoss_none(self):
        self._run_case('none', lambda x: x)
if __name__ == "__main__":
    # The legacy fluid-style tests above assume static graph mode is active.
    paddle.enable_static()
    unittest.main()