# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np

import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.executor import Executor


class TestMseLoss(unittest.TestCase):
    """Checks the functional ``paddle.nn.functional.mse_loss`` op in static
    graph mode against a numpy reference, on CPU and (when available) CUDA."""

    def test_mse_loss(self):
        input_val = np.random.uniform(0.1, 0.5, (2, 3)).astype("float32")
        label_val = np.random.uniform(0.1, 0.5, (2, 3)).astype("float32")

        # Numpy reference: mean of squared differences.
        sub = input_val - label_val
        np_result = np.mean(sub * sub)

        # -1 leading dim: batch size is determined by the feed.
        input_var = fluid.data(name="input", shape=[-1, 3], dtype="float32")
        label_var = fluid.data(name="label", shape=[-1, 3], dtype="float32")

        output = paddle.nn.functional.mse_loss(input=input_var, label=label_var)
        # Run on CPU always, and additionally on GPU when compiled with CUDA.
        for use_cuda in (
            [False, True] if core.is_compiled_with_cuda() else [False]
        ):
            place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
            exe = Executor(place)
            (result,) = exe.run(
                fluid.default_main_program(),
                feed={"input": input_val, "label": label_val},
                fetch_list=[output],
            )

            np.testing.assert_allclose(np_result, result, rtol=1e-05)
class TestMseInvalidInput(unittest.TestCase):
    """Verifies that ``mse_loss`` rejects non-Variable inputs with TypeError."""

    def test_error(self):
        def test_invalid_input():
            # A plain Python list is not a Variable/Tensor: must raise.
            input = [256, 3]
            label = fluid.data(name='label1', shape=[None, 3], dtype='float32')
            loss = paddle.nn.functional.mse_loss(input, label)

        self.assertRaises(TypeError, test_invalid_input)

        def test_invalid_label():
            input = fluid.data(name='input1', shape=[None, 3], dtype='float32')
            # Same check on the label argument.
            label = [256, 3]
            loss = paddle.nn.functional.mse_loss(input, label)

        self.assertRaises(TypeError, test_invalid_label)


class TestNNMseLoss(unittest.TestCase):
    """Checks the ``paddle.nn.loss.MSELoss`` layer (class API) against numpy
    references for each reduction mode ('mean', 'sum', 'none'), comparing
    static-graph and dygraph results on several input ranks."""

    def test_NNMseLoss_mean(self):
        for dim in [[10, 10], [2, 10, 10], [3, 3, 10, 10]]:
            input_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
            label_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
            paddle.enable_static()
            prog = fluid.Program()
            startup_prog = fluid.Program()
            place = (
                fluid.CUDAPlace(0)
                if fluid.core.is_compiled_with_cuda()
                else fluid.CPUPlace()
            )
            with fluid.program_guard(prog, startup_prog):
                input = paddle.static.data(
                    name='input', shape=[-1] + dim, dtype='float32'
                )
                # The graph declares a -1 leading dim while the feed has a
                # concrete shape; disable the feed shape check to allow it.
                input.desc.set_need_check_feed(False)
                label = paddle.static.data(
                    name='label', shape=[-1] + dim, dtype='float32'
                )
                label.desc.set_need_check_feed(False)
                mse_loss = paddle.nn.loss.MSELoss()
                ret = mse_loss(input, label)

                exe = fluid.Executor(place)
                (static_result,) = exe.run(
                    prog,
                    feed={"input": input_np, "label": label_np},
                    fetch_list=[ret],
                )

            with fluid.dygraph.guard():
                mse_loss = paddle.nn.loss.MSELoss()
                dy_ret = mse_loss(
                    fluid.dygraph.to_variable(input_np),
                    fluid.dygraph.to_variable(label_np),
                )
                dy_result = dy_ret.numpy()

            # Default reduction is 'mean': loss == mean((input - label)**2).
            sub = input_np - label_np
            expected = np.mean(sub * sub)
            np.testing.assert_allclose(static_result, expected, rtol=1e-05)
            np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
            np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
            # NOTE(review): assertTrue's second argument is the failure
            # message, so this does not compare the shape to [1]; kept
            # unchanged to preserve existing test behavior.
            self.assertTrue(dy_result.shape, [1])

    def test_NNMseLoss_sum(self):
        for dim in [[10, 10], [2, 10, 10], [3, 3, 10, 10]]:
            input_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
            label_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
            paddle.enable_static()
            prog = fluid.Program()
            startup_prog = fluid.Program()
            place = (
                fluid.CUDAPlace(0)
                if fluid.core.is_compiled_with_cuda()
                else fluid.CPUPlace()
            )
            with fluid.program_guard(prog, startup_prog):
                input = paddle.static.data(
                    name='input', shape=[-1] + dim, dtype='float32'
                )
                # See test_NNMseLoss_mean for why the feed check is disabled.
                input.desc.set_need_check_feed(False)
                label = paddle.static.data(
                    name='label', shape=[-1] + dim, dtype='float32'
                )
                label.desc.set_need_check_feed(False)
                mse_loss = paddle.nn.loss.MSELoss(reduction='sum')
                ret = mse_loss(input, label)

                exe = fluid.Executor(place)
                (static_result,) = exe.run(
                    prog,
                    feed={"input": input_np, "label": label_np},
                    fetch_list=[ret],
                )

            with fluid.dygraph.guard():
                mse_loss = paddle.nn.loss.MSELoss(reduction='sum')
                dy_ret = mse_loss(
                    fluid.dygraph.to_variable(input_np),
                    fluid.dygraph.to_variable(label_np),
                )
                dy_result = dy_ret.numpy()

            # reduction='sum': loss == sum((input - label)**2).
            sub = input_np - label_np
            expected = np.sum(sub * sub)
            np.testing.assert_allclose(static_result, expected, rtol=1e-05)
            np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
            np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
            # NOTE(review): no-op assertion (message arg); kept as-is.
            self.assertTrue(dy_result.shape, [1])

    def test_NNMseLoss_none(self):
        for dim in [[10, 10], [2, 10, 10], [3, 3, 10, 10]]:
            input_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
            label_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
            paddle.enable_static()
            prog = fluid.Program()
            startup_prog = fluid.Program()
            place = (
                fluid.CUDAPlace(0)
                if fluid.core.is_compiled_with_cuda()
                else fluid.CPUPlace()
            )
            with fluid.program_guard(prog, startup_prog):
                input = paddle.static.data(
                    name='input', shape=[-1] + dim, dtype='float32'
                )
                # See test_NNMseLoss_mean for why the feed check is disabled.
                input.desc.set_need_check_feed(False)
                label = paddle.static.data(
                    name='label', shape=[-1] + dim, dtype='float32'
                )
                label.desc.set_need_check_feed(False)
                mse_loss = paddle.nn.loss.MSELoss(reduction='none')
                ret = mse_loss(input, label)

                exe = fluid.Executor(place)
                (static_result,) = exe.run(
                    prog,
                    feed={"input": input_np, "label": label_np},
                    fetch_list=[ret],
                )

            with fluid.dygraph.guard():
                mse_loss = paddle.nn.loss.MSELoss(reduction='none')
                dy_ret = mse_loss(
                    fluid.dygraph.to_variable(input_np),
                    fluid.dygraph.to_variable(label_np),
                )
                dy_result = dy_ret.numpy()

            # reduction='none': element-wise squared differences, no reduce.
            sub = input_np - label_np
            expected = sub * sub
            np.testing.assert_allclose(static_result, expected, rtol=1e-05)
            np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
            np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
            # NOTE(review): no-op assertion (message arg); kept as-is.
            self.assertTrue(dy_result.shape, [1])


class TestNNFunctionalMseLoss(unittest.TestCase):
    """Checks ``paddle.nn.functional.mse_loss`` for each reduction mode
    ('mean', 'sum', 'none'), comparing static-graph and dygraph results
    against numpy references on several input ranks."""

    def test_NNFunctionalMseLoss_mean(self):
        for dim in [[10, 10], [2, 10, 10], [3, 3, 10, 10]]:
            input_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
            target_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
            paddle.enable_static()
            prog = paddle.static.Program()
            startup_prog = paddle.static.Program()
            place = (
                paddle.CUDAPlace(0)
                if core.is_compiled_with_cuda()
                else paddle.CPUPlace()
            )
            with paddle.static.program_guard(prog, startup_prog):
                input = paddle.fluid.data(
                    name='input', shape=dim, dtype='float32'
                )
                target = paddle.fluid.data(
                    name='target', shape=dim, dtype='float32'
                )
                mse_loss = paddle.nn.functional.mse_loss(input, target, 'mean')

            exe = paddle.static.Executor(place)
            exe.run(startup_prog)
            (static_result,) = exe.run(
                prog,
                feed={"input": input_np, "target": target_np},
                fetch_list=[mse_loss],
            )

            paddle.disable_static()
            dy_ret = paddle.nn.functional.mse_loss(
                paddle.to_tensor(input_np), paddle.to_tensor(target_np), 'mean'
            )
            dy_result = dy_ret.numpy()

            # reduction='mean': loss == mean((input - target)**2).
            sub = input_np - target_np
            expected = np.mean(sub * sub)
            np.testing.assert_allclose(static_result, expected, rtol=1e-05)
            np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
            np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
            # NOTE(review): assertTrue's second argument is the failure
            # message, so this does not compare the shape to [1]; kept
            # unchanged to preserve existing test behavior.
            self.assertTrue(dy_result.shape, [1])

    def test_NNFunctionalMseLoss_sum(self):
        for dim in [[10, 10], [2, 10, 10], [3, 3, 10, 10]]:
            input_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
            target_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
            paddle.enable_static()
            prog = paddle.static.Program()
            startup_prog = paddle.static.Program()
            place = (
                paddle.CUDAPlace(0)
                if core.is_compiled_with_cuda()
                else paddle.CPUPlace()
            )
            with paddle.static.program_guard(prog, startup_prog):
                input = paddle.fluid.data(
                    name='input', shape=dim, dtype='float32'
                )
                target = paddle.fluid.data(
                    name='target', shape=dim, dtype='float32'
                )
                mse_loss = paddle.nn.functional.mse_loss(input, target, 'sum')

                exe = paddle.static.Executor(place)
                exe.run(startup_prog)
                (static_result,) = exe.run(
                    prog,
                    feed={"input": input_np, "target": target_np},
                    fetch_list=[mse_loss],
                )

            paddle.disable_static()
            dy_ret = paddle.nn.functional.mse_loss(
                paddle.to_tensor(input_np), paddle.to_tensor(target_np), 'sum'
            )
            dy_result = dy_ret.numpy()

            # reduction='sum': loss == sum((input - target)**2).
            sub = input_np - target_np
            expected = np.sum(sub * sub)
            np.testing.assert_allclose(static_result, expected, rtol=1e-05)
            np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
            np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
            # NOTE(review): no-op assertion (message arg); kept as-is.
            self.assertTrue(dy_result.shape, [1])

    def test_NNFunctionalMseLoss_none(self):
        for dim in [[10, 10], [2, 10, 10], [3, 3, 10, 10]]:
            input_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
            target_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
            paddle.enable_static()
            prog = paddle.static.Program()
            startup_prog = paddle.static.Program()
            place = (
                paddle.CUDAPlace(0)
                if core.is_compiled_with_cuda()
                else paddle.CPUPlace()
            )
            with paddle.static.program_guard(prog, startup_prog):
                input = paddle.fluid.data(
                    name='input', shape=dim, dtype='float32'
                )
                target = paddle.fluid.data(
                    name='target', shape=dim, dtype='float32'
                )
                mse_loss = paddle.nn.functional.mse_loss(input, target, 'none')

                exe = paddle.static.Executor(place)
                exe.run(startup_prog)
                (static_result,) = exe.run(
                    prog,
                    feed={"input": input_np, "target": target_np},
                    fetch_list=[mse_loss],
                )

            paddle.disable_static()
            dy_ret = paddle.nn.functional.mse_loss(
                paddle.to_tensor(input_np), paddle.to_tensor(target_np), 'none'
            )
            dy_result = dy_ret.numpy()

            # reduction='none': element-wise squared differences, no reduce.
            sub = input_np - target_np
            expected = sub * sub
            np.testing.assert_allclose(static_result, expected, rtol=1e-05)
            np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
            np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
            # NOTE(review): no-op assertion (message arg); kept as-is.
            self.assertTrue(dy_result.shape, [1])


if __name__ == "__main__":
    # The module-level tests build static graphs; start in static mode.
    paddle.enable_static()
    unittest.main()