# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
from op_test import OpTest

import paddle
import paddle.fluid as fluid


def nll_loss_1d(
    logs, targets, weight=None, reduction='mean', ignore_index=-100
):
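    """NumPy reference for 1-D NLL loss.

    Each sample contributes the negative log-probability of its target class,
    scaled by the per-class weight when one is given; samples whose target
    equals ignore_index contribute nothing. 'mean' divides the summed loss by
    the accumulated weight; 'sum' and 'mean' also return that accumulated
    weight, while 'none' returns the per-sample losses.
    """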
    input_shape = logs.shape
    N = input_shape[0]
    C = input_shape[1]
    out = np.zeros_like(targets).astype(np.float64)
    total_weight = 0
    for i in range(N):
        cur_target = targets[i]
        if cur_target == ignore_index:
            out[i] = 0
            continue
        cur_weight = weight[cur_target] if weight is not None else 1
        total_weight += cur_weight
        out[i] = -logs[i][cur_target] * cur_weight
    if reduction == 'sum':
        return np.sum(out), np.array([total_weight]).astype('float64')
    elif reduction == 'mean':
        return out.sum() / total_weight, np.array([total_weight]).astype(
            'float64'
        )
    elif reduction == 'none':
        return out


def nll_loss_2d(
    logs, targets, weight=None, reduction='mean', ignore_index=-100
):
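    """NumPy reference for NLL loss on NCHW inputs.

    Mirrors nll_loss_1d, but walks every (n, h, w) position and indexes the
    class dimension of `logs` with the corresponding target label.
    """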
    input_shape = logs.shape
    N = input_shape[0]
    H = input_shape[2]
    W = input_shape[3]
    out = np.zeros_like(targets).astype(np.float64)
    total_weight = 0
    for i in range(N):
        for h in range(H):
            for w in range(W):
                cur_target = targets[i][h][w]
                if cur_target == ignore_index:
                    out[i][h][w] = 0
                    continue
                cur_weight = weight[cur_target] if weight is not None else 1
                total_weight += cur_weight
                out[i][h][w] = -logs[i][cur_target][h][w] * cur_weight
    if reduction == 'sum':
        return np.sum(out), np.array([total_weight]).astype('float64')
    elif reduction == 'mean':
        return out.sum() / total_weight, np.array([total_weight]).astype(
            'float64'
        )
    elif reduction == 'none':
        return out


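# The API tests below run each configuration through the static-graph
# (Executor) path and the dygraph path (plus the eager path in a few cases),
# then compare every result against the NumPy references above.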
class TestNLLLoss(unittest.TestCase):
    def test_NLLLoss_1D_mean(self):
        np.random.seed(200)
        input_np = np.random.random(size=(10, 10)).astype(np.float64)
        np.random.seed(200)
        label_np = np.random.randint(0, 10, size=(10,)).astype(np.int64)
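        # Build and run the static-graph program first, then repeat the same
        # computation in dygraph mode with identical inputs.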
        prog = fluid.Program()
        startup_prog = fluid.Program()
        place = (
            fluid.CUDAPlace(0)
            if fluid.core.is_compiled_with_cuda()
            else fluid.CPUPlace()
        )
        # place = fluid.CPUPlace()
        with fluid.program_guard(prog, startup_prog):
            input = fluid.data(name='input', shape=[10, 10], dtype='float64')
            label = fluid.data(name='label', shape=[10], dtype='int64')
            nll_loss = paddle.nn.loss.NLLLoss()
            res = nll_loss(input, label)

            exe = fluid.Executor(place)
            (static_result,) = exe.run(
                prog,
                feed={"input": input_np, "label": label_np},
                fetch_list=[res],
            )

        with fluid.dygraph.guard():
            nll_loss = paddle.nn.loss.NLLLoss()
            dy_res = nll_loss(
                paddle.to_tensor(input_np), paddle.to_tensor(label_np)
            )
            dy_result = dy_res.numpy()

        with fluid.dygraph.guard():
            nll_loss = paddle.nn.loss.NLLLoss()
            eager_res = nll_loss(
                paddle.to_tensor(input_np), paddle.to_tensor(label_np)
            )
            eager_result = eager_res.numpy()

        expected = nll_loss_1d(input_np, label_np)[0]
        np.testing.assert_allclose(static_result, expected, rtol=1e-05)
        np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
        np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
        np.testing.assert_allclose(eager_result, expected, rtol=1e-05)

    def test_NLLLoss_1D_sum(self):
        np.random.seed(200)
        input_np = np.random.random(size=(10, 10)).astype(np.float64)
        np.random.seed(200)
        label_np = np.random.randint(0, 10, size=(10,)).astype(np.int64)
        prog = fluid.Program()
        startup_prog = fluid.Program()
        place = (
            fluid.CUDAPlace(0)
            if fluid.core.is_compiled_with_cuda()
            else fluid.CPUPlace()
        )
        # place = fluid.CPUPlace()
        with fluid.program_guard(prog, startup_prog):
            input = fluid.data(name='input', shape=[10, 10], dtype='float64')
            label = fluid.data(name='label', shape=[10], dtype='int64')
            nll_loss = paddle.nn.loss.NLLLoss(reduction='sum')
            res = nll_loss(input, label)

            exe = fluid.Executor(place)
            (static_result,) = exe.run(
                prog,
                feed={"input": input_np, "label": label_np},
                fetch_list=[res],
            )

        with fluid.dygraph.guard():
            nll_loss = paddle.nn.loss.NLLLoss(reduction='sum')
            dy_res = nll_loss(
                paddle.to_tensor(input_np), paddle.to_tensor(label_np)
            )
            dy_result = dy_res.numpy()

            nll_loss = paddle.nn.loss.NLLLoss(reduction='sum')
            in_t = paddle.to_tensor(input_np)
            label = paddle.to_tensor(label_np)
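            # Marking the input as trainable lets loss.backward() exercise the
            # NLL loss backward path in eager mode as well.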
            in_t.stop_gradient = False
            eager_res = nll_loss(in_t, label)
            eager_result = eager_res.numpy()
            loss = eager_res.sum()
            loss.backward()

        expected = nll_loss_1d(input_np, label_np, reduction='sum')[0]
        np.testing.assert_allclose(static_result, expected, rtol=1e-05)
        np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
        np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
        np.testing.assert_allclose(eager_result, expected, rtol=1e-05)

    def test_NLLLoss_1D_with_weight_mean(self):
        np.random.seed(200)
        input_np = np.random.random(size=(10, 10)).astype(np.float64)
        np.random.seed(200)
        label_np = np.random.randint(0, 10, size=(10,)).astype(np.int64)
        weight_np = np.random.random(size=(10,)).astype(np.float64)
        prog = fluid.Program()
        startup_prog = fluid.Program()
        place = (
            fluid.CUDAPlace(0)
            if fluid.core.is_compiled_with_cuda()
            else fluid.CPUPlace()
        )
        # place = fluid.CPUPlace()
        with fluid.program_guard(prog, startup_prog):
            input = fluid.data(name='input', shape=[10, 10], dtype='float64')
            label = fluid.data(name='label', shape=[10], dtype='int64')
            weight = fluid.data(name='weight', shape=[10], dtype='float64')
            nll_loss = paddle.nn.loss.NLLLoss(weight=weight)
            res = nll_loss(input, label)

            exe = fluid.Executor(place)
            (static_result,) = exe.run(
                prog,
                feed={
                    "input": input_np,
                    "label": label_np,
                    "weight": weight_np,
                },
                fetch_list=[res],
            )

        with fluid.dygraph.guard():
            nll_loss = paddle.nn.loss.NLLLoss(
                weight=paddle.to_tensor(weight_np)
            )
            dy_res = nll_loss(
                paddle.to_tensor(input_np), paddle.to_tensor(label_np)
            )
            dy_result = dy_res.numpy()

            nll_loss = paddle.nn.loss.NLLLoss(
                weight=paddle.to_tensor(weight_np)
            )
            eager_res = nll_loss(
                paddle.to_tensor(input_np), paddle.to_tensor(label_np)
            )
            loss = eager_res.sum()
            loss.backward()
            eager_result = eager_res.numpy()

        expected = nll_loss_1d(input_np, label_np, weight=weight_np)[0]

        np.testing.assert_allclose(static_result, expected, rtol=1e-05)
        np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
        np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
        np.testing.assert_allclose(eager_result, expected, rtol=1e-05)

    def test_NLLLoss_1D_with_weight_sum(self):
        np.random.seed(200)
        input_np = np.random.random(size=(10, 10)).astype(np.float64)
        np.random.seed(200)
        label_np = np.random.randint(0, 10, size=(10,)).astype(np.int64)
        weight_np = np.random.random(size=(10,)).astype(np.float64)
        prog = fluid.Program()
        startup_prog = fluid.Program()
        place = (
            fluid.CUDAPlace(0)
            if fluid.core.is_compiled_with_cuda()
            else fluid.CPUPlace()
        )
        # place = fluid.CPUPlace()
        with fluid.program_guard(prog, startup_prog):
            input = fluid.data(name='input', shape=[10, 10], dtype='float64')
            label = fluid.data(name='label', shape=[10], dtype='int64')
            weight = fluid.data(name='weight', shape=[10], dtype='float64')
            nll_loss = paddle.nn.loss.NLLLoss(weight=weight, reduction='sum')
            res = nll_loss(input, label)

            exe = fluid.Executor(place)
            (static_result,) = exe.run(
                prog,
                feed={
                    "input": input_np,
                    "label": label_np,
                    "weight": weight_np,
                },
                fetch_list=[res],
            )

        with fluid.dygraph.guard():
            nll_loss = paddle.nn.loss.NLLLoss(
                weight=paddle.to_tensor(weight_np), reduction='sum'
            )
            dy_res = nll_loss(
                paddle.to_tensor(input_np), paddle.to_tensor(label_np)
            )
            dy_result = dy_res.numpy()
        expected = nll_loss_1d(
            input_np, label_np, weight=weight_np, reduction='sum'
        )[0]

        np.testing.assert_allclose(static_result, expected, rtol=1e-05)
        np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
        np.testing.assert_allclose(dy_result, expected, rtol=1e-05)

    def test_NLLLoss_1D_with_weight_mean_cpu(self):
        np.random.seed(200)
        input_np = np.random.random(size=(10, 10)).astype(np.float64)
        np.random.seed(200)
        label_np = np.random.randint(0, 10, size=(10,)).astype(np.int64)
        weight_np = np.random.random(size=(10,)).astype(np.float64)
        prog = fluid.Program()
        startup_prog = fluid.Program()
        place = fluid.CPUPlace()
        with fluid.program_guard(prog, startup_prog):
            input = fluid.data(name='input', shape=[10, 10], dtype='float64')
            label = fluid.data(name='label', shape=[10], dtype='int64')
            weight = fluid.data(name='weight', shape=[10], dtype='float64')
            nll_loss = paddle.nn.loss.NLLLoss(weight=weight)
            res = nll_loss(input, label)

            exe = fluid.Executor(place)
            (static_result,) = exe.run(
                prog,
                feed={
                    "input": input_np,
                    "label": label_np,
                    "weight": weight_np,
                },
                fetch_list=[res],
            )

        with fluid.dygraph.guard():
            nll_loss = paddle.nn.loss.NLLLoss(
                weight=paddle.to_tensor(weight_np)
            )
            dy_res = nll_loss(
                paddle.to_tensor(input_np), paddle.to_tensor(label_np)
            )
            dy_result = dy_res.numpy()
        expected = nll_loss_1d(input_np, label_np, weight=weight_np)[0]

        np.testing.assert_allclose(static_result, expected, rtol=1e-05)
        np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
        np.testing.assert_allclose(dy_result, expected, rtol=1e-05)

    def test_NLLLoss_1D_with_weight_no_reduce_cpu(self):
        np.random.seed(200)
        input_np = np.random.random(size=(10, 10)).astype(np.float64)
        np.random.seed(200)
        label_np = np.random.randint(0, 10, size=(10,)).astype(np.int64)
        weight_np = np.random.random(size=(10,)).astype(np.float64)
        prog = fluid.Program()
        startup_prog = fluid.Program()
        place = fluid.CPUPlace()
        with fluid.program_guard(prog, startup_prog):
            input = fluid.data(name='input', shape=[10, 10], dtype='float64')
            label = fluid.data(name='label', shape=[10], dtype='int64')
            weight = fluid.data(name='weight', shape=[10], dtype='float64')
            nll_loss = paddle.nn.loss.NLLLoss(weight=weight, reduction='none')
            res = nll_loss(input, label)

            exe = fluid.Executor(place)
            (static_result,) = exe.run(
                prog,
                feed={
                    "input": input_np,
                    "label": label_np,
                    "weight": weight_np,
                },
                fetch_list=[res],
            )

        with fluid.dygraph.guard():
            nll_loss = paddle.nn.loss.NLLLoss(
                weight=paddle.to_tensor(weight_np), reduction='none'
            )
            dy_res = nll_loss(
                paddle.to_tensor(input_np), paddle.to_tensor(label_np)
            )
            dy_result = dy_res.numpy()
        expected = nll_loss_1d(
            input_np, label_np, weight=weight_np, reduction='none'
        )

        np.testing.assert_allclose(static_result, expected, rtol=1e-05)
        np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
        np.testing.assert_allclose(dy_result, expected, rtol=1e-05)

    def test_NLLLoss_2D_mean(self):
        np.random.seed(200)
        input_np = np.random.random(size=(5, 3, 5, 5)).astype(np.float64)
        np.random.seed(200)
        label_np = np.random.randint(0, 3, size=(5, 5, 5)).astype(np.int64)
        prog = fluid.Program()
        startup_prog = fluid.Program()
        place = (
            fluid.CUDAPlace(0)
            if fluid.core.is_compiled_with_cuda()
            else fluid.CPUPlace()
        )
        # place = fluid.CPUPlace()
        with fluid.program_guard(prog, startup_prog):
            input = fluid.data(
                name='input', shape=[5, 3, 5, 5], dtype='float64'
            )
            label = fluid.data(name='label', shape=[5, 5, 5], dtype='int64')
            nll_loss = paddle.nn.loss.NLLLoss()
            res = nll_loss(input, label)

            exe = fluid.Executor(place)
            (static_result,) = exe.run(
                prog,
                feed={"input": input_np, "label": label_np},
                fetch_list=[res],
            )

        with fluid.dygraph.guard():
            nll_loss = paddle.nn.loss.NLLLoss()
            dy_res = nll_loss(
                paddle.to_tensor(input_np), paddle.to_tensor(label_np)
            )
            dy_result = dy_res.numpy()

        expected = nll_loss_2d(input_np, label_np)[0]

        np.testing.assert_allclose(static_result, expected, rtol=1e-05)
        np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
        np.testing.assert_allclose(dy_result, expected, rtol=1e-05)

    def test_NLLLoss_2D_sum(self):
        np.random.seed(200)
        input_np = np.random.random(size=(5, 3, 5, 5)).astype(np.float64)
        np.random.seed(200)
        label_np = np.random.randint(0, 3, size=(5, 5, 5)).astype(np.int64)
        prog = fluid.Program()
        startup_prog = fluid.Program()
        place = (
            fluid.CUDAPlace(0)
            if fluid.core.is_compiled_with_cuda()
            else fluid.CPUPlace()
        )
        # place = fluid.CPUPlace()
        with fluid.program_guard(prog, startup_prog):
            input = fluid.data(
                name='input', shape=[5, 3, 5, 5], dtype='float64'
            )
            label = fluid.data(name='label', shape=[5, 5, 5], dtype='int64')
            nll_loss = paddle.nn.loss.NLLLoss(reduction='sum')
            res = nll_loss(input, label)

            exe = fluid.Executor(place)
            (static_result,) = exe.run(
                prog,
                feed={"input": input_np, "label": label_np},
                fetch_list=[res],
            )

        with fluid.dygraph.guard():
            nll_loss = paddle.nn.loss.NLLLoss(reduction='sum')
            dy_res = nll_loss(
                paddle.to_tensor(input_np), paddle.to_tensor(label_np)
            )
            dy_result = dy_res.numpy()

        expected = nll_loss_2d(input_np, label_np, reduction='sum')[0]

        np.testing.assert_allclose(static_result, expected, rtol=1e-05)
        np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
        np.testing.assert_allclose(dy_result, expected, rtol=1e-05)

    def test_NLLLoss_2D_with_weight_mean(self):
        np.random.seed(200)
        input_np = np.random.random(size=(5, 3, 5, 5)).astype(np.float64)
        np.random.seed(200)
        label_np = np.random.randint(0, 3, size=(5, 5, 5)).astype(np.int64)
        weight_np = np.random.random(size=(3,)).astype(np.float64)
        prog = fluid.Program()
        startup_prog = fluid.Program()
        place = (
            fluid.CUDAPlace(0)
            if fluid.core.is_compiled_with_cuda()
            else fluid.CPUPlace()
        )
        # place = fluid.CPUPlace()
        with fluid.program_guard(prog, startup_prog):
            input = fluid.data(
                name='input', shape=[5, 3, 5, 5], dtype='float64'
            )
            label = fluid.data(name='label', shape=[5, 5, 5], dtype='int64')
            weight = fluid.data(name='weight', shape=[3], dtype='float64')

            nll_loss = paddle.nn.loss.NLLLoss(weight=weight)
            res = nll_loss(input, label)

            exe = fluid.Executor(place)
            (static_result,) = exe.run(
                prog,
                feed={
                    "input": input_np,
                    "label": label_np,
                    "weight": weight_np,
                },
                fetch_list=[res],
            )

        with fluid.dygraph.guard():
            nll_loss = paddle.nn.loss.NLLLoss(
                weight=paddle.to_tensor(weight_np)
            )
            dy_res = nll_loss(
                paddle.to_tensor(input_np), paddle.to_tensor(label_np)
            )
            dy_result = dy_res.numpy()

        expected = nll_loss_2d(input_np, label_np, weight=weight_np)[0]

        np.testing.assert_allclose(static_result, expected, rtol=1e-05)
        np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
        np.testing.assert_allclose(dy_result, expected, rtol=1e-05)

    def test_NLLLoss_2D_with_weight_mean_cpu(self):
        np.random.seed(200)
        input_np = np.random.random(size=(5, 3, 5, 5)).astype(np.float64)
        np.random.seed(200)
        label_np = np.random.randint(0, 3, size=(5, 5, 5)).astype(np.int64)
        weight_np = np.random.random(size=(3,)).astype(np.float64)
        prog = fluid.Program()
        startup_prog = fluid.Program()
        place = fluid.CPUPlace()
        with fluid.program_guard(prog, startup_prog):
            input = fluid.data(
                name='input', shape=[5, 3, 5, 5], dtype='float64'
            )
            label = fluid.data(name='label', shape=[5, 5, 5], dtype='int64')
            weight = fluid.data(name='weight', shape=[3], dtype='float64')

            nll_loss = paddle.nn.loss.NLLLoss(weight=weight)
            res = nll_loss(input, label)

            exe = fluid.Executor(place)
            (static_result,) = exe.run(
                prog,
                feed={
                    "input": input_np,
                    "label": label_np,
                    "weight": weight_np,
                },
                fetch_list=[res],
            )

        with fluid.dygraph.guard():
            nll_loss = paddle.nn.loss.NLLLoss(
                weight=paddle.to_tensor(weight_np)
            )
            dy_res = nll_loss(
                paddle.to_tensor(input_np), paddle.to_tensor(label_np)
            )
            dy_result = dy_res.numpy()

        expected = nll_loss_2d(input_np, label_np, weight=weight_np)[0]

        np.testing.assert_allclose(static_result, expected, rtol=1e-05)
        np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
        np.testing.assert_allclose(dy_result, expected, rtol=1e-05)

    def test_NLLLoss_2D_with_weight_sum(self):
        np.random.seed(200)
        input_np = np.random.random(size=(5, 3, 5, 5)).astype(np.float64)
        np.random.seed(200)
        label_np = np.random.randint(0, 3, size=(5, 5, 5)).astype(np.int64)
        weight_np = np.random.random(size=(3,)).astype(np.float64)
        prog = fluid.Program()
        startup_prog = fluid.Program()
        place = (
            fluid.CUDAPlace(0)
            if fluid.core.is_compiled_with_cuda()
            else fluid.CPUPlace()
        )
        with fluid.program_guard(prog, startup_prog):
            input = fluid.data(
                name='input', shape=[5, 3, 5, 5], dtype='float64'
            )
            label = fluid.data(name='label', shape=[5, 5, 5], dtype='int64')
            weight = fluid.data(name='weight', shape=[3], dtype='float64')

            nll_loss = paddle.nn.loss.NLLLoss(weight=weight, reduction='sum')
            res = nll_loss(input, label)

            exe = fluid.Executor(place)
            (static_result,) = exe.run(
                prog,
                feed={
                    "input": input_np,
                    "label": label_np,
                    "weight": weight_np,
                },
                fetch_list=[res],
            )

        with fluid.dygraph.guard():
            nll_loss = paddle.nn.loss.NLLLoss(
                weight=paddle.to_tensor(weight_np), reduction='sum'
            )
            dy_res = nll_loss(
                paddle.to_tensor(input_np), paddle.to_tensor(label_np)
            )
            dy_result = dy_res.numpy()

        expected = nll_loss_2d(
            input_np, label_np, weight=weight_np, reduction='sum'
        )[0]

        np.testing.assert_allclose(static_result, expected, rtol=1e-05)
        np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
        np.testing.assert_allclose(dy_result, expected, rtol=1e-05)

    def test_NLLLoss_in_dims_not_2or4_mean(self):
        np.random.seed(200)
        input_np = np.random.random(size=(5, 3, 5, 5, 5)).astype(np.float64)
        np.random.seed(200)
        label_np = np.random.randint(0, 3, size=(5, 5, 5, 5)).astype(np.int64)
        prog = fluid.Program()
        startup_prog = fluid.Program()
        place = (
            fluid.CUDAPlace(0)
            if fluid.core.is_compiled_with_cuda()
            else fluid.CPUPlace()
        )
        # place = fluid.CPUPlace()
        with fluid.program_guard(prog, startup_prog):
            input = fluid.data(
                name='input', shape=[5, 3, 5, 5, 5], dtype='float64'
            )
            label = fluid.data(name='label', shape=[5, 5, 5, 5], dtype='int64')
            nll_loss = paddle.nn.loss.NLLLoss()
            res = nll_loss(input, label)

            exe = fluid.Executor(place)
            (static_result,) = exe.run(
                prog,
                feed={"input": input_np, "label": label_np},
                fetch_list=[res],
            )

        with fluid.dygraph.guard():
            nll_loss = paddle.nn.loss.NLLLoss()
            dy_res = nll_loss(
                paddle.to_tensor(input_np), paddle.to_tensor(label_np)
            )
            dy_result = dy_res.numpy()

        input_shape = input_np.shape
        label_shape = label_np.shape
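        # Inputs whose rank is neither 2 nor 4 are flattened to an equivalent
        # NCHW problem, so the 4-D reference nll_loss_2d can be reused here.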
        input_np_reshape = np.reshape(
            input_np, (input_shape[0], input_shape[1], 1, -1)
        )
        label_np_reshape = np.reshape(label_np, (label_shape[0], 1, -1))
        expected = nll_loss_2d(input_np_reshape, label_np_reshape)[0]

        np.testing.assert_allclose(static_result, expected, rtol=1e-05)
        np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
        np.testing.assert_allclose(dy_result, expected, rtol=1e-05)

    def test_NLLLoss_in_dims_not_2or4_with_weight_mean(self):
        np.random.seed(200)
        input_np = np.random.random(size=(5, 3, 5, 5, 5)).astype(np.float64)
        np.random.seed(200)
        label_np = np.random.randint(0, 3, size=(5, 5, 5, 5)).astype(np.int64)
        weight_np = np.random.random(size=(3,)).astype(np.float64)
        prog = fluid.Program()
        startup_prog = fluid.Program()
        place = (
            fluid.CUDAPlace(0)
            if fluid.core.is_compiled_with_cuda()
            else fluid.CPUPlace()
        )
        # place = fluid.CPUPlace()
        with fluid.program_guard(prog, startup_prog):
            input = fluid.data(
                name='input', shape=[5, 3, 5, 5, 5], dtype='float64'
            )
            label = fluid.data(name='label', shape=[5, 5, 5, 5], dtype='int64')
            weight = fluid.data(name='weight', shape=[3], dtype='float64')
            nll_loss = paddle.nn.loss.NLLLoss(weight=weight)
            res = nll_loss(input, label)

            exe = fluid.Executor(place)
            (static_result,) = exe.run(
                prog,
                feed={
                    "input": input_np,
                    "label": label_np,
                    "weight": weight_np,
                },
                fetch_list=[res],
            )

        with fluid.dygraph.guard():
            nll_loss = paddle.nn.loss.NLLLoss(
                weight=paddle.to_tensor(weight_np)
            )
            dy_res = nll_loss(
                paddle.to_tensor(input_np), paddle.to_tensor(label_np)
            )
            dy_result = dy_res.numpy()

        input_shape = input_np.shape
        label_shape = label_np.shape
        input_np_reshape = np.reshape(
            input_np, (input_shape[0], input_shape[1], 1, -1)
        )
        label_np_reshape = np.reshape(label_np, (label_shape[0], 1, -1))
        expected = nll_loss_2d(
            input_np_reshape, label_np_reshape, weight=weight_np
        )[0]

        np.testing.assert_allclose(static_result, expected, rtol=1e-05)
        np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
        np.testing.assert_allclose(dy_result, expected, rtol=1e-05)

    def test_NLLLoss_in_dims_not_2or4_with_weight_sum(self):
        np.random.seed(200)
        input_np = np.random.random(size=(5, 3, 5, 5, 5)).astype(np.float64)
        np.random.seed(200)
        label_np = np.random.randint(0, 3, size=(5, 5, 5, 5)).astype(np.int64)
        weight_np = np.random.random(size=(3,)).astype(np.float64)
        prog = fluid.Program()
        startup_prog = fluid.Program()
        place = (
            fluid.CUDAPlace(0)
            if fluid.core.is_compiled_with_cuda()
            else fluid.CPUPlace()
        )
        place = fluid.CPUPlace()
        with fluid.program_guard(prog, startup_prog):
            input = fluid.data(
                name='input', shape=[5, 3, 5, 5, 5], dtype='float64'
            )
            label = fluid.data(name='label', shape=[5, 5, 5, 5], dtype='int64')
            weight = fluid.data(name='weight', shape=[3], dtype='float64')
            nll_loss = paddle.nn.loss.NLLLoss(weight=weight, reduction='sum')
            res = nll_loss(input, label)

            exe = fluid.Executor(place)
            (static_result,) = exe.run(
                prog,
                feed={
                    "input": input_np,
                    "label": label_np,
                    "weight": weight_np,
                },
                fetch_list=[res],
            )

        with fluid.dygraph.guard():
            nll_loss = paddle.nn.loss.NLLLoss(
                weight=paddle.to_tensor(weight_np), reduction='sum'
            )
            dy_res = nll_loss(
                paddle.to_tensor(input_np), paddle.to_tensor(label_np)
            )
            dy_result = dy_res.numpy()

        input_shape = input_np.shape
        label_shape = label_np.shape
        input_np_reshape = np.reshape(
            input_np, (input_shape[0], input_shape[1], 1, -1)
        )
        label_np_reshape = np.reshape(label_np, (label_shape[0], 1, -1))
        expected = nll_loss_2d(
            input_np_reshape,
            label_np_reshape,
            weight=weight_np,
            reduction='sum',
        )[0]

        np.testing.assert_allclose(static_result, expected, rtol=1e-05)
        np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
        np.testing.assert_allclose(dy_result, expected, rtol=1e-05)

    def test_NLLLoss_in_dims_not_2or4_with_weight_no_reduce(self):
        np.random.seed(200)
        input_np = np.random.random(size=(5, 3, 5, 5, 5)).astype(np.float64)
        np.random.seed(200)
        label_np = np.random.randint(0, 3, size=(5, 5, 5, 5)).astype(np.int64)
        weight_np = np.random.random(size=(3,)).astype(np.float64)
        prog = fluid.Program()
        startup_prog = fluid.Program()
        place = (
            fluid.CUDAPlace(0)
            if fluid.core.is_compiled_with_cuda()
            else fluid.CPUPlace()
        )
        # place = fluid.CPUPlace()
        with fluid.program_guard(prog, startup_prog):
            input = fluid.data(
                name='input', shape=[5, 3, 5, 5, 5], dtype='float64'
            )
            label = fluid.data(name='label', shape=[5, 5, 5, 5], dtype='int64')
            weight = fluid.data(name='weight', shape=[3], dtype='float64')
            nll_loss = paddle.nn.loss.NLLLoss(weight=weight, reduction='none')
            res = nll_loss(input, label)

            exe = fluid.Executor(place)
            (static_result,) = exe.run(
                prog,
                feed={
                    "input": input_np,
                    "label": label_np,
                    "weight": weight_np,
                },
                fetch_list=[res],
            )

        with fluid.dygraph.guard():
            nll_loss = paddle.nn.loss.NLLLoss(
                weight=paddle.to_tensor(weight_np), reduction='none'
            )
            dy_res = nll_loss(
                paddle.to_tensor(input_np), paddle.to_tensor(label_np)
            )
            dy_result = dy_res.numpy()

        input_shape = input_np.shape
        label_shape = label_np.shape
        out_shape = (input_shape[0],) + input_shape[2:]
        input_np_reshape = np.reshape(
            input_np, (input_shape[0], input_shape[1], 1, -1)
        )
        label_np_reshape = np.reshape(label_np, (label_shape[0], 1, -1))
        expected = nll_loss_2d(
            input_np_reshape,
            label_np_reshape,
            weight=weight_np,
            reduction='none',
        )
        expected = np.reshape(expected, out_shape)
        np.testing.assert_allclose(static_result, expected, rtol=1e-05)
        np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
        np.testing.assert_allclose(dy_result, expected, rtol=1e-05)

    def test_NLLLoss_in_dims_not_2or4_with_weight_no_reduce_cpu(self):
        np.random.seed(200)
        input_np = np.random.random(size=(5, 3, 5, 5, 5)).astype(np.float64)
        np.random.seed(200)
        label_np = np.random.randint(0, 3, size=(5, 5, 5, 5)).astype(np.int64)
        weight_np = np.random.random(size=(3,)).astype(np.float64)
        prog = fluid.Program()
        startup_prog = fluid.Program()
        place = fluid.CPUPlace()
        with fluid.program_guard(prog, startup_prog):
            input = fluid.data(
                name='input', shape=[5, 3, 5, 5, 5], dtype='float64'
            )
            label = fluid.data(name='label', shape=[5, 5, 5, 5], dtype='int64')
            weight = fluid.data(name='weight', shape=[3], dtype='float64')
            nll_loss = paddle.nn.loss.NLLLoss(weight=weight, reduction='none')
            res = nll_loss(input, label)

            exe = fluid.Executor(place)
            (static_result,) = exe.run(
                prog,
                feed={
                    "input": input_np,
                    "label": label_np,
                    "weight": weight_np,
                },
                fetch_list=[res],
            )

        with fluid.dygraph.guard():
            nll_loss = paddle.nn.loss.NLLLoss(
                weight=paddle.to_tensor(weight_np), reduction='none'
            )
            dy_res = nll_loss(
                paddle.to_tensor(input_np), paddle.to_tensor(label_np)
            )
            dy_result = dy_res.numpy()

        input_shape = input_np.shape
        label_shape = label_np.shape
        out_shape = (input_shape[0],) + input_shape[2:]
        input_np_reshape = np.reshape(
            input_np, (input_shape[0], input_shape[1], 1, -1)
        )
        label_np_reshape = np.reshape(label_np, (label_shape[0], 1, -1))
        expected = nll_loss_2d(
            input_np_reshape,
            label_np_reshape,
            weight=weight_np,
            reduction='none',
        )
        expected = np.reshape(expected, out_shape)
        np.testing.assert_allclose(static_result, expected, rtol=1e-05)
        np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
        np.testing.assert_allclose(dy_result, expected, rtol=1e-05)


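# The OpTest cases below exercise the nll_loss operator directly: forward
# outputs (including Total_weight) and gradients are checked against the
# NumPy references on CPU and, when available, CUDA.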
class TestNLLLossOp1DWithReduce(OpTest):
    def setUp(self):
        self.init_test_case()
        self.op_type = "nll_loss"
        self.python_api = paddle.nn.functional.nll_loss
        self.python_out_sig = ["Out"]
        self.with_weight = False
        np.random.seed(200)
        input_np = np.random.uniform(0.1, 0.8, self.input_shape).astype(
            "float64"
        )
        np.random.seed(200)
        label_np = np.random.randint(
            0, self.input_shape[1], self.label_shape
        ).astype("int64")
        output_np, total_weight_np = nll_loss_1d(input_np, label_np)
        self.inputs = {'X': input_np, 'Label': label_np}
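        # When with_weight is set, a per-class weight tensor is added to the
        # inputs and the expected output / total weight are recomputed with it.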
        if self.with_weight:
            np.random.seed(200)
            weight_np = np.random.uniform(0.1, 0.8, self.input_shape[1]).astype(
                "float64"
            )
            output_np, total_weight_np = nll_loss_1d(
                input_np, label_np, weight=weight_np
            )
            self.inputs['Weight'] = weight_np

        self.outputs = {'Out': output_np, 'Total_weight': total_weight_np}
        self.attrs = {'reduction': 'mean', 'ignore_index': -100}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_output_with_weight(self):
        self.with_weight = True
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.with_weight = True
        place = fluid.CPUPlace()
        self.check_grad_with_place(place, ['X'], 'Out', check_eager=True)
        if fluid.core.is_compiled_with_cuda():
            place = fluid.CUDAPlace(0)
            self.check_grad_with_place(place, ['X'], 'Out', check_eager=True)

    def init_test_case(self):
        self.input_shape = [10, 10]
        self.label_shape = [10]


class TestNLLLossOp1DNoReduce(OpTest):
    def setUp(self):
        self.init_test_case()
        self.op_type = "nll_loss"
        self.python_api = paddle.nn.functional.nll_loss
        self.python_out_sig = ["Out"]
        self.with_weight = False
        np.random.seed(200)
        input_np = np.random.uniform(0.1, 0.8, self.input_shape).astype(
            "float64"
        )
        np.random.seed(200)
        label_np = np.random.randint(
            0, self.input_shape[1], self.label_shape
        ).astype("int64")
        output_np = nll_loss_1d(input_np, label_np, reduction='none')
        total_weight_np = np.array([0]).astype('float64')
        self.inputs = {'X': input_np, 'Label': label_np}
        if self.with_weight:
            np.random.seed(200)
            weight_np = np.random.uniform(0.1, 0.8, self.input_shape[1]).astype(
                "float64"
            )
            output_np, total_weight_np = nll_loss_1d(
                input_np, label_np, weight=weight_np, reduction='none'
            )
            self.inputs['Weight'] = weight_np

        self.outputs = {'Out': output_np, 'Total_weight': total_weight_np}
        self.attrs = {'reduction': 'none', 'ignore_index': -100}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_output_with_weight(self):
        self.with_weight = True
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.with_weight = True
        place = fluid.CPUPlace()
        self.check_grad_with_place(place, ['X'], 'Out', check_eager=True)
        if fluid.core.is_compiled_with_cuda():
            place = fluid.CUDAPlace(0)
            self.check_grad_with_place(place, ['X'], 'Out', check_eager=True)

    def init_test_case(self):
        self.input_shape = [10, 10]
        self.label_shape = [10]


class TestNLLLossOp2DWithReduce(OpTest):
    def setUp(self):
        self.init_test_case()
        self.op_type = "nll_loss"
        self.python_api = paddle.nn.functional.nll_loss
        self.python_out_sig = ["Out"]
        self.with_weight = False
        np.random.seed(200)
        input_np = np.random.uniform(0.1, 0.8, self.input_shape).astype(
            "float64"
        )
        np.random.seed(200)
        label_np = np.random.randint(
            0, self.input_shape[1], self.label_shape
        ).astype("int64")
        output_np, total_weight_np = nll_loss_2d(input_np, label_np)
        self.inputs = {'X': input_np, 'Label': label_np}
        if self.with_weight:
            np.random.seed(200)
            weight_np = np.random.uniform(0.1, 0.8, self.input_shape[1]).astype(
                "float64"
            )
            output_np, total_weight_np = nll_loss_2d(
                input_np, label_np, weight=weight_np
            )
            self.inputs['Weight'] = weight_np

        self.outputs = {'Out': output_np, 'Total_weight': total_weight_np}
        self.attrs = {'reduction': 'mean', 'ignore_index': -100}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_output_with_weight(self):
        self.with_weight = True
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.with_weight = True
        place = fluid.CPUPlace()
        self.check_grad_with_place(place, ['X'], 'Out', check_eager=True)
        if fluid.core.is_compiled_with_cuda():
            place = fluid.CUDAPlace(0)
            self.check_grad_with_place(place, ['X'], 'Out', check_eager=True)

    def init_test_case(self):
        self.input_shape = [2, 3, 5, 5]
        self.label_shape = [2, 5, 5]


class TestNLLLossOp2DNoReduce(OpTest):
    def setUp(self):
        self.init_test_case()
        self.op_type = "nll_loss"
        self.python_api = paddle.nn.functional.nll_loss
        self.python_out_sig = ["Out"]
        self.with_weight = False
        np.random.seed(200)
        input_np = np.random.uniform(0.1, 0.8, self.input_shape).astype(
            "float64"
        )
        np.random.seed(200)
        label_np = np.random.randint(
            0, self.input_shape[1], self.label_shape
        ).astype("int64")
        output_np = nll_loss_2d(input_np, label_np, reduction='none')
        total_weight_np = np.array([0]).astype('float64')
        self.inputs = {'X': input_np, 'Label': label_np}
        if self.with_weight:
            np.random.seed(200)
            weight_np = np.random.uniform(0.1, 0.8, self.input_shape[1]).astype(
                "float64"
            )
            output_np, total_weight_np = nll_loss_2d(
                input_np, label_np, weight=weight_np, reduction='none'
            )
            self.inputs['Weight'] = weight_np

        self.outputs = {'Out': output_np, 'Total_weight': total_weight_np}
        self.attrs = {'reduction': 'none', 'ignore_index': -100}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_output_with_weight(self):
        self.with_weight = True
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.with_weight = True
        place = fluid.CPUPlace()
        self.check_grad_with_place(place, ['X'], 'Out', check_eager=True)
        if fluid.core.is_compiled_with_cuda():
            place = fluid.CUDAPlace(0)
            self.check_grad_with_place(place, ['X'], 'Out', check_eager=True)

    def init_test_case(self):
        self.input_shape = [5, 3, 5, 5]
        self.label_shape = [5, 5, 5]


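# The name passed to NLLLoss should propagate to the output variable.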
class TestNLLLossName(unittest.TestCase):
    def test_name(self):
        prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        place = paddle.CPUPlace()
        with paddle.static.program_guard(prog, startup_prog):
            x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64')
            label = paddle.fluid.data(name='label', shape=[10], dtype='int64')
            nll_loss = paddle.nn.loss.NLLLoss(name='nll_loss')
            res = nll_loss(x, label)
            self.assertTrue(res.name.startswith('nll_loss'))


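# Invalid inputs (rank < 2, zero-sized shapes, mismatched label rank) and
# unsupported reduction strings are expected to raise ValueError.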
class TestNLLLossInvalidArgs(unittest.TestCase):
    def test_x_dim_value_error(self):
        def test_x_dim_lt_2():
            prog = paddle.static.Program()
            startup_prog = paddle.static.Program()
            place = paddle.CPUPlace()
            with paddle.static.program_guard(prog, startup_prog):
                x = paddle.fluid.data(
                    name='x',
                    shape=[
                        10,
                    ],
                    dtype='float64',
                )
                label = paddle.fluid.data(
                    name='label',
                    shape=[
                        10,
                    ],
                    dtype='float64',
                )
                nll_loss = paddle.nn.loss.NLLLoss()
                res = nll_loss(x, label)

        self.assertRaises(ValueError, test_x_dim_lt_2)

        def test_x_dim_imperative_lt_2():
            with fluid.dygraph.guard():
                x_np = np.random.random(size=(5,)).astype(np.float64)
                label_np = np.random.randint(0, 10, size=(5,)).astype(np.int64)
                x = paddle.to_tensor(x_np)
                label = paddle.to_tensor(label_np)
                nll_loss = paddle.nn.loss.NLLLoss()
                res = nll_loss(x, label)

        self.assertRaises(ValueError, test_x_dim_imperative_lt_2)

        def test_x_shape_lt_1():
            prog = paddle.static.Program()
            startup_prog = paddle.static.Program()
            place = paddle.CPUPlace()
            with paddle.static.program_guard(prog, startup_prog):
                array = np.array([], dtype=np.float32)
                x = paddle.to_tensor(np.reshape(array, [1, 0]), dtype='float32')
                label = paddle.to_tensor(
                    np.reshape(array, [1, 0]), dtype='int64'
                )
                nll_loss = paddle.nn.loss.NLLLoss()
                res = nll_loss(x, label)

        self.assertRaises(ValueError, test_x_shape_lt_1)

        def test_x_dim_and_label_dim():
            prog = paddle.static.Program()
            startup_prog = paddle.static.Program()
            place = paddle.CPUPlace()
            with paddle.static.program_guard(prog, startup_prog):
                x_np = np.random.random(size=(5,)).astype(np.float64)
                label_np = np.random.randint(0, 10, size=(5, 1)).astype(
                    np.int64
                )
                x = paddle.to_tensor(x_np)
                label = paddle.to_tensor(label_np)
                nll_loss = paddle.nn.loss.NLLLoss()
                res = nll_loss(x, label)

        self.assertRaises(ValueError, test_x_dim_and_label_dim)

    def test_reduction_value_error(self):
        def test_NLLLoss_reduction_not_sum_mean_none():
            prog = paddle.static.Program()
            startup_prog = paddle.static.Program()
            place = paddle.CPUPlace()
            with paddle.static.program_guard(prog, startup_prog):
                x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64')
                label = paddle.fluid.data(
                    name='label', shape=[10], dtype='int64'
                )
                nll_loss = paddle.nn.loss.NLLLoss(reduction='')
                res = nll_loss(x, label)

        self.assertRaises(ValueError, test_NLLLoss_reduction_not_sum_mean_none)

        def test_NLLLoss_reduction_imperative_not_sum_mean_none():
            with fluid.dygraph.guard():
                x_np = np.random.random(size=(5, 3)).astype(np.float64)
                label_np = np.random.randint(0, 3, size=(5,)).astype(np.int64)
                x = paddle.to_tensor(x_np)
                label = paddle.to_tensor(label_np)
                nll_loss = paddle.nn.loss.NLLLoss(reduction='')
                res = nll_loss(x, label)

        self.assertRaises(
            ValueError, test_NLLLoss_reduction_imperative_not_sum_mean_none
        )

        def test_nll_loss_function_reduction_not_sum_mean_none():
            prog = paddle.static.Program()
            startup_prog = paddle.static.Program()
            place = paddle.CPUPlace()
            with paddle.static.program_guard(prog, startup_prog):
                x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64')
                label = paddle.fluid.data(
                    name='label', shape=[10], dtype='int64'
                )
                res = paddle.nn.functional.nll_loss(x, label, reduction='')

        self.assertRaises(
            ValueError, test_nll_loss_function_reduction_not_sum_mean_none
        )

        def test_nll_loss_function_reduction_imperative_not_sum_mean_none():
            with fluid.dygraph.guard():
                x_np = np.random.random(size=(5, 3)).astype(np.float64)
                label_np = np.random.randint(0, 3, size=(5,)).astype(np.int64)
                x = paddle.to_tensor(x_np)
                label = paddle.to_tensor(label_np)
                res = paddle.nn.functional.nll_loss(x, label, reduction='')

        self.assertRaises(
            ValueError,
            test_nll_loss_function_reduction_imperative_not_sum_mean_none,
        )


if __name__ == "__main__":
    paddle.enable_static()
    unittest.main()