# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle
import paddle.fluid as fluid
import numpy as np
import unittest
from op_test import OpTest
from paddle.fluid.framework import _test_eager_guard


def nll_loss_1d(
    logs, targets, weight=None, reduction='mean', ignore_index=-100
):
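    """NumPy reference for NLL loss on (N, C) input with (N,) targets.

    Each sample contributes -logs[i][target] scaled by the class weight
    (1 when no weight is given); targets equal to ignore_index contribute 0.
    Returns (loss, total_weight) for reduction 'sum' or 'mean', and the
    per-sample loss array for reduction 'none'.
    """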
    input_shape = logs.shape
    N = input_shape[0]
    C = input_shape[1]
    out = np.zeros_like(targets).astype(np.float64)
    total_weight = 0
    for i in range(N):
        cur_target = targets[i]
        if cur_target == ignore_index:
            out[i] = 0
            continue
        cur_weight = weight[cur_target] if weight is not None else 1
        total_weight += cur_weight
        out[i] = -logs[i][cur_target] * cur_weight
    if reduction == 'sum':
        return np.sum(out), np.array([total_weight]).astype('float64')
    elif reduction == 'mean':
        return out.sum() / total_weight, np.array([total_weight]).astype(
            'float64'
        )
    elif reduction == 'none':
        return out


def nll_loss_2d(
    logs, targets, weight=None, reduction='mean', ignore_index=-100
):
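    """NumPy reference for NLL loss on (N, C, H, W) input.

    Mirrors nll_loss_1d but accumulates the loss over the two spatial
    dimensions; returns (loss, total_weight) for reduction 'sum' or 'mean'
    and the unreduced (N, H, W) loss array for reduction 'none'.
    """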
    input_shape = logs.shape
    N = input_shape[0]
    H = input_shape[2]
    W = input_shape[3]
    out = np.zeros_like(targets).astype(np.float64)
    total_weight = 0
    for i in range(N):
        for h in range(H):
            for w in range(W):
                cur_target = targets[i][h][w]
                if cur_target == ignore_index:
                    out[i][h][w] = 0
                    continue
                cur_weight = weight[cur_target] if weight is not None else 1
                total_weight += cur_weight
                out[i][h][w] = -logs[i][cur_target][h][w] * cur_weight
    if reduction == 'sum':
        return np.sum(out), np.array([total_weight]).astype('float64')
    elif reduction == 'mean':
        return out.sum() / total_weight, np.array([total_weight]).astype(
            'float64'
        )
    elif reduction == 'none':
        return out


class TestNLLLoss(unittest.TestCase):
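    """API-level tests for paddle.nn.loss.NLLLoss.

    Each case runs the layer in static-graph mode, dygraph mode and (for
    some cases) eager mode, and compares every result against the NumPy
    references defined above.
    """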
    def test_NLLLoss_1D_mean(self):
        np.random.seed(200)
        input_np = np.random.random(size=(10, 10)).astype(np.float64)
        np.random.seed(200)
        label_np = np.random.randint(0, 10, size=(10,)).astype(np.int64)
        prog = fluid.Program()
        startup_prog = fluid.Program()
        place = (
            fluid.CUDAPlace(0)
            if fluid.core.is_compiled_with_cuda()
            else fluid.CPUPlace()
        )
        # place = fluid.CPUPlace()
        with fluid.program_guard(prog, startup_prog):
            input = fluid.data(name='input', shape=[10, 10], dtype='float64')
            label = fluid.data(name='label', shape=[10], dtype='int64')
            nll_loss = paddle.nn.loss.NLLLoss()
            res = nll_loss(input, label)

            exe = fluid.Executor(place)
            (static_result,) = exe.run(
                prog,
                feed={"input": input_np, "label": label_np},
                fetch_list=[res],
            )

        with fluid.dygraph.guard():
            nll_loss = paddle.nn.loss.NLLLoss()
            dy_res = nll_loss(
                paddle.to_tensor(input_np), paddle.to_tensor(label_np)
            )
            dy_result = dy_res.numpy()

        with fluid.dygraph.guard():
            with _test_eager_guard():
                nll_loss = paddle.nn.loss.NLLLoss()
                eager_res = nll_loss(
                    paddle.to_tensor(input_np), paddle.to_tensor(label_np)
                )
                eager_result = eager_res.numpy()

        expected = nll_loss_1d(input_np, label_np)[0]
        np.testing.assert_allclose(static_result, expected, rtol=1e-05)
        np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
        np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
        np.testing.assert_allclose(eager_result, expected, rtol=1e-05)

    def test_NLLLoss_1D_sum(self):
        np.random.seed(200)
        input_np = np.random.random(size=(10, 10)).astype(np.float64)
        np.random.seed(200)
        label_np = np.random.randint(0, 10, size=(10,)).astype(np.int64)
        prog = fluid.Program()
        startup_prog = fluid.Program()
        place = (
            fluid.CUDAPlace(0)
            if fluid.core.is_compiled_with_cuda()
            else fluid.CPUPlace()
        )
        # place = fluid.CPUPlace()
        with fluid.program_guard(prog, startup_prog):
            input = fluid.data(name='input', shape=[10, 10], dtype='float64')
            label = fluid.data(name='label', shape=[10], dtype='int64')
            nll_loss = paddle.nn.loss.NLLLoss(reduction='sum')
            res = nll_loss(input, label)

            exe = fluid.Executor(place)
            (static_result,) = exe.run(
                prog,
                feed={"input": input_np, "label": label_np},
                fetch_list=[res],
            )

        with fluid.dygraph.guard():
            nll_loss = paddle.nn.loss.NLLLoss(reduction='sum')
            dy_res = nll_loss(
                paddle.to_tensor(input_np), paddle.to_tensor(label_np)
            )
            dy_result = dy_res.numpy()

            with _test_eager_guard():
                nll_loss = paddle.nn.loss.NLLLoss(reduction='sum')
                in_t = paddle.to_tensor(input_np)
                label = paddle.to_tensor(label_np)
                in_t.stop_gradient = False
                eager_res = nll_loss(in_t, label)
                eager_result = eager_res.numpy()
                loss = eager_res.sum()
                loss.backward()

        expected = nll_loss_1d(input_np, label_np, reduction='sum')[0]
        np.testing.assert_allclose(static_result, expected, rtol=1e-05)
        np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
        np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
        np.testing.assert_allclose(eager_result, expected, rtol=1e-05)

    def test_NLLLoss_1D_with_weight_mean(self):
        np.random.seed(200)
        input_np = np.random.random(size=(10, 10)).astype(np.float64)
        np.random.seed(200)
        label_np = np.random.randint(0, 10, size=(10,)).astype(np.int64)
        weight_np = np.random.random(size=(10,)).astype(np.float64)
        prog = fluid.Program()
        startup_prog = fluid.Program()
        place = (
            fluid.CUDAPlace(0)
            if fluid.core.is_compiled_with_cuda()
            else fluid.CPUPlace()
        )
        # place = fluid.CPUPlace()
        with fluid.program_guard(prog, startup_prog):
            input = fluid.data(name='input', shape=[10, 10], dtype='float64')
            label = fluid.data(name='label', shape=[10], dtype='int64')
            weight = fluid.data(name='weight', shape=[10], dtype='float64')
            nll_loss = paddle.nn.loss.NLLLoss(weight=weight)
            res = nll_loss(input, label)

            exe = fluid.Executor(place)
            (static_result,) = exe.run(
                prog,
                feed={
                    "input": input_np,
                    "label": label_np,
                    "weight": weight_np,
                },
                fetch_list=[res],
            )

        with fluid.dygraph.guard():
            nll_loss = paddle.nn.loss.NLLLoss(
                weight=paddle.to_tensor(weight_np)
            )
            dy_res = nll_loss(
                paddle.to_tensor(input_np), paddle.to_tensor(label_np)
            )
            dy_result = dy_res.numpy()

            with _test_eager_guard():
                nll_loss = paddle.nn.loss.NLLLoss(
                    weight=paddle.to_tensor(weight_np)
                )
                eager_res = nll_loss(
                    paddle.to_tensor(input_np), paddle.to_tensor(label_np)
                )
                loss = eager_res.sum()
                loss.backward()
                eager_result = eager_res.numpy()

        expected = nll_loss_1d(input_np, label_np, weight=weight_np)[0]

        np.testing.assert_allclose(static_result, expected, rtol=1e-05)
        np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
        np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
        np.testing.assert_allclose(eager_result, expected, rtol=1e-05)

    def test_NLLLoss_1D_with_weight_sum(self):
        np.random.seed(200)
        input_np = np.random.random(size=(10, 10)).astype(np.float64)
        np.random.seed(200)
        label_np = np.random.randint(0, 10, size=(10,)).astype(np.int64)
        weight_np = np.random.random(size=(10,)).astype(np.float64)
        prog = fluid.Program()
        startup_prog = fluid.Program()
        place = (
            fluid.CUDAPlace(0)
            if fluid.core.is_compiled_with_cuda()
            else fluid.CPUPlace()
        )
        # place = fluid.CPUPlace()
        with fluid.program_guard(prog, startup_prog):
            input = fluid.data(name='input', shape=[10, 10], dtype='float64')
            label = fluid.data(name='label', shape=[10], dtype='int64')
            weight = fluid.data(name='weight', shape=[10], dtype='float64')
            nll_loss = paddle.nn.loss.NLLLoss(weight=weight, reduction='sum')
            res = nll_loss(input, label)

            exe = fluid.Executor(place)
            (static_result,) = exe.run(
                prog,
                feed={
                    "input": input_np,
                    "label": label_np,
                    "weight": weight_np,
                },
                fetch_list=[res],
            )

        with fluid.dygraph.guard():
            nll_loss = paddle.nn.loss.NLLLoss(
                weight=paddle.to_tensor(weight_np), reduction='sum'
            )
            dy_res = nll_loss(
                paddle.to_tensor(input_np), paddle.to_tensor(label_np)
            )
            dy_result = dy_res.numpy()
        expected = nll_loss_1d(
            input_np, label_np, weight=weight_np, reduction='sum'
        )[0]

        np.testing.assert_allclose(static_result, expected, rtol=1e-05)
        np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
        np.testing.assert_allclose(dy_result, expected, rtol=1e-05)

    def test_NLLLoss_1D_with_weight_mean_cpu(self):
        np.random.seed(200)
        input_np = np.random.random(size=(10, 10)).astype(np.float64)
        np.random.seed(200)
        label_np = np.random.randint(0, 10, size=(10,)).astype(np.int64)
        weight_np = np.random.random(size=(10,)).astype(np.float64)
        prog = fluid.Program()
        startup_prog = fluid.Program()
        place = fluid.CPUPlace()
        with fluid.program_guard(prog, startup_prog):
            input = fluid.data(name='input', shape=[10, 10], dtype='float64')
            label = fluid.data(name='label', shape=[10], dtype='int64')
            weight = fluid.data(name='weight', shape=[10], dtype='float64')
            nll_loss = paddle.nn.loss.NLLLoss(weight=weight)
            res = nll_loss(input, label)

            exe = fluid.Executor(place)
            (static_result,) = exe.run(
                prog,
                feed={
                    "input": input_np,
                    "label": label_np,
                    "weight": weight_np,
                },
                fetch_list=[res],
            )

        with fluid.dygraph.guard():
            nll_loss = paddle.nn.loss.NLLLoss(
                weight=paddle.to_tensor(weight_np)
            )
            dy_res = nll_loss(
                paddle.to_tensor(input_np), paddle.to_tensor(label_np)
            )
            dy_result = dy_res.numpy()
        expected = nll_loss_1d(input_np, label_np, weight=weight_np)[0]

        np.testing.assert_allclose(static_result, expected, rtol=1e-05)
        np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
        np.testing.assert_allclose(dy_result, expected, rtol=1e-05)

    def test_NLLLoss_1D_with_weight_no_reduce_cpu(self):
        np.random.seed(200)
        input_np = np.random.random(size=(10, 10)).astype(np.float64)
        np.random.seed(200)
        label_np = np.random.randint(0, 10, size=(10,)).astype(np.int64)
        weight_np = np.random.random(size=(10,)).astype(np.float64)
        prog = fluid.Program()
        startup_prog = fluid.Program()
        place = fluid.CPUPlace()
        with fluid.program_guard(prog, startup_prog):
            input = fluid.data(name='input', shape=[10, 10], dtype='float64')
            label = fluid.data(name='label', shape=[10], dtype='int64')
            weight = fluid.data(name='weight', shape=[10], dtype='float64')
            nll_loss = paddle.nn.loss.NLLLoss(weight=weight, reduction='none')
            res = nll_loss(input, label)

            exe = fluid.Executor(place)
            (static_result,) = exe.run(
                prog,
                feed={
                    "input": input_np,
                    "label": label_np,
                    "weight": weight_np,
                },
                fetch_list=[res],
            )

        with fluid.dygraph.guard():
            nll_loss = paddle.nn.loss.NLLLoss(
                weight=paddle.to_tensor(weight_np), reduction='none'
            )
            dy_res = nll_loss(
                paddle.to_tensor(input_np), paddle.to_tensor(label_np)
            )
            dy_result = dy_res.numpy()
        expected = nll_loss_1d(
            input_np, label_np, weight=weight_np, reduction='none'
        )

        np.testing.assert_allclose(static_result, expected, rtol=1e-05)
        np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
        np.testing.assert_allclose(dy_result, expected, rtol=1e-05)

    def test_NLLLoss_2D_mean(self):
        np.random.seed(200)
        input_np = np.random.random(size=(5, 3, 5, 5)).astype(np.float64)
        np.random.seed(200)
        label_np = np.random.randint(0, 3, size=(5, 5, 5)).astype(np.int64)
        prog = fluid.Program()
        startup_prog = fluid.Program()
        place = (
            fluid.CUDAPlace(0)
            if fluid.core.is_compiled_with_cuda()
            else fluid.CPUPlace()
        )
        # place = fluid.CPUPlace()
        with fluid.program_guard(prog, startup_prog):
            input = fluid.data(
                name='input', shape=[5, 3, 5, 5], dtype='float64'
            )
            label = fluid.data(name='label', shape=[5, 5, 5], dtype='int64')
            nll_loss = paddle.nn.loss.NLLLoss()
            res = nll_loss(input, label)

            exe = fluid.Executor(place)
            (static_result,) = exe.run(
                prog,
                feed={"input": input_np, "label": label_np},
                fetch_list=[res],
            )

        with fluid.dygraph.guard():
            nll_loss = paddle.nn.loss.NLLLoss()
            dy_res = nll_loss(
                paddle.to_tensor(input_np), paddle.to_tensor(label_np)
            )
            dy_result = dy_res.numpy()

        expected = nll_loss_2d(input_np, label_np)[0]

        np.testing.assert_allclose(static_result, expected, rtol=1e-05)
        np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
        np.testing.assert_allclose(dy_result, expected, rtol=1e-05)

    def test_NLLLoss_2D_sum(self):
        np.random.seed(200)
        input_np = np.random.random(size=(5, 3, 5, 5)).astype(np.float64)
        np.random.seed(200)
        label_np = np.random.randint(0, 3, size=(5, 5, 5)).astype(np.int64)
        prog = fluid.Program()
        startup_prog = fluid.Program()
        place = (
            fluid.CUDAPlace(0)
            if fluid.core.is_compiled_with_cuda()
            else fluid.CPUPlace()
        )
        # place = fluid.CPUPlace()
        with fluid.program_guard(prog, startup_prog):
            input = fluid.data(
                name='input', shape=[5, 3, 5, 5], dtype='float64'
            )
            label = fluid.data(name='label', shape=[5, 5, 5], dtype='int64')
            nll_loss = paddle.nn.loss.NLLLoss(reduction='sum')
            res = nll_loss(input, label)

            exe = fluid.Executor(place)
            (static_result,) = exe.run(
                prog,
                feed={"input": input_np, "label": label_np},
                fetch_list=[res],
            )

        with fluid.dygraph.guard():
            nll_loss = paddle.nn.loss.NLLLoss(reduction='sum')
            dy_res = nll_loss(
                paddle.to_tensor(input_np), paddle.to_tensor(label_np)
            )
            dy_result = dy_res.numpy()

        expected = nll_loss_2d(input_np, label_np, reduction='sum')[0]

        np.testing.assert_allclose(static_result, expected, rtol=1e-05)
        np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
        np.testing.assert_allclose(dy_result, expected, rtol=1e-05)

    def test_NLLLoss_2D_with_weight_mean(self):
        np.random.seed(200)
        input_np = np.random.random(size=(5, 3, 5, 5)).astype(np.float64)
        np.random.seed(200)
        label_np = np.random.randint(0, 3, size=(5, 5, 5)).astype(np.int64)
        weight_np = np.random.random(size=(3,)).astype(np.float64)
        prog = fluid.Program()
        startup_prog = fluid.Program()
        place = (
            fluid.CUDAPlace(0)
            if fluid.core.is_compiled_with_cuda()
            else fluid.CPUPlace()
        )
        # place = fluid.CPUPlace()
        with fluid.program_guard(prog, startup_prog):
            input = fluid.data(
                name='input', shape=[5, 3, 5, 5], dtype='float64'
            )
            label = fluid.data(name='label', shape=[5, 5, 5], dtype='int64')
            weight = fluid.data(name='weight', shape=[3], dtype='float64')

            nll_loss = paddle.nn.loss.NLLLoss(weight=weight)
            res = nll_loss(input, label)

            exe = fluid.Executor(place)
            (static_result,) = exe.run(
                prog,
                feed={
                    "input": input_np,
                    "label": label_np,
                    "weight": weight_np,
                },
                fetch_list=[res],
            )

        with fluid.dygraph.guard():
            nll_loss = paddle.nn.loss.NLLLoss(
                weight=paddle.to_tensor(weight_np)
            )
            dy_res = nll_loss(
                paddle.to_tensor(input_np), paddle.to_tensor(label_np)
            )
            dy_result = dy_res.numpy()

        expected = nll_loss_2d(input_np, label_np, weight=weight_np)[0]

        np.testing.assert_allclose(static_result, expected, rtol=1e-05)
        np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
        np.testing.assert_allclose(dy_result, expected, rtol=1e-05)

    def test_NLLLoss_2D_with_weight_mean_cpu(self):
        np.random.seed(200)
        input_np = np.random.random(size=(5, 3, 5, 5)).astype(np.float64)
        np.random.seed(200)
        label_np = np.random.randint(0, 3, size=(5, 5, 5)).astype(np.int64)
        weight_np = np.random.random(size=(3,)).astype(np.float64)
        prog = fluid.Program()
        startup_prog = fluid.Program()
        place = fluid.CPUPlace()
        with fluid.program_guard(prog, startup_prog):
            input = fluid.data(
                name='input', shape=[5, 3, 5, 5], dtype='float64'
            )
            label = fluid.data(name='label', shape=[5, 5, 5], dtype='int64')
            weight = fluid.data(name='weight', shape=[3], dtype='float64')

            nll_loss = paddle.nn.loss.NLLLoss(weight=weight)
            res = nll_loss(input, label)

            exe = fluid.Executor(place)
            (static_result,) = exe.run(
                prog,
                feed={
                    "input": input_np,
                    "label": label_np,
                    "weight": weight_np,
                },
                fetch_list=[res],
            )

        with fluid.dygraph.guard():
            nll_loss = paddle.nn.loss.NLLLoss(
                weight=paddle.to_tensor(weight_np)
            )
            dy_res = nll_loss(
                paddle.to_tensor(input_np), paddle.to_tensor(label_np)
            )
            dy_result = dy_res.numpy()

        expected = nll_loss_2d(input_np, label_np, weight=weight_np)[0]

        np.testing.assert_allclose(static_result, expected, rtol=1e-05)
        np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
        np.testing.assert_allclose(dy_result, expected, rtol=1e-05)

    def test_NLLLoss_2D_with_weight_sum(self):
        np.random.seed(200)
        input_np = np.random.random(size=(5, 3, 5, 5)).astype(np.float64)
        np.random.seed(200)
        label_np = np.random.randint(0, 3, size=(5, 5, 5)).astype(np.int64)
        weight_np = np.random.random(size=(3,)).astype(np.float64)
        prog = fluid.Program()
        startup_prog = fluid.Program()
        place = (
            fluid.CUDAPlace(0)
            if fluid.core.is_compiled_with_cuda()
            else fluid.CPUPlace()
        )
        with fluid.program_guard(prog, startup_prog):
            input = fluid.data(
                name='input', shape=[5, 3, 5, 5], dtype='float64'
            )
            label = fluid.data(name='label', shape=[5, 5, 5], dtype='int64')
            weight = fluid.data(name='weight', shape=[3], dtype='float64')

            nll_loss = paddle.nn.loss.NLLLoss(weight=weight, reduction='sum')
            res = nll_loss(input, label)

            exe = fluid.Executor(place)
            (static_result,) = exe.run(
                prog,
                feed={
                    "input": input_np,
                    "label": label_np,
                    "weight": weight_np,
                },
                fetch_list=[res],
            )

        with fluid.dygraph.guard():
            nll_loss = paddle.nn.loss.NLLLoss(
                weight=paddle.to_tensor(weight_np), reduction='sum'
            )
            dy_res = nll_loss(
                paddle.to_tensor(input_np), paddle.to_tensor(label_np)
            )
            dy_result = dy_res.numpy()

        expected = nll_loss_2d(
            input_np, label_np, weight=weight_np, reduction='sum'
        )[0]

        np.testing.assert_allclose(static_result, expected, rtol=1e-05)
        np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
        np.testing.assert_allclose(dy_result, expected, rtol=1e-05)

    def test_NLLLoss_in_dims_not_2or4_mean(self):
        np.random.seed(200)
        input_np = np.random.random(size=(5, 3, 5, 5, 5)).astype(np.float64)
        np.random.seed(200)
        label_np = np.random.randint(0, 3, size=(5, 5, 5, 5)).astype(np.int64)
        prog = fluid.Program()
        startup_prog = fluid.Program()
        place = (
            fluid.CUDAPlace(0)
            if fluid.core.is_compiled_with_cuda()
            else fluid.CPUPlace()
        )
        # place = fluid.CPUPlace()
        with fluid.program_guard(prog, startup_prog):
            input = fluid.data(
                name='input', shape=[5, 3, 5, 5, 5], dtype='float64'
            )
            label = fluid.data(name='label', shape=[5, 5, 5, 5], dtype='int64')
            nll_loss = paddle.nn.loss.NLLLoss()
            res = nll_loss(input, label)

            exe = fluid.Executor(place)
            (static_result,) = exe.run(
                prog,
                feed={"input": input_np, "label": label_np},
                fetch_list=[res],
            )

        with fluid.dygraph.guard():
            nll_loss = paddle.nn.loss.NLLLoss()
            dy_res = nll_loss(
                paddle.to_tensor(input_np), paddle.to_tensor(label_np)
            )
            dy_result = dy_res.numpy()

        input_shape = input_np.shape
        label_shape = label_np.shape
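        # Inputs with rank > 4 are flattened to (N, C, 1, -1) and labels to
        # (N, 1, -1) so the 4-D reference nll_loss_2d can compute the
        # expected loss.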
        input_np_reshape = np.reshape(
            input_np, (input_shape[0], input_shape[1], 1, -1)
        )
        label_np_reshape = np.reshape(label_np, (label_shape[0], 1, -1))
        expected = nll_loss_2d(input_np_reshape, label_np_reshape)[0]

        np.testing.assert_allclose(static_result, expected, rtol=1e-05)
        np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
        np.testing.assert_allclose(dy_result, expected, rtol=1e-05)

    def test_NLLLoss_in_dims_not_2or4_with_weight_mean(self):
        np.random.seed(200)
        input_np = np.random.random(size=(5, 3, 5, 5, 5)).astype(np.float64)
        np.random.seed(200)
        label_np = np.random.randint(0, 3, size=(5, 5, 5, 5)).astype(np.int64)
        weight_np = np.random.random(size=(3,)).astype(np.float64)
        prog = fluid.Program()
        startup_prog = fluid.Program()
        place = (
            fluid.CUDAPlace(0)
            if fluid.core.is_compiled_with_cuda()
            else fluid.CPUPlace()
        )
        # place = fluid.CPUPlace()
        with fluid.program_guard(prog, startup_prog):
            input = fluid.data(
                name='input', shape=[5, 3, 5, 5, 5], dtype='float64'
            )
            label = fluid.data(name='label', shape=[5, 5, 5, 5], dtype='int64')
            weight = fluid.data(name='weight', shape=[3], dtype='float64')
            nll_loss = paddle.nn.loss.NLLLoss(weight=weight)
            res = nll_loss(input, label)

            exe = fluid.Executor(place)
            (static_result,) = exe.run(
                prog,
                feed={
                    "input": input_np,
                    "label": label_np,
                    "weight": weight_np,
                },
                fetch_list=[res],
            )

        with fluid.dygraph.guard():
            nll_loss = paddle.nn.loss.NLLLoss(
                weight=paddle.to_tensor(weight_np)
            )
            dy_res = nll_loss(
                paddle.to_tensor(input_np), paddle.to_tensor(label_np)
            )
            dy_result = dy_res.numpy()

        input_shape = input_np.shape
        label_shape = label_np.shape
        input_np_reshape = np.reshape(
            input_np, (input_shape[0], input_shape[1], 1, -1)
        )
        label_np_reshape = np.reshape(label_np, (label_shape[0], 1, -1))
        expected = nll_loss_2d(
            input_np_reshape, label_np_reshape, weight=weight_np
        )[0]

        np.testing.assert_allclose(static_result, expected, rtol=1e-05)
        np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
        np.testing.assert_allclose(dy_result, expected, rtol=1e-05)

    def test_NLLLoss_in_dims_not_2or4_with_weight_sum(self):
        np.random.seed(200)
        input_np = np.random.random(size=(5, 3, 5, 5, 5)).astype(np.float64)
        np.random.seed(200)
        label_np = np.random.randint(0, 3, size=(5, 5, 5, 5)).astype(np.int64)
        weight_np = np.random.random(size=(3,)).astype(np.float64)
        prog = fluid.Program()
        startup_prog = fluid.Program()
        place = (
            fluid.CUDAPlace(0)
            if fluid.core.is_compiled_with_cuda()
            else fluid.CPUPlace()
        )
        place = fluid.CPUPlace()
        with fluid.program_guard(prog, startup_prog):
            input = fluid.data(
                name='input', shape=[5, 3, 5, 5, 5], dtype='float64'
            )
            label = fluid.data(name='label', shape=[5, 5, 5, 5], dtype='int64')
            weight = fluid.data(name='weight', shape=[3], dtype='float64')
            nll_loss = paddle.nn.loss.NLLLoss(weight=weight, reduction='sum')
            res = nll_loss(input, label)

            exe = fluid.Executor(place)
            (static_result,) = exe.run(
                prog,
                feed={
                    "input": input_np,
                    "label": label_np,
                    "weight": weight_np,
                },
                fetch_list=[res],
            )

        with fluid.dygraph.guard():
            nll_loss = paddle.nn.loss.NLLLoss(
                weight=paddle.to_tensor(weight_np), reduction='sum'
            )
            dy_res = nll_loss(
                paddle.to_tensor(input_np), paddle.to_tensor(label_np)
            )
            dy_result = dy_res.numpy()

        input_shape = input_np.shape
        label_shape = label_np.shape
        input_np_reshape = np.reshape(
            input_np, (input_shape[0], input_shape[1], 1, -1)
        )
        label_np_reshape = np.reshape(label_np, (label_shape[0], 1, -1))
        expected = nll_loss_2d(
            input_np_reshape,
            label_np_reshape,
            weight=weight_np,
            reduction='sum',
        )[0]

        np.testing.assert_allclose(static_result, expected, rtol=1e-05)
        np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
        np.testing.assert_allclose(dy_result, expected, rtol=1e-05)

    def test_NLLLoss_in_dims_not_2or4_with_weight_no_reduce(self):
        np.random.seed(200)
        input_np = np.random.random(size=(5, 3, 5, 5, 5)).astype(np.float64)
        np.random.seed(200)
        label_np = np.random.randint(0, 3, size=(5, 5, 5, 5)).astype(np.int64)
        weight_np = np.random.random(size=(3,)).astype(np.float64)
        prog = fluid.Program()
        startup_prog = fluid.Program()
        place = (
            fluid.CUDAPlace(0)
            if fluid.core.is_compiled_with_cuda()
            else fluid.CPUPlace()
        )
        # place = fluid.CPUPlace()
        with fluid.program_guard(prog, startup_prog):
            input = fluid.data(
                name='input', shape=[5, 3, 5, 5, 5], dtype='float64'
            )
            label = fluid.data(name='label', shape=[5, 5, 5, 5], dtype='int64')
            weight = fluid.data(name='weight', shape=[3], dtype='float64')
            nll_loss = paddle.nn.loss.NLLLoss(weight=weight, reduction='none')
            res = nll_loss(input, label)

            exe = fluid.Executor(place)
            (static_result,) = exe.run(
                prog,
                feed={
                    "input": input_np,
                    "label": label_np,
                    "weight": weight_np,
                },
                fetch_list=[res],
            )

        with fluid.dygraph.guard():
            nll_loss = paddle.nn.loss.NLLLoss(
                weight=paddle.to_tensor(weight_np), reduction='none'
            )
            dy_res = nll_loss(
                paddle.to_tensor(input_np), paddle.to_tensor(label_np)
            )
            dy_result = dy_res.numpy()

        input_shape = input_np.shape
        label_shape = label_np.shape
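        # Same flattening as in the mean case; with reduction='none' the
        # expected per-position losses are reshaped back to the original
        # (N, d1, d2, d3) output shape before comparison.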
        out_shape = (input_shape[0],) + input_shape[2:]
        input_np_reshape = np.reshape(
            input_np, (input_shape[0], input_shape[1], 1, -1)
        )
        label_np_reshape = np.reshape(label_np, (label_shape[0], 1, -1))
        expected = nll_loss_2d(
            input_np_reshape,
            label_np_reshape,
            weight=weight_np,
            reduction='none',
        )
        expected = np.reshape(expected, out_shape)
        np.testing.assert_allclose(static_result, expected, rtol=1e-05)
        np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
        np.testing.assert_allclose(dy_result, expected, rtol=1e-05)

    def test_NLLLoss_in_dims_not_2or4_with_weight_no_reduce_cpu(self):
        np.random.seed(200)
        input_np = np.random.random(size=(5, 3, 5, 5, 5)).astype(np.float64)
        np.random.seed(200)
        label_np = np.random.randint(0, 3, size=(5, 5, 5, 5)).astype(np.int64)
        weight_np = np.random.random(size=(3,)).astype(np.float64)
        prog = fluid.Program()
        startup_prog = fluid.Program()
        place = fluid.CPUPlace()
        with fluid.program_guard(prog, startup_prog):
            input = fluid.data(
                name='input', shape=[5, 3, 5, 5, 5], dtype='float64'
            )
            label = fluid.data(name='label', shape=[5, 5, 5, 5], dtype='int64')
            weight = fluid.data(name='weight', shape=[3], dtype='float64')
            nll_loss = paddle.nn.loss.NLLLoss(weight=weight, reduction='none')
            res = nll_loss(input, label)

            exe = fluid.Executor(place)
            (static_result,) = exe.run(
                prog,
                feed={
                    "input": input_np,
                    "label": label_np,
                    "weight": weight_np,
                },
                fetch_list=[res],
            )

        with fluid.dygraph.guard():
            nll_loss = paddle.nn.loss.NLLLoss(
                weight=paddle.to_tensor(weight_np), reduction='none'
            )
            dy_res = nll_loss(
                paddle.to_tensor(input_np), paddle.to_tensor(label_np)
            )
            dy_result = dy_res.numpy()

        input_shape = input_np.shape
        label_shape = label_np.shape
        out_shape = (input_shape[0],) + input_shape[2:]
        input_np_reshape = np.reshape(
            input_np, (input_shape[0], input_shape[1], 1, -1)
        )
        label_np_reshape = np.reshape(label_np, (label_shape[0], 1, -1))
        expected = nll_loss_2d(
            input_np_reshape,
            label_np_reshape,
            weight=weight_np,
            reduction='none',
        )
        expected = np.reshape(expected, out_shape)
        np.testing.assert_allclose(static_result, expected, rtol=1e-05)
        np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
        np.testing.assert_allclose(dy_result, expected, rtol=1e-05)


class TestNLLLossOp1DWithReduce(OpTest):
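    """Operator-level test: feeds the nll_loss op directly and checks its
    forward outputs (Out, Total_weight) and the gradient of X against the
    NumPy reference, with and without a Weight input; the other OpTest
    subclasses below follow the same pattern.
    """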
    def setUp(self):
        self.init_test_case()
        self.op_type = "nll_loss"
        self.python_api = paddle.nn.functional.nll_loss
        self.python_out_sig = ["Out"]
        self.with_weight = False
        np.random.seed(200)
        input_np = np.random.uniform(0.1, 0.8, self.input_shape).astype(
            "float64"
        )
        np.random.seed(200)
        label_np = np.random.randint(
            0, self.input_shape[1], self.label_shape
        ).astype("int64")
        output_np, total_weight_np = nll_loss_1d(input_np, label_np)
        self.inputs = {'X': input_np, 'Label': label_np}
        if self.with_weight:
            np.random.seed(200)
            weight_np = np.random.uniform(0.1, 0.8, self.input_shape[1]).astype(
                "float64"
            )
            output_np, total_weight_np = nll_loss_1d(
                input_np, label_np, weight=weight_np
            )
            self.inputs['Weight'] = weight_np

        self.outputs = {'Out': output_np, 'Total_weight': total_weight_np}
        self.attrs = {'reduction': 'mean', 'ignore_index': -100}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_output_with_weight(self):
        self.with_weight = True
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.with_weight = True
        place = fluid.CPUPlace()
        self.check_grad_with_place(place, ['X'], 'Out', check_eager=True)
        if fluid.core.is_compiled_with_cuda():
            place = fluid.CUDAPlace(0)
            self.check_grad_with_place(place, ['X'], 'Out', check_eager=True)

    def init_test_case(self):
        self.input_shape = [10, 10]
        self.label_shape = [10]


class TestNLLLossOp1DNoReduce(OpTest):
    def setUp(self):
        self.init_test_case()
        self.op_type = "nll_loss"
        self.python_api = paddle.nn.functional.nll_loss
        self.python_out_sig = ["Out"]
        self.with_weight = False
        np.random.seed(200)
        input_np = np.random.uniform(0.1, 0.8, self.input_shape).astype(
            "float64"
        )
        np.random.seed(200)
        label_np = np.random.randint(
            0, self.input_shape[1], self.label_shape
        ).astype("int64")
        output_np = nll_loss_1d(input_np, label_np, reduction='none')
        total_weight_np = np.array([0]).astype('float64')
        self.inputs = {'X': input_np, 'Label': label_np}
        if self.with_weight:
            np.random.seed(200)
            weight_np = np.random.uniform(0.1, 0.8, self.input_shape[1]).astype(
                "float64"
            )
            output_np, total_weight_np = nll_loss_1d(
                input_np, label_np, weight=weight_np, reduction='none'
            )
            self.inputs['Weight'] = weight_np

        self.outputs = {'Out': output_np, 'Total_weight': total_weight_np}
        self.attrs = {'reduction': 'none', 'ignore_index': -100}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_output_with_weight(self):
        self.with_weight = True
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.with_weight = True
        place = fluid.CPUPlace()
        self.check_grad_with_place(place, ['X'], 'Out', check_eager=True)
        if fluid.core.is_compiled_with_cuda():
            place = fluid.CUDAPlace(0)
            self.check_grad_with_place(place, ['X'], 'Out', check_eager=True)

    def init_test_case(self):
        self.input_shape = [10, 10]
        self.label_shape = [10]


class TestNLLLossOp2DWithReduce(OpTest):
    def setUp(self):
        self.init_test_case()
        self.op_type = "nll_loss"
        self.python_api = paddle.nn.functional.nll_loss
        self.python_out_sig = ["Out"]
        self.with_weight = False
        np.random.seed(200)
        input_np = np.random.uniform(0.1, 0.8, self.input_shape).astype(
            "float64"
        )
        np.random.seed(200)
        label_np = np.random.randint(
            0, self.input_shape[1], self.label_shape
        ).astype("int64")
        output_np, total_weight_np = nll_loss_2d(input_np, label_np)
        self.inputs = {'X': input_np, 'Label': label_np}
        if self.with_weight:
            np.random.seed(200)
            weight_np = np.random.uniform(0.1, 0.8, self.input_shape[1]).astype(
                "float64"
            )
            output_np, total_weight_np = nll_loss_2d(
                input_np, label_np, weight=weight_np
            )
            self.inputs['Weight'] = weight_np

        self.outputs = {'Out': output_np, 'Total_weight': total_weight_np}
        self.attrs = {'reduction': 'mean', 'ignore_index': -100}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_output_with_weight(self):
        self.with_weight = True
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.with_weight = True
        place = fluid.CPUPlace()
        self.check_grad_with_place(place, ['X'], 'Out', check_eager=True)
        if fluid.core.is_compiled_with_cuda():
            place = fluid.CUDAPlace(0)
            self.check_grad_with_place(place, ['X'], 'Out', check_eager=True)

    def init_test_case(self):
        self.input_shape = [2, 3, 5, 5]
        self.label_shape = [2, 5, 5]


class TestNLLLossOp2DNoReduce(OpTest):
    def setUp(self):
        self.init_test_case()
        self.op_type = "nll_loss"
        self.python_api = paddle.nn.functional.nll_loss
        self.python_out_sig = ["Out"]
        self.with_weight = False
        np.random.seed(200)
        input_np = np.random.uniform(0.1, 0.8, self.input_shape).astype(
            "float64"
        )
        np.random.seed(200)
        label_np = np.random.randint(
            0, self.input_shape[1], self.label_shape
        ).astype("int64")
        output_np = nll_loss_2d(input_np, label_np, reduction='none')
        total_weight_np = np.array([0]).astype('float64')
        self.inputs = {'X': input_np, 'Label': label_np}
        if self.with_weight:
            np.random.seed(200)
            weight_np = np.random.uniform(0.1, 0.8, self.input_shape[1]).astype(
                "float64"
            )
            output_np, total_weight_np = nll_loss_2d(
                input_np, label_np, weight=weight_np, reduction='none'
            )
            self.inputs['Weight'] = weight_np

        self.outputs = {'Out': output_np, 'Total_weight': total_weight_np}
        self.attrs = {'reduction': 'none', 'ignore_index': -100}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_output_with_weight(self):
        self.with_weight = True
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.with_weight = True
        place = fluid.CPUPlace()
        self.check_grad_with_place(place, ['X'], 'Out', check_eager=True)
        if fluid.core.is_compiled_with_cuda():
            place = fluid.CUDAPlace(0)
            self.check_grad_with_place(place, ['X'], 'Out', check_eager=True)

    def init_test_case(self):
        self.input_shape = [5, 3, 5, 5]
        self.label_shape = [5, 5, 5]


class TestNLLLossName(unittest.TestCase):
    def test_name(self):
        prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        place = paddle.CPUPlace()
        with paddle.static.program_guard(prog, startup_prog):
            x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64')
            label = paddle.fluid.data(name='label', shape=[10], dtype='int64')
            nll_loss = paddle.nn.loss.NLLLoss(name='nll_loss')
            res = nll_loss(x, label)
            self.assertTrue(res.name.startswith('nll_loss'))


class TestNLLLossInvalidArgs(unittest.TestCase):
    def test_x_dim_value_error(self):
        def test_x_dim_lt_2():
            prog = paddle.static.Program()
            startup_prog = paddle.static.Program()
            place = paddle.CPUPlace()
            with paddle.static.program_guard(prog, startup_prog):
                x = paddle.fluid.data(
                    name='x',
                    shape=[
                        10,
                    ],
                    dtype='float64',
                )
                label = paddle.fluid.data(
                    name='label',
                    shape=[
                        10,
                    ],
                    dtype='float64',
                )
                nll_loss = paddle.nn.loss.NLLLoss()
                res = nll_loss(x, label)

        self.assertRaises(ValueError, test_x_dim_lt_2)

        def test_x_dim_imperative_lt_2():
            with fluid.dygraph.guard():
                x_np = np.random.random(size=(5,)).astype(np.float64)
                label_np = np.random.randint(0, 10, size=(5,)).astype(np.int64)
                x = paddle.to_tensor(x_np)
                label = paddle.to_tensor(label_np)
                nll_loss = paddle.nn.loss.NLLLoss()
                res = nll_loss(x, label)

        self.assertRaises(ValueError, test_x_dim_imperative_lt_2)

    def test_reduction_value_error(self):
        def test_NLLLoss_reduction_not_sum_mean_none():
            prog = paddle.static.Program()
            startup_prog = paddle.static.Program()
            place = paddle.CPUPlace()
            with paddle.static.program_guard(prog, startup_prog):
                x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64')
                label = paddle.fluid.data(
                    name='label', shape=[10], dtype='int64'
                )
                nll_loss = paddle.nn.loss.NLLLoss(reduction='')
                res = nll_loss(x, label)

        self.assertRaises(ValueError, test_NLLLoss_reduction_not_sum_mean_none)

        def test_NLLLoss_reduction_imperative_not_sum_mean_none():
            with fluid.dygraph.guard():
                x_np = np.random.random(size=(5, 3)).astype(np.float64)
                label_np = np.random.randint(0, 3, size=(5,)).astype(np.int64)
                x = paddle.to_tensor(x_np)
                label = paddle.to_tensor(label_np)
                nll_loss = paddle.nn.loss.NLLLoss(reduction='')
                res = nll_loss(x, label)

        self.assertRaises(
            ValueError, test_NLLLoss_reduction_imperative_not_sum_mean_none
        )

        def test_nll_loss_function_reduction_not_sum_mean_none():
            prog = paddle.static.Program()
            startup_prog = paddle.static.Program()
            place = paddle.CPUPlace()
            with paddle.static.program_guard(prog, startup_prog):
                x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64')
                label = paddle.fluid.data(
                    name='label', shape=[10], dtype='int64'
                )
                res = paddle.nn.functional.nll_loss(x, label, reduction='')

        self.assertRaises(
            ValueError, test_nll_loss_function_reduction_not_sum_mean_none
        )

        def test_nll_loss_function_reduction_imperative_not_sum_mean_none():
            with fluid.dygraph.guard():
                x_np = np.random.random(size=(5, 3)).astype(np.float64)
                label_np = np.random.randint(0, 3, size=(5,)).astype(np.int64)
                x = paddle.to_tensor(x_np)
                label = paddle.to_tensor(label_np)
                res = paddle.nn.functional.nll_loss(x, label, reduction='')

        self.assertRaises(
            ValueError,
            test_nll_loss_function_reduction_imperative_not_sum_mean_none,
        )


if __name__ == "__main__":
    paddle.enable_static()
    unittest.main()