# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle
import paddle.fluid as fluid
import numpy as np
import unittest
from op_test import OpTest
from paddle.fluid.framework import _test_eager_guard


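# NumPy reference implementation of NLL loss for 2-D input (N, C) with 1-D integer
# labels. For 'mean'/'sum' reduction it returns (loss, total_weight); for 'none' it
# returns the per-sample losses.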
def nll_loss_1d(logs,
                targets,
                weight=None,
                reduction='mean',
                ignore_index=-100):
    input_shape = logs.shape
    N = input_shape[0]
    C = input_shape[1]
    out = np.zeros_like(targets).astype(np.float64)
    total_weight = 0
    for i in range(N):
        cur_target = targets[i]
        if cur_target == ignore_index:
            out[i] = 0
            continue
        cur_weight = weight[cur_target] if weight is not None else 1
        total_weight += cur_weight
        out[i] = -logs[i][cur_target] * cur_weight
    if reduction == 'sum':
        return np.sum(out), np.array([total_weight]).astype('float64')
    elif reduction == 'mean':
        return out.sum() / total_weight, np.array([total_weight
                                                   ]).astype('float64')
    elif reduction == 'none':
        return out


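# NumPy reference implementation of NLL loss for 4-D input (N, C, H, W) with 3-D
# integer labels. For 'mean'/'sum' reduction it returns (loss, total_weight); for
# 'none' it returns the per-element losses.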
def nll_loss_2d(logs,
                targets,
                weight=None,
                reduction='mean',
                ignore_index=-100):
    input_shape = logs.shape
    N = input_shape[0]
    H = input_shape[2]
    W = input_shape[3]
    out = np.zeros_like(targets).astype(np.float64)
    total_weight = 0
    for i in range(N):
        for h in range(H):
            for w in range(W):
                cur_target = targets[i][h][w]
                if cur_target == ignore_index:
                    out[i][h][w] = 0
                    continue
                cur_weight = weight[cur_target] if weight is not None else 1
                total_weight += cur_weight
                out[i][h][w] = -logs[i][cur_target][h][w] * cur_weight
    if reduction == 'sum':
        return np.sum(out), np.array([total_weight]).astype('float64')
    elif reduction == 'mean':
        return out.sum() / total_weight, np.array([total_weight
                                                   ]).astype('float64')
    elif reduction == 'none':
        return out


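# Functional tests of paddle.nn.loss.NLLLoss: each case runs the layer in static
# graph mode and in dygraph mode (some cases additionally under _test_eager_guard)
# and compares the results against the NumPy reference implementations above.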
class TestNLLLoss(unittest.TestCase):

    def test_NLLLoss_1D_mean(self):
        np.random.seed(200)
        input_np = np.random.random(size=(10, 10)).astype(np.float64)
        np.random.seed(200)
        label_np = np.random.randint(0, 10, size=(10, )).astype(np.int64)
        prog = fluid.Program()
        startup_prog = fluid.Program()
        place = fluid.CUDAPlace(
            0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace()
        #place = fluid.CPUPlace()
        with fluid.program_guard(prog, startup_prog):
            input = fluid.data(name='input', shape=[10, 10], dtype='float64')
            label = fluid.data(name='label', shape=[10], dtype='int64')
            nll_loss = paddle.nn.loss.NLLLoss()
            res = nll_loss(input, label)

            exe = fluid.Executor(place)
            static_result = exe.run(prog,
                                    feed={
                                        "input": input_np,
                                        "label": label_np
                                    },
                                    fetch_list=[res])

        with fluid.dygraph.guard():
            nll_loss = paddle.nn.loss.NLLLoss()
            dy_res = nll_loss(paddle.to_tensor(input_np),
                              paddle.to_tensor(label_np))
            dy_result = dy_res.numpy()

        with fluid.dygraph.guard():
            with _test_eager_guard():
                nll_loss = paddle.nn.loss.NLLLoss()
                eager_res = nll_loss(paddle.to_tensor(input_np),
                                     paddle.to_tensor(label_np))
                eager_result = eager_res.numpy()

        expected = nll_loss_1d(input_np, label_np)[0]
        self.assertTrue(np.allclose(static_result, expected))
        self.assertTrue(np.allclose(static_result, dy_result))
        self.assertTrue(np.allclose(dy_result, expected))
        self.assertTrue(np.allclose(eager_result, expected))

    def test_NLLLoss_1D_sum(self):
        np.random.seed(200)
        input_np = np.random.random(size=(10, 10)).astype(np.float64)
        np.random.seed(200)
        label_np = np.random.randint(0, 10, size=(10, )).astype(np.int64)
        prog = fluid.Program()
        startup_prog = fluid.Program()
        place = fluid.CUDAPlace(
            0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace()
        #place = fluid.CPUPlace()
        with fluid.program_guard(prog, startup_prog):
            input = fluid.data(name='input', shape=[10, 10], dtype='float64')
            label = fluid.data(name='label', shape=[10], dtype='int64')
            nll_loss = paddle.nn.loss.NLLLoss(reduction='sum')
            res = nll_loss(input, label)

            exe = fluid.Executor(place)
            static_result = exe.run(prog,
                                    feed={
                                        "input": input_np,
                                        "label": label_np
                                    },
                                    fetch_list=[res])

        with fluid.dygraph.guard():
            nll_loss = paddle.nn.loss.NLLLoss(reduction='sum')
            dy_res = nll_loss(paddle.to_tensor(input_np),
                              paddle.to_tensor(label_np))
            dy_result = dy_res.numpy()

            with _test_eager_guard():
                nll_loss = paddle.nn.loss.NLLLoss(reduction='sum')
                in_t = paddle.to_tensor(input_np)
                label = paddle.to_tensor(label_np)
                in_t.stop_gradient = False
                eager_res = nll_loss(in_t, label)
                eager_result = eager_res.numpy()
                loss = eager_res.sum()
                loss.backward()

        expected = nll_loss_1d(input_np, label_np, reduction='sum')[0]
        self.assertTrue(np.allclose(static_result, expected))
        self.assertTrue(np.allclose(static_result, dy_result))
        self.assertTrue(np.allclose(dy_result, expected))
        self.assertTrue(np.allclose(eager_result, expected))

    def test_NLLLoss_1D_with_weight_mean(self):
        np.random.seed(200)
        input_np = np.random.random(size=(10, 10)).astype(np.float64)
        np.random.seed(200)
        label_np = np.random.randint(0, 10, size=(10, )).astype(np.int64)
        weight_np = np.random.random(size=(10, )).astype(np.float64)
        prog = fluid.Program()
        startup_prog = fluid.Program()
        place = fluid.CUDAPlace(
            0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace()
        # place = fluid.CPUPlace()
        with fluid.program_guard(prog, startup_prog):
            input = fluid.data(name='input', shape=[10, 10], dtype='float64')
            label = fluid.data(name='label', shape=[10], dtype='int64')
            weight = fluid.data(name='weight', shape=[10], dtype='float64')
            nll_loss = paddle.nn.loss.NLLLoss(weight=weight)
            res = nll_loss(input, label)

            exe = fluid.Executor(place)
            static_result = exe.run(prog,
                                    feed={
                                        "input": input_np,
                                        "label": label_np,
                                        "weight": weight_np
                                    },
                                    fetch_list=[res])

        with fluid.dygraph.guard():
            nll_loss = paddle.nn.loss.NLLLoss(
                weight=paddle.to_tensor(weight_np))
            dy_res = nll_loss(paddle.to_tensor(input_np),
                              paddle.to_tensor(label_np))
            dy_result = dy_res.numpy()

            with _test_eager_guard():
                nll_loss = paddle.nn.loss.NLLLoss(
                    weight=paddle.to_tensor(weight_np))
                eager_res = nll_loss(paddle.to_tensor(input_np),
                                     paddle.to_tensor(label_np))
                loss = eager_res.sum()
                loss.backward()
                eager_result = eager_res.numpy()

        expected = nll_loss_1d(input_np, label_np, weight=weight_np)[0]

        self.assertTrue(np.allclose(static_result, expected))
        self.assertTrue(np.allclose(static_result, dy_result))
        self.assertTrue(np.allclose(dy_result, expected))
        self.assertTrue(np.allclose(eager_result, expected))

    def test_NLLLoss_1D_with_weight_sum(self):
        np.random.seed(200)
        input_np = np.random.random(size=(10, 10)).astype(np.float64)
        np.random.seed(200)
        label_np = np.random.randint(0, 10, size=(10, )).astype(np.int64)
        weight_np = np.random.random(size=(10, )).astype(np.float64)
        prog = fluid.Program()
        startup_prog = fluid.Program()
        place = fluid.CUDAPlace(
            0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace()
        # place = fluid.CPUPlace()
        with fluid.program_guard(prog, startup_prog):
            input = fluid.data(name='input', shape=[10, 10], dtype='float64')
            label = fluid.data(name='label', shape=[10], dtype='int64')
            weight = fluid.data(name='weight', shape=[10], dtype='float64')
            nll_loss = paddle.nn.loss.NLLLoss(weight=weight, reduction='sum')
            res = nll_loss(input, label)

            exe = fluid.Executor(place)
            static_result = exe.run(prog,
                                    feed={
                                        "input": input_np,
                                        "label": label_np,
                                        "weight": weight_np
                                    },
                                    fetch_list=[res])

        with fluid.dygraph.guard():
            nll_loss = paddle.nn.loss.NLLLoss(
                weight=paddle.to_tensor(weight_np), reduction='sum')
            dy_res = nll_loss(paddle.to_tensor(input_np),
                              paddle.to_tensor(label_np))
            dy_result = dy_res.numpy()
        expected = nll_loss_1d(input_np,
                               label_np,
                               weight=weight_np,
                               reduction='sum')[0]

        self.assertTrue(np.allclose(static_result, expected))
        self.assertTrue(np.allclose(static_result, dy_result))
        self.assertTrue(np.allclose(dy_result, expected))

    def test_NLLLoss_1D_with_weight_mean_cpu(self):
        np.random.seed(200)
        input_np = np.random.random(size=(10, 10)).astype(np.float64)
        np.random.seed(200)
        label_np = np.random.randint(0, 10, size=(10, )).astype(np.int64)
        weight_np = np.random.random(size=(10, )).astype(np.float64)
        prog = fluid.Program()
        startup_prog = fluid.Program()
        place = fluid.CPUPlace()
        with fluid.program_guard(prog, startup_prog):
            input = fluid.data(name='input', shape=[10, 10], dtype='float64')
            label = fluid.data(name='label', shape=[10], dtype='int64')
            weight = fluid.data(name='weight', shape=[10], dtype='float64')
            nll_loss = paddle.nn.loss.NLLLoss(weight=weight)
            res = nll_loss(input, label)

            exe = fluid.Executor(place)
            static_result = exe.run(prog,
                                    feed={
                                        "input": input_np,
                                        "label": label_np,
                                        "weight": weight_np
                                    },
                                    fetch_list=[res])

        with fluid.dygraph.guard():
            nll_loss = paddle.nn.loss.NLLLoss(
                weight=paddle.to_tensor(weight_np))
            dy_res = nll_loss(paddle.to_tensor(input_np),
                              paddle.to_tensor(label_np))
            dy_result = dy_res.numpy()
        expected = nll_loss_1d(input_np, label_np, weight=weight_np)[0]

        self.assertTrue(np.allclose(static_result, expected))
        self.assertTrue(np.allclose(static_result, dy_result))
        self.assertTrue(np.allclose(dy_result, expected))

    def test_NLLLoss_1D_with_weight_no_reduce_cpu(self):
        np.random.seed(200)
        input_np = np.random.random(size=(10, 10)).astype(np.float64)
        np.random.seed(200)
        label_np = np.random.randint(0, 10, size=(10, )).astype(np.int64)
        weight_np = np.random.random(size=(10, )).astype(np.float64)
        prog = fluid.Program()
        startup_prog = fluid.Program()
        place = fluid.CPUPlace()
        with fluid.program_guard(prog, startup_prog):
            input = fluid.data(name='input', shape=[10, 10], dtype='float64')
            label = fluid.data(name='label', shape=[10], dtype='int64')
            weight = fluid.data(name='weight', shape=[10], dtype='float64')
            nll_loss = paddle.nn.loss.NLLLoss(weight=weight, reduction='none')
            res = nll_loss(input, label)

            exe = fluid.Executor(place)
            static_result = exe.run(prog,
                                    feed={
                                        "input": input_np,
                                        "label": label_np,
                                        "weight": weight_np
                                    },
                                    fetch_list=[res])

        with fluid.dygraph.guard():
            nll_loss = paddle.nn.loss.NLLLoss(
                weight=paddle.to_tensor(weight_np), reduction='none')
            dy_res = nll_loss(paddle.to_tensor(input_np),
                              paddle.to_tensor(label_np))
            dy_result = dy_res.numpy()
        expected = nll_loss_1d(input_np,
                               label_np,
                               weight=weight_np,
                               reduction='none')

        self.assertTrue(np.allclose(static_result, expected))
        self.assertTrue(np.allclose(static_result, dy_result))
        self.assertTrue(np.allclose(dy_result, expected))

    def test_NLLLoss_2D_mean(self):
        np.random.seed(200)
        input_np = np.random.random(size=(5, 3, 5, 5)).astype(np.float64)
        np.random.seed(200)
        label_np = np.random.randint(0, 3, size=(5, 5, 5)).astype(np.int64)
        prog = fluid.Program()
        startup_prog = fluid.Program()
        place = fluid.CUDAPlace(
            0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace()
        #place = fluid.CPUPlace()
        with fluid.program_guard(prog, startup_prog):
            input = fluid.data(name='input',
                               shape=[5, 3, 5, 5],
                               dtype='float64')
            label = fluid.data(name='label', shape=[5, 5, 5], dtype='int64')
            nll_loss = paddle.nn.loss.NLLLoss()
            res = nll_loss(input, label)

            exe = fluid.Executor(place)
            static_result = exe.run(prog,
                                    feed={
                                        "input": input_np,
                                        "label": label_np
                                    },
                                    fetch_list=[res])

        with fluid.dygraph.guard():
            nll_loss = paddle.nn.loss.NLLLoss()
            dy_res = nll_loss(paddle.to_tensor(input_np),
                              paddle.to_tensor(label_np))
            dy_result = dy_res.numpy()

        expected = nll_loss_2d(input_np, label_np)[0]

        self.assertTrue(np.allclose(static_result, expected))
        self.assertTrue(np.allclose(static_result, dy_result))
        self.assertTrue(np.allclose(dy_result, expected))

    def test_NLLLoss_2D_sum(self):
        np.random.seed(200)
        input_np = np.random.random(size=(5, 3, 5, 5)).astype(np.float64)
        np.random.seed(200)
        label_np = np.random.randint(0, 3, size=(5, 5, 5)).astype(np.int64)
        prog = fluid.Program()
        startup_prog = fluid.Program()
        place = fluid.CUDAPlace(
            0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace()
        #place = fluid.CPUPlace()
        with fluid.program_guard(prog, startup_prog):
            input = fluid.data(name='input',
                               shape=[5, 3, 5, 5],
                               dtype='float64')
            label = fluid.data(name='label', shape=[5, 5, 5], dtype='int64')
            nll_loss = paddle.nn.loss.NLLLoss(reduction='sum')
            res = nll_loss(input, label)

            exe = fluid.Executor(place)
            static_result = exe.run(prog,
                                    feed={
                                        "input": input_np,
                                        "label": label_np
                                    },
                                    fetch_list=[res])

        with fluid.dygraph.guard():
            nll_loss = paddle.nn.loss.NLLLoss(reduction='sum')
            dy_res = nll_loss(paddle.to_tensor(input_np),
                              paddle.to_tensor(label_np))
            dy_result = dy_res.numpy()

        expected = nll_loss_2d(input_np, label_np, reduction='sum')[0]

        self.assertTrue(np.allclose(static_result, expected))
        self.assertTrue(np.allclose(static_result, dy_result))
        self.assertTrue(np.allclose(dy_result, expected))

    def test_NLLLoss_2D_with_weight_mean(self):
        np.random.seed(200)
        input_np = np.random.random(size=(5, 3, 5, 5)).astype(np.float64)
        np.random.seed(200)
        label_np = np.random.randint(0, 3, size=(5, 5, 5)).astype(np.int64)
        weight_np = np.random.random(size=(3, )).astype(np.float64)
        prog = fluid.Program()
        startup_prog = fluid.Program()
        place = fluid.CUDAPlace(
            0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace()
        #place = fluid.CPUPlace()
        with fluid.program_guard(prog, startup_prog):
            input = fluid.data(name='input',
                               shape=[5, 3, 5, 5],
                               dtype='float64')
            label = fluid.data(name='label', shape=[5, 5, 5], dtype='int64')
            weight = fluid.data(name='weight', shape=[3], dtype='float64')

            nll_loss = paddle.nn.loss.NLLLoss(weight=weight)
            res = nll_loss(input, label)

            exe = fluid.Executor(place)
            static_result = exe.run(prog,
                                    feed={
                                        "input": input_np,
                                        "label": label_np,
                                        "weight": weight_np
                                    },
                                    fetch_list=[res])

        with fluid.dygraph.guard():
            nll_loss = paddle.nn.loss.NLLLoss(
                weight=paddle.to_tensor(weight_np))
            dy_res = nll_loss(paddle.to_tensor(input_np),
                              paddle.to_tensor(label_np))
            dy_result = dy_res.numpy()

        expected = nll_loss_2d(input_np, label_np, weight=weight_np)[0]

        self.assertTrue(np.allclose(static_result, expected))
        self.assertTrue(np.allclose(static_result, dy_result))
        self.assertTrue(np.allclose(dy_result, expected))

    def test_NLLLoss_2D_with_weight_mean_cpu(self):
        np.random.seed(200)
        input_np = np.random.random(size=(5, 3, 5, 5)).astype(np.float64)
        np.random.seed(200)
        label_np = np.random.randint(0, 3, size=(5, 5, 5)).astype(np.int64)
        weight_np = np.random.random(size=(3, )).astype(np.float64)
        prog = fluid.Program()
        startup_prog = fluid.Program()
        place = fluid.CPUPlace()
        with fluid.program_guard(prog, startup_prog):
            input = fluid.data(name='input',
                               shape=[5, 3, 5, 5],
                               dtype='float64')
            label = fluid.data(name='label', shape=[5, 5, 5], dtype='int64')
            weight = fluid.data(name='weight', shape=[3], dtype='float64')

            nll_loss = paddle.nn.loss.NLLLoss(weight=weight)
            res = nll_loss(input, label)

            exe = fluid.Executor(place)
            static_result = exe.run(prog,
                                    feed={
                                        "input": input_np,
                                        "label": label_np,
                                        "weight": weight_np
                                    },
                                    fetch_list=[res])

        with fluid.dygraph.guard():
            nll_loss = paddle.nn.loss.NLLLoss(
                weight=paddle.to_tensor(weight_np))
            dy_res = nll_loss(paddle.to_tensor(input_np),
                              paddle.to_tensor(label_np))
            dy_result = dy_res.numpy()

        expected = nll_loss_2d(input_np, label_np, weight=weight_np)[0]

        self.assertTrue(np.allclose(static_result, expected))
        self.assertTrue(np.allclose(static_result, dy_result))
        self.assertTrue(np.allclose(dy_result, expected))

    def test_NLLLoss_2D_with_weight_sum(self):
        np.random.seed(200)
        input_np = np.random.random(size=(5, 3, 5, 5)).astype(np.float64)
        np.random.seed(200)
        label_np = np.random.randint(0, 3, size=(5, 5, 5)).astype(np.int64)
        weight_np = np.random.random(size=(3, )).astype(np.float64)
        prog = fluid.Program()
        startup_prog = fluid.Program()
        place = fluid.CUDAPlace(
            0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace()
        with fluid.program_guard(prog, startup_prog):
            input = fluid.data(name='input',
                               shape=[5, 3, 5, 5],
                               dtype='float64')
            label = fluid.data(name='label', shape=[5, 5, 5], dtype='int64')
            weight = fluid.data(name='weight', shape=[3], dtype='float64')

            nll_loss = paddle.nn.loss.NLLLoss(weight=weight, reduction='sum')
            res = nll_loss(input, label)

            exe = fluid.Executor(place)
            static_result = exe.run(prog,
                                    feed={
                                        "input": input_np,
                                        "label": label_np,
                                        "weight": weight_np
                                    },
                                    fetch_list=[res])

        with fluid.dygraph.guard():
            nll_loss = paddle.nn.loss.NLLLoss(
                weight=paddle.to_tensor(weight_np), reduction='sum')
            dy_res = nll_loss(paddle.to_tensor(input_np),
                              paddle.to_tensor(label_np))
            dy_result = dy_res.numpy()

        expected = nll_loss_2d(input_np,
                               label_np,
                               weight=weight_np,
                               reduction='sum')[0]

        self.assertTrue(np.allclose(static_result, expected))
        self.assertTrue(np.allclose(static_result, dy_result))
        self.assertTrue(np.allclose(dy_result, expected))

    def test_NLLLoss_in_dims_not_2or4_mean(self):
        np.random.seed(200)
        input_np = np.random.random(size=(5, 3, 5, 5, 5)).astype(np.float64)
        np.random.seed(200)
        label_np = np.random.randint(0, 3, size=(5, 5, 5, 5)).astype(np.int64)
        prog = fluid.Program()
        startup_prog = fluid.Program()
        place = fluid.CUDAPlace(
            0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace()
        #place = fluid.CPUPlace()
        with fluid.program_guard(prog, startup_prog):
            input = fluid.data(name='input',
                               shape=[5, 3, 5, 5, 5],
                               dtype='float64')
            label = fluid.data(name='label', shape=[5, 5, 5, 5], dtype='int64')
            nll_loss = paddle.nn.loss.NLLLoss()
            res = nll_loss(input, label)

            exe = fluid.Executor(place)
            static_result = exe.run(prog,
                                    feed={
                                        "input": input_np,
                                        "label": label_np
                                    },
                                    fetch_list=[res])

        with fluid.dygraph.guard():
            nll_loss = paddle.nn.loss.NLLLoss()
            dy_res = nll_loss(paddle.to_tensor(input_np),
                              paddle.to_tensor(label_np))
            dy_result = dy_res.numpy()

        input_shape = input_np.shape
        label_shape = label_np.shape
        input_np_reshape = np.reshape(input_np,
                                      (input_shape[0], input_shape[1], 1, -1))
        label_np_reshape = np.reshape(label_np, (label_shape[0], 1, -1))
        expected = nll_loss_2d(input_np_reshape, label_np_reshape)[0]

        self.assertTrue(np.allclose(static_result, expected))
        self.assertTrue(np.allclose(static_result, dy_result))
        self.assertTrue(np.allclose(dy_result, expected))

    def test_NLLLoss_in_dims_not_2or4_with_weight_mean(self):
        np.random.seed(200)
        input_np = np.random.random(size=(5, 3, 5, 5, 5)).astype(np.float64)
        np.random.seed(200)
        label_np = np.random.randint(0, 3, size=(5, 5, 5, 5)).astype(np.int64)
        weight_np = np.random.random(size=(3, )).astype(np.float64)
        prog = fluid.Program()
        startup_prog = fluid.Program()
        place = fluid.CUDAPlace(
            0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace()
        #place = fluid.CPUPlace()
        with fluid.program_guard(prog, startup_prog):
            input = fluid.data(name='input',
                               shape=[5, 3, 5, 5, 5],
                               dtype='float64')
            label = fluid.data(name='label', shape=[5, 5, 5, 5], dtype='int64')
            weight = fluid.data(name='weight', shape=[3], dtype='float64')
            nll_loss = paddle.nn.loss.NLLLoss(weight=weight)
            res = nll_loss(input, label)

            exe = fluid.Executor(place)
            static_result = exe.run(prog,
                                    feed={
                                        "input": input_np,
                                        "label": label_np,
                                        "weight": weight_np
                                    },
                                    fetch_list=[res])

        with fluid.dygraph.guard():
            nll_loss = paddle.nn.loss.NLLLoss(
                weight=paddle.to_tensor(weight_np))
            dy_res = nll_loss(paddle.to_tensor(input_np),
                              paddle.to_tensor(label_np))
            dy_result = dy_res.numpy()

        input_shape = input_np.shape
        label_shape = label_np.shape
        input_np_reshape = np.reshape(input_np,
                                      (input_shape[0], input_shape[1], 1, -1))
        label_np_reshape = np.reshape(label_np, (label_shape[0], 1, -1))
        expected = nll_loss_2d(input_np_reshape,
                               label_np_reshape,
                               weight=weight_np)[0]

        self.assertTrue(np.allclose(static_result, expected))
        self.assertTrue(np.allclose(static_result, dy_result))
        self.assertTrue(np.allclose(dy_result, expected))

    def test_NLLLoss_in_dims_not_2or4_with_weight_sum(self):
        np.random.seed(200)
        input_np = np.random.random(size=(5, 3, 5, 5, 5)).astype(np.float64)
        np.random.seed(200)
        label_np = np.random.randint(0, 3, size=(5, 5, 5, 5)).astype(np.int64)
        weight_np = np.random.random(size=(3, )).astype(np.float64)
        prog = fluid.Program()
        startup_prog = fluid.Program()
        place = fluid.CUDAPlace(
            0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace()
        place = fluid.CPUPlace()
        with fluid.program_guard(prog, startup_prog):
            input = fluid.data(name='input',
                               shape=[5, 3, 5, 5, 5],
                               dtype='float64')
            label = fluid.data(name='label', shape=[5, 5, 5, 5], dtype='int64')
            weight = fluid.data(name='weight', shape=[3], dtype='float64')
            nll_loss = paddle.nn.loss.NLLLoss(weight=weight, reduction='sum')
            res = nll_loss(input, label)

            exe = fluid.Executor(place)
            static_result = exe.run(prog,
                                    feed={
                                        "input": input_np,
                                        "label": label_np,
                                        "weight": weight_np
                                    },
                                    fetch_list=[res])

        with fluid.dygraph.guard():
            nll_loss = paddle.nn.loss.NLLLoss(
                weight=paddle.to_tensor(weight_np), reduction='sum')
            dy_res = nll_loss(paddle.to_tensor(input_np),
                              paddle.to_tensor(label_np))
            dy_result = dy_res.numpy()

        input_shape = input_np.shape
        label_shape = label_np.shape
        input_np_reshape = np.reshape(input_np,
                                      (input_shape[0], input_shape[1], 1, -1))
        label_np_reshape = np.reshape(label_np, (label_shape[0], 1, -1))
        expected = nll_loss_2d(input_np_reshape,
                               label_np_reshape,
                               weight=weight_np,
                               reduction='sum')[0]

        self.assertTrue(np.allclose(static_result, expected))
        self.assertTrue(np.allclose(static_result, dy_result))
        self.assertTrue(np.allclose(dy_result, expected))

    def test_NLLLoss_in_dims_not_2or4_with_weight_no_reduce(self):
        np.random.seed(200)
        input_np = np.random.random(size=(5, 3, 5, 5, 5)).astype(np.float64)
        np.random.seed(200)
        label_np = np.random.randint(0, 3, size=(5, 5, 5, 5)).astype(np.int64)
        weight_np = np.random.random(size=(3, )).astype(np.float64)
        prog = fluid.Program()
        startup_prog = fluid.Program()
        place = fluid.CUDAPlace(
            0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace()
        #place = fluid.CPUPlace()
        with fluid.program_guard(prog, startup_prog):
            input = fluid.data(name='input',
                               shape=[5, 3, 5, 5, 5],
                               dtype='float64')
            label = fluid.data(name='label', shape=[5, 5, 5, 5], dtype='int64')
            weight = fluid.data(name='weight', shape=[3], dtype='float64')
            nll_loss = paddle.nn.loss.NLLLoss(weight=weight, reduction='none')
            res = nll_loss(input, label)

            exe = fluid.Executor(place)
            static_result = exe.run(prog,
                                    feed={
                                        "input": input_np,
                                        "label": label_np,
                                        "weight": weight_np
                                    },
                                    fetch_list=[res])

        with fluid.dygraph.guard():
            nll_loss = paddle.nn.loss.NLLLoss(
                weight=paddle.to_tensor(weight_np), reduction='none')
            dy_res = nll_loss(paddle.to_tensor(input_np),
                              paddle.to_tensor(label_np))
            dy_result = dy_res.numpy()

        input_shape = input_np.shape
        label_shape = label_np.shape
        out_shape = (input_shape[0], ) + input_shape[2:]
        input_np_reshape = np.reshape(input_np,
                                      (input_shape[0], input_shape[1], 1, -1))
        label_np_reshape = np.reshape(label_np, (label_shape[0], 1, -1))
        expected = nll_loss_2d(input_np_reshape,
                               label_np_reshape,
                               weight=weight_np,
                               reduction='none')
        expected = np.reshape(expected, out_shape)
        self.assertTrue(np.allclose(static_result, expected))
        self.assertTrue(np.allclose(static_result, dy_result))
        self.assertTrue(np.allclose(dy_result, expected))

    def test_NLLLoss_in_dims_not_2or4_with_weight_no_reduce_cpu(self):
        np.random.seed(200)
        input_np = np.random.random(size=(5, 3, 5, 5, 5)).astype(np.float64)
        np.random.seed(200)
        label_np = np.random.randint(0, 3, size=(5, 5, 5, 5)).astype(np.int64)
        weight_np = np.random.random(size=(3, )).astype(np.float64)
        prog = fluid.Program()
        startup_prog = fluid.Program()
        place = fluid.CPUPlace()
        with fluid.program_guard(prog, startup_prog):
            input = fluid.data(name='input',
                               shape=[5, 3, 5, 5, 5],
                               dtype='float64')
            label = fluid.data(name='label', shape=[5, 5, 5, 5], dtype='int64')
            weight = fluid.data(name='weight', shape=[3], dtype='float64')
            nll_loss = paddle.nn.loss.NLLLoss(weight=weight, reduction='none')
            res = nll_loss(input, label)

            exe = fluid.Executor(place)
            static_result = exe.run(prog,
                                    feed={
                                        "input": input_np,
                                        "label": label_np,
                                        "weight": weight_np
                                    },
                                    fetch_list=[res])

        with fluid.dygraph.guard():
            nll_loss = paddle.nn.loss.NLLLoss(
                weight=paddle.to_tensor(weight_np), reduction='none')
            dy_res = nll_loss(paddle.to_tensor(input_np),
                              paddle.to_tensor(label_np))
            dy_result = dy_res.numpy()

        input_shape = input_np.shape
        label_shape = label_np.shape
        out_shape = (input_shape[0], ) + input_shape[2:]
        input_np_reshape = np.reshape(input_np,
                                      (input_shape[0], input_shape[1], 1, -1))
        label_np_reshape = np.reshape(label_np, (label_shape[0], 1, -1))
        expected = nll_loss_2d(input_np_reshape,
                               label_np_reshape,
                               weight=weight_np,
                               reduction='none')
        expected = np.reshape(expected, out_shape)
        self.assertTrue(np.allclose(static_result, expected))
        self.assertTrue(np.allclose(static_result, dy_result))
        self.assertTrue(np.allclose(dy_result, expected))


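# OpTest-based checks that exercise the nll_loss operator directly, verifying the
# forward outputs ('Out', 'Total_weight') and the gradient with respect to 'X'.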
class TestNLLLossOp1DWithReduce(OpTest):

    def setUp(self):
        self.init_test_case()
        self.op_type = "nll_loss"
        self.python_api = paddle.nn.functional.nll_loss
        self.python_out_sig = ["Out"]
        self.with_weight = False
        np.random.seed(200)
        input_np = np.random.uniform(0.1, 0.8,
                                     self.input_shape).astype("float64")
        np.random.seed(200)
        label_np = np.random.randint(0, self.input_shape[1],
                                     self.label_shape).astype("int64")
        output_np, total_weight_np = nll_loss_1d(input_np, label_np)
        self.inputs = {'X': input_np, 'Label': label_np}
        if self.with_weight:
            np.random.seed(200)
            weight_np = np.random.uniform(0.1, 0.8,
                                          self.input_shape[1]).astype("float64")
            output_np, total_weight_np = nll_loss_1d(input_np,
                                                     label_np,
                                                     weight=weight_np)
            self.inputs['Weight'] = weight_np

        self.outputs = {'Out': output_np, 'Total_weight': total_weight_np}
        self.attrs = {'reduction': 'mean', 'ignore_index': -100}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_output_with_weight(self):
        self.with_weight = True
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.with_weight = True
        place = fluid.CPUPlace()
        self.check_grad_with_place(place, ['X'], 'Out', check_eager=True)
        if fluid.core.is_compiled_with_cuda():
            place = fluid.CUDAPlace(0)
            self.check_grad_with_place(place, ['X'], 'Out', check_eager=True)

    def init_test_case(self):
        self.input_shape = [10, 10]
        self.label_shape = [10]


class TestNLLLossOp1DNoReduce(OpTest):

    def setUp(self):
        self.init_test_case()
        self.op_type = "nll_loss"
        self.python_api = paddle.nn.functional.nll_loss
        self.python_out_sig = ["Out"]
        self.with_weight = False
        np.random.seed(200)
        input_np = np.random.uniform(0.1, 0.8,
                                     self.input_shape).astype("float64")
        np.random.seed(200)
        label_np = np.random.randint(0, self.input_shape[1],
                                     self.label_shape).astype("int64")
        output_np = nll_loss_1d(input_np, label_np, reduction='none')
        total_weight_np = np.array([0]).astype('float64')
        self.inputs = {'X': input_np, 'Label': label_np}
        if self.with_weight:
            np.random.seed(200)
            weight_np = np.random.uniform(0.1, 0.8,
                                          self.input_shape[1]).astype("float64")
            output_np, total_weight_np = nll_loss_1d(input_np,
                                                     label_np,
                                                     weight=weight_np,
                                                     reduction='none')
            self.inputs['Weight'] = weight_np

        self.outputs = {'Out': output_np, 'Total_weight': total_weight_np}
        self.attrs = {'reduction': 'none', 'ignore_index': -100}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_output_with_weight(self):
        self.with_weight = True
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.with_weight = True
        place = fluid.CPUPlace()
        self.check_grad_with_place(place, ['X'], 'Out', check_eager=True)
        if fluid.core.is_compiled_with_cuda():
            place = fluid.CUDAPlace(0)
            self.check_grad_with_place(place, ['X'], 'Out', check_eager=True)

    def init_test_case(self):
        self.input_shape = [10, 10]
        self.label_shape = [10]


class TestNLLLossOp2DWithReduce(OpTest):

    def setUp(self):
        self.init_test_case()
        self.op_type = "nll_loss"
        self.python_api = paddle.nn.functional.nll_loss
        self.python_out_sig = ["Out"]
        self.with_weight = False
        np.random.seed(200)
        input_np = np.random.uniform(0.1, 0.8,
                                     self.input_shape).astype("float64")
        np.random.seed(200)
        label_np = np.random.randint(0, self.input_shape[1],
                                     self.label_shape).astype("int64")
        output_np, total_weight_np = nll_loss_2d(input_np, label_np)
        self.inputs = {'X': input_np, 'Label': label_np}
        if self.with_weight:
            np.random.seed(200)
            weight_np = np.random.uniform(0.1, 0.8,
                                          self.input_shape[1]).astype("float64")
            output_np, total_weight_np = nll_loss_2d(input_np,
                                                     label_np,
                                                     weight=weight_np)
            self.inputs['Weight'] = weight_np

        self.outputs = {'Out': output_np, 'Total_weight': total_weight_np}
        self.attrs = {'reduction': 'mean', 'ignore_index': -100}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_output_with_weight(self):
        self.with_weight = True
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.with_weight = True
        place = fluid.CPUPlace()
        self.check_grad_with_place(place, ['X'], 'Out', check_eager=True)
        if fluid.core.is_compiled_with_cuda():
            place = fluid.CUDAPlace(0)
            self.check_grad_with_place(place, ['X'], 'Out', check_eager=True)

    def init_test_case(self):
        self.input_shape = [2, 3, 5, 5]
        self.label_shape = [2, 5, 5]


class TestNLLLossOp2DNoReduce(OpTest):

    def setUp(self):
        self.init_test_case()
        self.op_type = "nll_loss"
        self.python_api = paddle.nn.functional.nll_loss
        self.python_out_sig = ["Out"]
        self.with_weight = False
        np.random.seed(200)
        input_np = np.random.uniform(0.1, 0.8,
                                     self.input_shape).astype("float64")
        np.random.seed(200)
        label_np = np.random.randint(0, self.input_shape[1],
                                     self.label_shape).astype("int64")
        output_np = nll_loss_2d(input_np, label_np, reduction='none')
        total_weight_np = np.array([0]).astype('float64')
        self.inputs = {'X': input_np, 'Label': label_np}
        if self.with_weight:
            np.random.seed(200)
            weight_np = np.random.uniform(0.1, 0.8,
                                          self.input_shape[1]).astype("float64")
            output_np, total_weight_np = nll_loss_2d(input_np,
                                                     label_np,
                                                     weight=weight_np,
                                                     reduction='none')
            self.inputs['Weight'] = weight_np

        self.outputs = {'Out': output_np, 'Total_weight': total_weight_np}
        self.attrs = {'reduction': 'none', 'ignore_index': -100}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_output_with_weight(self):
        self.with_weight = True
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.with_weight = True
        place = fluid.CPUPlace()
        self.check_grad_with_place(place, ['X'], 'Out', check_eager=True)
        if fluid.core.is_compiled_with_cuda():
            place = fluid.CUDAPlace(0)
            self.check_grad_with_place(place, ['X'], 'Out', check_eager=True)

    def init_test_case(self):
        self.input_shape = [5, 3, 5, 5]
        self.label_shape = [5, 5, 5]


class TestNLLLossName(unittest.TestCase):

    def test_name(self):
        prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        place = paddle.CPUPlace()
        with paddle.static.program_guard(prog, startup_prog):
            x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64')
            label = paddle.fluid.data(name='label', shape=[10], dtype='int64')
            nll_loss = paddle.nn.loss.NLLLoss(name='nll_loss')
            res = nll_loss(x, label)
            self.assertTrue(res.name.startswith('nll_loss'))


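# Error-handling tests: inputs with rank < 2 and unsupported reduction values are
# expected to raise ValueError in both static graph and imperative modes.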
class TestNLLLossInvalidArgs(unittest.TestCase):

    def test_x_dim_value_error(self):

        def test_x_dim_lt_2():
            prog = paddle.static.Program()
            startup_prog = paddle.static.Program()
            place = paddle.CPUPlace()
            with paddle.static.program_guard(prog, startup_prog):
                x = paddle.fluid.data(name='x', shape=[
                    10,
                ], dtype='float64')
                label = paddle.fluid.data(name='label',
                                          shape=[
                                              10,
                                          ],
                                          dtype='float64')
                nll_loss = paddle.nn.loss.NLLLoss()
                res = nll_loss(x, label)

        self.assertRaises(ValueError, test_x_dim_lt_2)

        def test_x_dim_imperative_lt_2():
            with fluid.dygraph.guard():
                x_np = np.random.random(size=(5, )).astype(np.float64)
                label_np = np.random.randint(0, 10, size=(5, )).astype(np.int64)
                x = paddle.to_tensor(x_np)
                label = paddle.to_tensor(label_np)
                nll_loss = paddle.nn.loss.NLLLoss()
                res = nll_loss(x, label)

        self.assertRaises(ValueError, test_x_dim_imperative_lt_2)

    def test_reduction_value_error(self):

        def test_NLLLoss_reduction_not_sum_mean_none():
            prog = paddle.static.Program()
            startup_prog = paddle.static.Program()
            place = paddle.CPUPlace()
            with paddle.static.program_guard(prog, startup_prog):
                x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64')
                label = paddle.fluid.data(name='label',
                                          shape=[10],
                                          dtype='int64')
                nll_loss = paddle.nn.loss.NLLLoss(reduction='')
                res = nll_loss(x, label)

        self.assertRaises(ValueError, test_NLLLoss_reduction_not_sum_mean_none)

        def test_NLLLoss_reduction_imperative_not_sum_mean_none():
            with fluid.dygraph.guard():
                x_np = np.random.random(size=(5, 3)).astype(np.float64)
                label_np = np.random.randint(0, 3, size=(5, )).astype(np.int64)
                x = paddle.to_tensor(x_np)
                label = paddle.to_tensor(label_np)
                nll_loss = paddle.nn.loss.NLLLoss(reduction='')
                res = nll_loss(x, label)

        self.assertRaises(ValueError,
                          test_NLLLoss_reduction_imperative_not_sum_mean_none)

        def test_nll_loss_function_reduction_not_sum_mean_none():
            prog = paddle.static.Program()
            startup_prog = paddle.static.Program()
            place = paddle.CPUPlace()
            with paddle.static.program_guard(prog, startup_prog):
                x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64')
                label = paddle.fluid.data(name='label',
                                          shape=[10],
                                          dtype='int64')
                res = paddle.nn.functional.nll_loss(x, label, reduction='')

        self.assertRaises(ValueError,
                          test_nll_loss_function_reduction_not_sum_mean_none)

        def test_nll_loss_function_reduction_imperative_not_sum_mean_none():
            with fluid.dygraph.guard():
                x_np = np.random.random(size=(5, 3)).astype(np.float64)
                label_np = np.random.randint(0, 3, size=(5, )).astype(np.int64)
                x = paddle.to_tensor(x_np)
                label = paddle.to_tensor(label_np)
                res = paddle.nn.functional.nll_loss(x, label, reduction='')

        self.assertRaises(
            ValueError,
            test_nll_loss_function_reduction_imperative_not_sum_mean_none)


if __name__ == "__main__":
    paddle.enable_static()
    unittest.main()