#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
import paddle.fluid.core as core
from op_test import OpTest, skip_check_grad_ci, convert_float_to_uint16
import paddle
import paddle.static as static
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
from paddle.fluid.framework import _test_eager_guard, _enable_legacy_dygraph
import os

from paddle import _C_ops, _legacy_C_ops


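# With dropout_prob=0.0 dropout is an identity: Out equals X and Mask is all ones.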
class TestDropoutOp(OpTest):

    def setUp(self):
        self.op_type = "dropout"
        self.inputs = {'X': np.random.random((32, 64)).astype("float32")}
        self.attrs = {'dropout_prob': 0.0, 'fix_seed': True, 'is_test': False}
        self.outputs = {
            'Out': self.inputs['X'],
            'Mask': np.ones((32, 64)).astype('uint8')
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['X'], 'Out')


class TestDropoutOpInput1d(OpTest):

    def setUp(self):
        self.op_type = "dropout"
        self.inputs = {'X': np.random.random((2000, )).astype("float32")}
        self.attrs = {'dropout_prob': 0.0, 'fix_seed': True, 'is_test': False}
        self.outputs = {
            'Out': self.inputs['X'],
            'Mask': np.ones((2000)).astype('uint8')
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['X'], 'Out')


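# With dropout_prob=1.0 every element is dropped: Out and Mask are all zeros.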
class TestDropoutOp2(TestDropoutOp):

    def setUp(self):
        self.op_type = "dropout"
        self.inputs = {'X': np.random.random((32, 64)).astype("float32")}
        self.attrs = {'dropout_prob': 1.0, 'fix_seed': True, 'is_test': False}
        self.outputs = {
            'Out': np.zeros((32, 64)).astype('float32'),
            'Mask': np.zeros((32, 64)).astype('uint8')
        }


class TestDropoutOp3(TestDropoutOp):

    def setUp(self):
        self.op_type = "dropout"
        self.inputs = {'X': np.random.random((32, 64, 2)).astype("float32")}
        self.attrs = {'dropout_prob': 0.0, 'fix_seed': True, 'is_test': False}
        self.outputs = {
            'Out': self.inputs['X'],
            'Mask': np.ones((32, 64, 2)).astype('uint8')
        }


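# Inference mode (is_test=True) with the default implementation scales the
# output by (1 - dropout_prob) and emits no mask.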
@skip_check_grad_ci(reason="For inference, check_grad is not required.")
class TestDropoutOp4(OpTest):

    def setUp(self):
        self.op_type = "dropout"
        self.inputs = {'X': np.random.random((32, 64)).astype("float32")}
        self.attrs = {'dropout_prob': 0.35, 'fix_seed': True, 'is_test': True}
        self.outputs = {
            'Out': self.inputs['X'] * (1.0 - self.attrs['dropout_prob'])
        }

    def test_check_output(self):
        self.check_output()


@skip_check_grad_ci(reason="For inference, check_grad is not required.")
class TestDropoutOp5(OpTest):

    def setUp(self):
        self.op_type = "dropout"
        self.inputs = {'X': np.random.random((32, 64, 3)).astype("float32")}
        self.attrs = {'dropout_prob': 0.75, 'is_test': True}
        self.outputs = {
            'Out': self.inputs['X'] * (1.0 - self.attrs['dropout_prob'])
        }

    def test_check_output(self):
        self.check_output()


class TestDropoutOp6(TestDropoutOp):

    def setUp(self):
        self.op_type = "dropout"
        self.inputs = {'X': np.random.random((32, 64)).astype("float32")}
        self.attrs = {
            'dropout_prob': 1.0,
            'fix_seed': True,
            'is_test': False,
            'dropout_implementation': 'upscale_in_train'
        }
        self.outputs = {
            'Out': np.zeros((32, 64)).astype('float32'),
            'Mask': np.zeros((32, 64)).astype('uint8')
        }


class TestDropoutOp7(TestDropoutOp):

    def setUp(self):
        self.op_type = "dropout"
        self.inputs = {'X': np.random.random((32, 64, 2)).astype("float32")}
        self.attrs = {
            'dropout_prob': 0.0,
            'fix_seed': True,
            'is_test': False,
            'dropout_implementation': 'upscale_in_train'
        }
        self.outputs = {
            'Out': self.inputs['X'],
            'Mask': np.ones((32, 64, 2)).astype('uint8')
        }


@skip_check_grad_ci(reason="For inference, check_grad is not required.")
class TestDropoutOp8(OpTest):

    def setUp(self):
        self.op_type = "dropout"
        self.inputs = {'X': np.random.random((32, 64)).astype("float32")}
        self.attrs = {
            'dropout_prob': 0.35,
            'fix_seed': True,
            'is_test': True,
            'dropout_implementation': 'upscale_in_train'
        }
        self.outputs = {'Out': self.inputs['X']}

    def test_check_output(self):
        self.check_output()


@skip_check_grad_ci(reason="For inference, check_grad is not required.")
class TestDropoutOp9(OpTest):

    def setUp(self):
        self.op_type = "dropout"
        self.inputs = {'X': np.random.random((32, 64, 3)).astype("float32")}
        self.attrs = {
            'dropout_prob': 0.75,
            'is_test': True,
            'dropout_implementation': 'upscale_in_train'
        }
        self.outputs = {'Out': self.inputs['X']}

    def test_check_output(self):
        self.check_output()


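# Provides the seed through the optional 'Seed' input tensor rather than the
# fix_seed/seed attributes.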
class TestDropoutOpWithSeed(OpTest):

    def setUp(self):
        self.op_type = "dropout"
        self.inputs = {
            "X": np.random.random((32, 64)).astype("float32"),
            "Seed": np.asarray([125], dtype="int32")
        }
        self.attrs = {
            'dropout_prob': 0.0,
        }
        self.outputs = {
            'Out': self.inputs['X'],
            'Mask': np.ones((32, 64)).astype('uint8')
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.05)


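# float16 forward path in inference mode; runs only when CUDA is available and
# the dropout op supports GPU.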
@unittest.skipIf(
    not core.is_compiled_with_cuda() or not core.op_support_gpu("dropout"),
    "core is not compiled with CUDA or does not support the dropout op")
@skip_check_grad_ci(reason="For inference, check_grad is not required.")
class TestFP16DropoutOp(OpTest):

    def setUp(self):
        self.op_type = "dropout"
        self.init_test_case()

        x = np.random.random(self.input_size).astype("float16")
        out = x * (1.0 - self.prob)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {
            'dropout_prob': self.prob,
            'fix_seed': self.fix_seed,
            'is_test': True
        }
        self.outputs = {'Out': out}

    def init_test_case(self):
        self.input_size = [32, 64]
        self.prob = 0.35
        self.fix_seed = True

    def test_check_output(self):
        self.check_output_with_place(core.CUDAPlace(0), atol=1e-3)


@unittest.skipIf(
    not core.is_compiled_with_cuda() or not core.op_support_gpu("dropout"),
    "core is not compiled with CUDA or does not support the dropout op")
@skip_check_grad_ci(reason="For inference, check_grad is not required.")
class TestFP16DropoutOp2(TestFP16DropoutOp):

    def init_test_case(self):
        self.input_size = [32, 64, 3]
        self.prob = 0.75
        self.fix_seed = False


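# bfloat16 path: tensors are carried as uint16 via convert_float_to_uint16.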
class TestBF16DropoutOp(OpTest):

    def setUp(self):
        self.op_type = "dropout"
        self.dtype = np.uint16

        x = np.random.random((32, 64)).astype("float32")
        self.inputs = {'X': convert_float_to_uint16(x)}
        self.attrs = {'dropout_prob': 1.0, 'fix_seed': True, 'is_test': False}
        self.outputs = {
            'Out':
            convert_float_to_uint16(np.zeros((32, 64)).astype('float32')),
            'Mask': np.zeros((32, 64)).astype('uint8')
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['X'], 'Out')


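# Builds the program by hand to verify that the seed op can be pinned to the
# CPU (force_cpu=True) while dropout itself may run on GPU.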
class TestDropoutOpWithSeedOnCPUPlace(unittest.TestCase):

    def test_seed_cpu_place(self):
        paddle.enable_static()
        main_program = Program()
        with program_guard(main_program):
            seed_input_name = "tensor@SeedInput"
            x_var_name = "tensor@X"
            x_out_var = "tensor@XOut"

            mask_var_name = "tensor@Mask"
            seed_input_var = main_program.global_block().create_var(
                name=seed_input_name,
                shape=[1],
                dtype='int32',
                persistable=False,
                stop_gradient=True)
            x_out_var = main_program.global_block().create_var(
                name=x_out_var,
                shape=[40, 40],
                dtype='float32',
                persistable=False,
                stop_gradient=True)
            x_var = main_program.global_block().create_var(name=x_var_name,
                                                           shape=[40, 40],
                                                           dtype='float32',
                                                           persistable=False,
                                                           stop_gradient=True)
            mask_var = main_program.global_block().create_var(
                name=mask_var_name,
                shape=[1],
                dtype='int',
                persistable=False,
                stop_gradient=True)

            main_program.global_block().append_op(type="fill_constant",
                                                  outputs={"Out": x_var_name},
                                                  attrs={
                                                      "shape": [40, 40],
                                                      "dtype": x_var.dtype,
                                                      "value": 1.0,
                                                      "place_type": 0
                                                  })
            main_program.global_block().append_op(
                type='seed',
                inputs={},
                outputs={'Out': seed_input_var},
                attrs={
                    'seed': 1,
                    'force_cpu': True
                })
            main_program.global_block().append_op(type='dropout',
                                                  inputs={
                                                      'X': x_var,
                                                      'Seed': seed_input_var
                                                  },
                                                  attrs={'dropout_prob': 0.},
                                                  outputs={
                                                      'Out': x_out_var,
                                                      'Mask': mask_var
                                                  })
            place = fluid.CPUPlace()
            if core.is_compiled_with_cuda():
                place = fluid.CUDAPlace(0)
            exe = fluid.Executor(place)
            x_out, mask_out = exe.run(
                main_program,
                feed={},
                fetch_list=[x_out_var.name, mask_var.name])
            x_in_np = np.ones([40, 40]).astype("float32")
            np.testing.assert_allclose(x_out, x_in_np, rtol=1e-05)


class TestDropoutOpError(unittest.TestCase):

    def test_errors(self):
        with program_guard(Program(), Program()):

            def test_Variable():
                # the input of dropout must be Variable.
                x1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]),
                                             [[1, 1, 1, 1]], fluid.CPUPlace())
                fluid.layers.dropout(x1, dropout_prob=0.5)

            self.assertRaises(TypeError, test_Variable)

            def test_dtype():
                # the input dtype of dropout must be float16, float32 or float64
                # float16 can only be used on a GPU place
                x2 = fluid.layers.data(name='x2',
                                       shape=[3, 4, 5, 6],
                                       dtype="int32")
                fluid.layers.dropout(x2, dropout_prob=0.5)

            self.assertRaises(TypeError, test_dtype)


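# Functional API coverage: with p=0. the output must equal the input and with
# p=1. it must be all zeros, across axis/mode/training combinations.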
class TestDropoutFAPI(unittest.TestCase):

    def setUp(self):
        np.random.seed(123)
        self.places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.places.append(fluid.CUDAPlace(0))

    def check_static_result(self, place):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input = fluid.data(name="input", shape=[-1, -1], dtype="float32")
            res1 = paddle.nn.functional.dropout(x=input, p=0., training=False)
            res2 = paddle.nn.functional.dropout(x=input,
                                                p=0.,
                                                axis=0,
                                                training=True,
                                                mode='upscale_in_train')
            res3 = paddle.nn.functional.dropout(x=input,
                                                p=0.,
                                                axis=0,
                                                training=True,
                                                mode='downscale_in_infer')
            res4 = paddle.nn.functional.dropout(x=input,
                                                p=0.,
                                                axis=0,
                                                training=False,
                                                mode='upscale_in_train')
            res5 = paddle.nn.functional.dropout(x=input,
                                                p=0.,
                                                axis=0,
                                                training=False,
                                                mode='downscale_in_infer')
            res6 = paddle.nn.functional.dropout(x=input,
                                                p=0.,
                                                axis=[0, 1],
                                                training=True,
                                                mode='upscale_in_train')
            res7 = paddle.nn.functional.dropout(x=input,
                                                p=0.,
                                                axis=[0, 1],
                                                training=True,
                                                mode='downscale_in_infer')
            res8 = paddle.nn.functional.dropout(x=input,
                                                p=0.,
                                                axis=[0, 1],
                                                training=False,
                                                mode='upscale_in_train')
            res9 = paddle.nn.functional.dropout(x=input,
                                                p=0.,
                                                axis=[0, 1],
                                                training=False,
                                                mode='downscale_in_infer')
            res10 = paddle.nn.functional.dropout(x=input, p=1., training=True)
            res11 = paddle.fluid.layers.dropout(x=input, dropout_prob=0.)
            res12 = paddle.nn.functional.dropout(x=input,
                                                 p=0.,
                                                 axis=(0, 1),
                                                 training=False,
                                                 mode='upscale_in_train')

            res13 = paddle.nn.functional.dropout(x=input,
                                                 p=0.7,
                                                 axis=1,
                                                 training=True,
                                                 mode='upscale_in_train')

            in_np = np.ones([40, 40]).astype("float32")
            res_np = in_np
            res_np2 = np.zeros_like(in_np)

            exe = fluid.Executor(place)
            res_list = [
                res1, res2, res3, res4, res5, res6, res7, res8, res9, res11,
                res12
            ]
            for res in res_list:
                fetches = exe.run(fluid.default_main_program(),
                                  feed={"input": in_np},
                                  fetch_list=[res])
                np.testing.assert_allclose(fetches[0], res_np, rtol=1e-05)
            fetches2 = exe.run(fluid.default_main_program(),
                               feed={"input": in_np},
                               fetch_list=[res10])
            np.testing.assert_allclose(fetches2[0], res_np2, rtol=1e-05)
            fetches3 = exe.run(fluid.default_main_program(),
                               feed={"input": in_np},
                               fetch_list=[res13])

    def test_static(self):
        for place in self.places:
            self.check_static_result(place=place)

    def test_dygraph(self):
        for place in self.places:
            with fluid.dygraph.guard(place):
                in_np = np.random.random([40, 40]).astype("float32")
                res_np = in_np
                res_np2 = np.zeros_like(in_np)
                input = fluid.dygraph.to_variable(in_np)

                res1 = paddle.nn.functional.dropout(x=input,
                                                    p=0.,
                                                    training=False)
                res2 = paddle.nn.functional.dropout(x=input,
                                                    p=0.,
                                                    axis=0,
                                                    training=True,
                                                    mode='upscale_in_train')
                res3 = paddle.nn.functional.dropout(x=input,
                                                    p=0.,
                                                    axis=0,
                                                    training=True,
                                                    mode='downscale_in_infer')
                res4 = paddle.nn.functional.dropout(x=input,
                                                    p=0.,
                                                    axis=0,
                                                    training=False,
                                                    mode='upscale_in_train')
                res5 = paddle.nn.functional.dropout(x=input,
                                                    p=0.,
                                                    axis=0,
                                                    training=False,
                                                    mode='downscale_in_infer')
                res6 = paddle.nn.functional.dropout(x=input,
                                                    p=0.,
                                                    axis=[0, 1],
                                                    training=True,
                                                    mode='upscale_in_train')
                res7 = paddle.nn.functional.dropout(x=input,
                                                    p=0.,
                                                    axis=[0, 1],
                                                    training=True,
                                                    mode='downscale_in_infer')
                res8 = paddle.nn.functional.dropout(x=input,
                                                    p=0.,
                                                    axis=[0, 1],
                                                    training=False,
                                                    mode='upscale_in_train')
                res9 = paddle.nn.functional.dropout(x=input,
                                                    p=0.,
                                                    axis=[0, 1],
                                                    training=False,
                                                    mode='downscale_in_infer')
                res10 = paddle.nn.functional.dropout(x=input,
                                                     p=1.,
                                                     training=True)
                dropout = paddle.fluid.dygraph.Dropout(p=0, )
                res11 = dropout(input)
                res12 = paddle.nn.functional.dropout(x=input,
                                                     p=0.,
                                                     axis=(0, 1),
                                                     training=False,
                                                     mode='upscale_in_train')
                res13 = paddle.nn.functional.dropout(x=input,
                                                     p=0.5,
                                                     axis=1,
                                                     training=True,
                                                     mode='upscale_in_train')

            res_list = [
                res1, res2, res3, res4, res5, res6, res7, res8, res9, res11,
                res12
            ]
            for res in res_list:
                np.testing.assert_allclose(res.numpy(), res_np, rtol=1e-05)
            np.testing.assert_allclose(res10.numpy(), res_np2, rtol=1e-05)


class TestDropoutFAPIError(unittest.TestCase):

    def test_errors(self):
        with program_guard(Program(), Program()):

            def test_Variable():
                # the input of dropout must be Variable.
                x1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]),
                                             [[1, 1, 1, 1]], fluid.CPUPlace())
                paddle.nn.functional.dropout(x1, p=0.5)

            self.assertRaises(TypeError, test_Variable)

            def test_Variable2():
                # the input of dropout must be Variable.
                x1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]),
                                             [[1, 1, 1, 1]], fluid.CPUPlace())
                paddle.nn.functional.dropout(x1, p=0.5, axis=0)

            self.assertRaises(TypeError, test_Variable2)

            def test_dtype():
                # the input dtype of dropout must be float32 or float64
                # float16 can only be used on a GPU place
                xr = fluid.data(name='xr', shape=[3, 4, 5, 6], dtype="int32")
                paddle.nn.functional.dropout(xr, p=0.5)

            self.assertRaises(TypeError, test_dtype)

            def test_pdtype():
                # p should be int or float
                x2 = fluid.data(name='x2', shape=[3, 4, 5, 6], dtype="float32")
                paddle.nn.functional.dropout(x2, p='0.5')

            self.assertRaises(TypeError, test_pdtype)

            def test_pvalue():
                # p should satisfy 0. <= p <= 1.
                x2 = fluid.data(name='x2', shape=[3, 4, 5, 6], dtype="float32")
                paddle.nn.functional.dropout(x2, p=1.2)

            self.assertRaises(ValueError, test_pvalue)

            def test_mode():
                # mode should be 'downscale_in_infer' or 'upscale_in_train'
                x2 = fluid.data(name='x2', shape=[3, 4, 5, 6], dtype="float32")
                paddle.nn.functional.dropout(x2, mode='abc')

            self.assertRaises(ValueError, test_mode)

            def test_axis():
                # axis should be int or list
                x2 = fluid.data(name='x2', shape=[3, 4, 5, 6], dtype="float32")
                paddle.nn.functional.dropout(x2, axis=1.2)

            self.assertRaises(TypeError, test_axis)

            def test_axis_max():
                # maximum of axis should be less than the number of dimensions of x
                x2 = fluid.data(name='x2', shape=[3, 4, 5, 6], dtype="float32")
                paddle.nn.functional.dropout(x2, axis=[0, 5])

            self.assertRaises(ValueError, test_axis_max)

            def test_axis_min():
                # minimum of axis should be greater than or equal to 0
                x2 = fluid.data(name='x2', shape=[3, 4, 5, 6], dtype="float32")
                paddle.nn.functional.dropout(x2, axis=[0, -1])

            self.assertRaises(ValueError, test_axis_min)

            def test_axis_len():
                # length of axis should not be greater than the number of dimensions of x
                x2 = fluid.data(name='x2', shape=[3, 4, 5, 6], dtype="float32")
                paddle.nn.functional.dropout(x2, axis=[0, 1, 2, 3, 4])

            self.assertRaises(ValueError, test_axis_len)


class TestDropoutCAPI(unittest.TestCase):

    def setUp(self):
        np.random.seed(123)
        self.places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.places.append(fluid.CUDAPlace(0))

    def test_dygraph(self):
        for place in self.places:
            with fluid.dygraph.guard(place):
                input_np = np.random.random([40, 40]).astype("float32")
                result_np = input_np
                input = fluid.dygraph.to_variable(input_np)
                m = paddle.nn.Dropout(p=0.)
                m.eval()
                result = m(input)
                np.testing.assert_allclose(result.numpy(),
                                           result_np,
                                           rtol=1e-05)


class TestDropout2DFAPI(unittest.TestCase):

    def setUp(self):
        np.random.seed(123)
        self.places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.places.append(fluid.CUDAPlace(0))

    def check_static_result(self, place):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input = fluid.data(name="input",
                               shape=[2, 3, 4, 5],
                               dtype="float32")
            res1 = paddle.nn.functional.dropout2d(x=input,
                                                  p=0.,
                                                  training=False,
                                                  data_format='NCHW')
            res2 = paddle.nn.functional.dropout2d(x=input,
                                                  p=0.,
                                                  training=False,
                                                  data_format='NHWC')

            in_np = np.random.random([2, 3, 4, 5]).astype("float32")
            res_np = in_np

            exe = fluid.Executor(place)
            res_list = [res1, res2]
            for res in res_list:
                fetches = exe.run(fluid.default_main_program(),
                                  feed={"input": in_np},
                                  fetch_list=[res])
                np.testing.assert_allclose(fetches[0], res_np, rtol=1e-05)

    def test_static(self):
        for place in self.places:
            self.check_static_result(place=place)

    def test_dygraph(self):
        for place in self.places:
            with fluid.dygraph.guard(place):
                in_np = np.random.random([2, 3, 4, 5]).astype("float32")
                res_np = in_np
                input = fluid.dygraph.to_variable(in_np)

                res1 = paddle.nn.functional.dropout2d(x=input,
                                                      p=0.,
                                                      training=False,
                                                      data_format='NCHW')
                res2 = paddle.nn.functional.dropout2d(x=input,
                                                      p=0.,
                                                      training=False,
                                                      data_format='NHWC')

            res_list = [res1, res2]
            for res in res_list:
                np.testing.assert_allclose(res.numpy(), res_np, rtol=1e-05)


class TestDropout2DFAPIError(unittest.TestCase):

    def test_errors(self):
        with program_guard(Program(), Program()):

            def test_xdim():
                # dimensions of x should be 4
                x = fluid.data(name='x1', shape=[2, 3, 4, 5, 6], dtype="int32")
                paddle.nn.functional.dropout2d(x)

            self.assertRaises(ValueError, test_xdim)

            def test_dataformat():
                # data_format should be 'NCHW' or 'NHWC'
                x = fluid.data(name='x2', shape=[2, 3, 4, 5], dtype="int32")
                paddle.nn.functional.dropout2d(x, data_format='CNHW')

            self.assertRaises(ValueError, test_dataformat)


class TestDropout2DCAPI(unittest.TestCase):

    def setUp(self):
        np.random.seed(123)
        self.places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.places.append(fluid.CUDAPlace(0))

    def test_dygraph(self):
        for place in self.places:
            with fluid.dygraph.guard(place):
                input_np = np.random.random([2, 3, 4, 5]).astype("float32")
                result_np = input_np
                input = fluid.dygraph.to_variable(input_np)
                m = paddle.nn.Dropout2D(p=0.)
                m.eval()
                result = m(input)
                np.testing.assert_allclose(result.numpy(),
                                           result_np,
                                           rtol=1e-05)


class TestDropout3DFAPI(unittest.TestCase):

    def setUp(self):
        np.random.seed(123)
        self.places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.places.append(fluid.CUDAPlace(0))

    def check_static_result(self, place):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input = fluid.data(name="input",
                               shape=[2, 3, 4, 5, 6],
                               dtype="float32")
            res1 = paddle.nn.functional.dropout3d(x=input,
                                                  p=0.,
                                                  training=False,
                                                  data_format='NCDHW')
            res2 = paddle.nn.functional.dropout3d(x=input,
                                                  p=0.,
                                                  training=False,
                                                  data_format='NDHWC')

            in_np = np.random.random([2, 3, 4, 5, 6]).astype("float32")
            res_np = in_np

            exe = fluid.Executor(place)
            res_list = [res1, res2]
            for res in res_list:
                fetches = exe.run(fluid.default_main_program(),
                                  feed={"input": in_np},
                                  fetch_list=[res])
                np.testing.assert_allclose(fetches[0], res_np, rtol=1e-05)

    def test_static(self):
        for place in self.places:
            self.check_static_result(place=place)

    def test_dygraph(self):
        for place in self.places:
            with fluid.dygraph.guard(place):
                in_np = np.random.random([2, 3, 4, 5, 6]).astype("float32")
                res_np = in_np
                input = fluid.dygraph.to_variable(in_np)

                res1 = paddle.nn.functional.dropout3d(x=input,
                                                      p=0.,
                                                      training=False,
                                                      data_format='NCDHW')
                res2 = paddle.nn.functional.dropout3d(x=input,
                                                      p=0.,
                                                      training=False,
                                                      data_format='NDHWC')

            res_list = [res1, res2]
            for res in res_list:
                np.testing.assert_allclose(res.numpy(), res_np, rtol=1e-05)


class TestDropout3DFAPIError(unittest.TestCase):

    def test_errors(self):
        with program_guard(Program(), Program()):

            def test_xdim():
                # dimensions of x should be 5
                x = fluid.data(name='x1', shape=[2, 3, 4, 5], dtype="int32")
                paddle.nn.functional.dropout3d(x)

            self.assertRaises(ValueError, test_xdim)

            def test_dataformat():
                # data_format should be 'NCDHW' or 'NDHWC'
                x = fluid.data(name='x2', shape=[2, 3, 4, 5, 6], dtype="int32")
                paddle.nn.functional.dropout3d(x, data_format='CNDHW')

            self.assertRaises(ValueError, test_dataformat)


class TestDropout3DCAPI(unittest.TestCase):

    def setUp(self):
        np.random.seed(123)
        self.places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.places.append(fluid.CUDAPlace(0))

    def test_dygraph(self):
        for place in self.places:
            with fluid.dygraph.guard(place):
                input_np = np.random.random([2, 3, 4, 5, 6]).astype("float32")
                result_np = input_np
                input = fluid.dygraph.to_variable(input_np)
                m = paddle.nn.Dropout3D(p=0.)
                m.eval()
                result = m(input)
                np.testing.assert_allclose(result.numpy(),
                                           result_np,
                                           rtol=1e-05)


class TestAlphaDropoutFAPI(unittest.TestCase):

    def setUp(self):
        np.random.seed(123)
        self.places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.places.append(fluid.CUDAPlace(0))

    def check_static_result(self, place):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input = fluid.data(name="input", shape=[40, 40], dtype="float32")
            res1 = paddle.nn.functional.alpha_dropout(x=input, p=0.)
            res2 = paddle.nn.functional.alpha_dropout(x=input,
                                                      p=0.,
                                                      training=False)
            res3 = paddle.nn.functional.alpha_dropout(x=input, p=1.)

            in_np = np.random.random([40, 40]).astype("float32")
            res_np = in_np
            res_np3 = np.zeros_like(in_np)

            exe = fluid.Executor(place)
            res_list = [res1, res2]
            for res in res_list:
                fetches = exe.run(fluid.default_main_program(),
                                  feed={"input": in_np},
                                  fetch_list=[res])
                np.testing.assert_allclose(fetches[0], res_np, rtol=1e-05)
            fetches = exe.run(fluid.default_main_program(),
                              feed={"input": in_np},
                              fetch_list=[res3])
            np.testing.assert_allclose(fetches[0], res_np3, rtol=1e-05)

    def test_static(self):
        for place in self.places:
            self.check_static_result(place=place)

    def test_dygraph(self):
        for place in self.places:
            with fluid.dygraph.guard(place):
                in_np = np.random.random([40, 40]).astype("float32")
                res_np = in_np
                res_np3 = np.zeros_like(in_np)
                input = fluid.dygraph.to_variable(in_np)

                res1 = paddle.nn.functional.alpha_dropout(x=input, p=0.)
                res2 = paddle.nn.functional.alpha_dropout(x=input,
                                                          p=0.,
                                                          training=False)
                res3 = paddle.nn.functional.alpha_dropout(x=input, p=1.)

            res_list = [res1, res2]
            for res in res_list:
                np.testing.assert_allclose(res.numpy(), res_np, rtol=1e-05)
            np.testing.assert_allclose(res3.numpy(), res_np3, rtol=1e-05)


class TestAlphaDropoutFAPIError(unittest.TestCase):

    def test_errors(self):
        with program_guard(Program(), Program()):

            def test_Variable():
                # the input of dropout must be Variable.
                x1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]),
                                             [[1, 1, 1, 1]], fluid.CPUPlace())
                paddle.nn.functional.alpha_dropout(x1, p=0.5)

            self.assertRaises(TypeError, test_Variable)

            def test_dtype():
                # the input dtype of dropout must be float32 or float64
                xr = fluid.data(name='xr', shape=[3, 4, 5, 6], dtype="int32")
                paddle.nn.functional.alpha_dropout(xr)

            self.assertRaises(TypeError, test_dtype)

            def test_pdtype():
                # p should be int or float
                x2 = fluid.data(name='x2', shape=[3, 4, 5, 6], dtype="float32")
                paddle.nn.functional.alpha_dropout(x2, p='0.5')

            self.assertRaises(TypeError, test_pdtype)

            def test_pvalue():
                # p should satisfy 0. <= p <= 1.
                x2 = fluid.data(name='x2', shape=[3, 4, 5, 6], dtype="float32")
                paddle.nn.functional.alpha_dropout(x2, p=1.2)

            self.assertRaises(ValueError, test_pvalue)


class TestAlphaDropoutCAPI(unittest.TestCase):

    def setUp(self):
        np.random.seed(123)
        self.places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.places.append(fluid.CUDAPlace(0))

    def test_dygraph(self):
        for place in self.places:
            with fluid.dygraph.guard(place):
                input_np = np.random.random([40, 40]).astype("float32")
                result_np = input_np
                input = fluid.dygraph.to_variable(input_np)
                m = paddle.nn.AlphaDropout(p=0.)
                m.eval()
                result = m(input)
                np.testing.assert_allclose(result.numpy(),
                                           result_np,
                                           rtol=1e-05)


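# Two seed generators registered with the same seed must yield identical
# dropout results when selected via rng_name.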
class TestDropoutWithDeterminateSeedGenerator(unittest.TestCase):

    def setUp(self):
        paddle.framework.random.set_random_seed_generator('seed0', 123)
        paddle.framework.random.set_random_seed_generator('seed1', 123)
        rng0 = paddle.framework.random.get_random_seed_generator('seed0')
        rng1 = paddle.framework.random.get_random_seed_generator('seed1')
        self.places = [paddle.CPUPlace()]
        if paddle.is_compiled_with_cuda():
            self.places.append(paddle.CUDAPlace(0))

    def check_static_result(self, place):
        from paddle.distributed.fleet.meta_parallel.parallel_layers.random import dropout
        with static.program_guard(static.Program(), static.Program()):
            input = static.data(name="input", shape=[40, 40], dtype="float32")
            res1 = dropout(input,
                           p=0.3,
                           training=True,
                           mode='upscale_in_train',
                           rng_name='seed0')
            res2 = dropout(input,
                           p=0.3,
                           training=True,
                           mode='upscale_in_train',
                           rng_name='seed1')
            res3 = dropout(input, p=0.3)

            in_np = np.random.random([40, 40]).astype("float32")

            exe = static.Executor(place)
            res_list = [res1, res2]
            for i in range(2):
                out1, out2 = exe.run(static.default_main_program(),
                                     feed={"input": in_np},
                                     fetch_list=res_list)
                np.testing.assert_allclose(out1, out2, rtol=1e-05)

    def test_static(self):
        for place in self.places:
            self.check_static_result(place=place)


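# Gradient check: dX equals Mask / (1 - p) for upscale_in_train and Mask
# itself for downgrade_in_infer, in both legacy and eager dygraph modes.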
class TestDropoutBackward(unittest.TestCase):

    def setUp(self):
        np.random.seed(123)
        self.places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.places.append(fluid.CUDAPlace(0))

    def cal_grad_upscale_train(self, mask, prob):
        return mask.astype("float32") / (1 - prob)

    def cal_grad_downscale_in_infer(self, mask):
        return mask.astype("float32")

    def test_backward_downscale_in_infer(self):
        _enable_legacy_dygraph()
        for place in self.places:
            with fluid.dygraph.guard(place):

                input = paddle.uniform([40, 40], dtype="float32")
                input.stop_gradient = False
                out, mask = core.ops.dropout(input, 'dropout_prob', 0.5)
                out.backward()

                np.testing.assert_array_equal(
                    input.gradient(),
                    self.cal_grad_downscale_in_infer(mask.numpy()))

    def test_backward_downscale_in_infer_eager(self):
        for place in self.places:
            with fluid.dygraph.guard(place):
                with _test_eager_guard():
                    input = paddle.uniform([40, 40], dtype="float32")
                    input.stop_gradient = False
                    out, mask = _C_ops.dropout(input, None, 0.5, False,
                                               "downgrade_in_infer", 0, False)
                    out.backward()
                    np.testing.assert_array_equal(
                        input.gradient(),
                        self.cal_grad_downscale_in_infer(mask.numpy()))

    def test_backward_upscale_train(self):
        _enable_legacy_dygraph()
        for place in self.places:
            with fluid.dygraph.guard(place):

                prob = 0.5
                input = paddle.uniform([40, 40], dtype="float32")
                input.stop_gradient = False
                out, mask = core.ops.dropout(input, 'dropout_prob', prob,
                                             "dropout_implementation",
                                             "upscale_in_train")
                out.backward()

                np.testing.assert_allclose(input.gradient(),
                                           self.cal_grad_upscale_train(
                                               mask.numpy(), prob),
                                           rtol=1e-05)

    def test_backward_upscale_train_eager(self):
        for place in self.places:
            with fluid.dygraph.guard(place):
                with _test_eager_guard():
                    prob = 0.5
                    input = paddle.uniform([40, 40], dtype="float32")
                    input.stop_gradient = False
                    out, mask = _C_ops.dropout(input, None, 0.5, False,
                                               "upscale_in_train", 0, False)
                    out.backward()

                    np.testing.assert_allclose(input.gradient(),
                                               self.cal_grad_upscale_train(
                                                   mask.numpy(), prob),
                                               rtol=1e-05)

    def test_backward_upscale_train_2(self):
        _enable_legacy_dygraph()
        for place in self.places:
            with fluid.dygraph.guard(place):

                prob = 0.3
                input = paddle.uniform([40, 40], dtype="float32")
                input.stop_gradient = False
                out, mask = core.ops.dropout(input, 'dropout_prob', prob,
                                             "dropout_implementation",
                                             "upscale_in_train")
                out.backward()

                np.testing.assert_allclose(input.gradient(),
                                           self.cal_grad_upscale_train(
                                               mask.numpy(), prob),
                                           rtol=1e-05)

    def test_backward_upscale_train_2_eager(self):
        for place in self.places:
            with fluid.dygraph.guard(place):
                with _test_eager_guard():

                    prob = 0.3
                    input = paddle.uniform([40, 40], dtype="float32")
                    input.stop_gradient = False
                    out, mask = _C_ops.dropout(input, None, 0.3, False,
                                               "upscale_in_train", 0, False)

                    out.backward()

                    np.testing.assert_allclose(input.gradient(),
                                               self.cal_grad_upscale_train(
                                                   mask.numpy(), prob),
                                               rtol=1e-05)


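# Passes the dropout probability p as a tensor (via paddle.assign) and checks
# that static and dygraph results match under the same seed.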
class TestDropOutWithProbTensor(unittest.TestCase):

    def setUp(self):
        self.init_info()
        self.input = np.random.random(self.shape).astype("float32")
        self.place = paddle.CUDAPlace(
            0) if paddle.is_compiled_with_cuda() else paddle.CPUPlace()

1118 1119 1120 1121
    def init_info(self):
        self.shape = [10, 10]
        self.api = paddle.nn.functional.dropout

    def api_case(self, x):
        p = paddle.assign([0.5])
        out = self.api(x=x, p=p, training=True)
        return out

    def run_static(self, x):
        paddle.seed(2022)
        main_program = Program()

        with program_guard(main_program):
            input = paddle.static.data(shape=x.shape, name='x', dtype='float32')
            out = self.api_case(input)
            sgd = paddle.optimizer.SGD(learning_rate=0.1)
            sgd.minimize(paddle.mean(out))

            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'x': x}, fetch_list=[out])

        return res[0]

    def run_dygraph(self, x):
        paddle.seed(2022)
        with fluid.dygraph.guard(self.place):
            out = self.api_case(paddle.to_tensor(x))
        return out

    def test_p_tensor(self):
        static_res = self.run_static(self.input)
        dygraph_res = self.run_dygraph(self.input)
        np.testing.assert_array_equal(static_res, dygraph_res)


class TestDropOut2DWithProbTensor(TestDropOutWithProbTensor):

    def init_info(self):
        self.shape = [2, 3, 10, 10]
        self.api = paddle.nn.functional.dropout2d


class TestDropOut3DWithProbTensor(TestDropOutWithProbTensor):

    def init_info(self):
        self.shape = [2, 3, 8, 8, 8]
        self.api = paddle.nn.functional.dropout3d


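# Pins the exact random sequence produced on a V100 GPU so that changes to the
# CUDA Philox-based kernel are detected.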
class TestRandomValue(unittest.TestCase):

    def test_fixed_random_number(self):
        # Test the fixed GPU random numbers generated by 'curandStatePhilox4_32_10_t'.
        if not paddle.is_compiled_with_cuda():
            return

        # Different GPUs generate different random values. Only test on V100 here.
        if "V100" not in paddle.device.cuda.get_device_name():
            return

        print("Test Fixed Random number on V100 GPU------>")
        paddle.disable_static()
        paddle.set_device('gpu')
        paddle.seed(100)

        x = paddle.rand([32, 1024, 1024], dtype='float32')
        out = paddle.nn.functional.dropout(x, 0.25).numpy()
        index0, index1, index2 = np.nonzero(out)
        self.assertEqual(np.sum(index0), 390094540)
        self.assertEqual(np.sum(index1), 12871475125)
        self.assertEqual(np.sum(index2), 12872777397)
        self.assertEqual(np.sum(out), 16778744.0)
        expect = [
            0.6914956, 0.5294584, 0.19032137, 0.6996228, 0.3338527, 0.8442094,
            0.96965003, 1.1726775, 0., 0.28037727
        ]
        np.testing.assert_allclose(out[10, 100, 500:510], expect, rtol=1e-05)

        x = paddle.rand([32, 1024, 1024], dtype='float64')
        out = paddle.nn.functional.dropout(x).numpy()
        index0, index1, index2 = np.nonzero(out)
        self.assertEqual(np.sum(index0), 260065137)
        self.assertEqual(np.sum(index1), 8582636095)
        self.assertEqual(np.sum(index2), 8582219962)
        self.assertEqual(np.sum(out), 16778396.563660286)
        expect = [
            1.28587354, 0.15563703, 0., 0.28799703, 0., 0., 0., 0.54964,
            0.51355682, 0.33818988
        ]
        np.testing.assert_allclose(out[20, 100, 500:510], expect, rtol=1e-05)

        x = paddle.ones([32, 1024, 1024], dtype='float16')
        out = paddle.nn.functional.dropout(x, 0.75).numpy()
        index0, index1, index2 = np.nonzero(out)
        self.assertEqual(np.sum(index0), 130086900)
        self.assertEqual(np.sum(index1), 4291190105)
        self.assertEqual(np.sum(index2), 4292243807)
        expect = [0., 0., 0., 0., 0., 0., 0., 0., 4., 4.]
        np.testing.assert_allclose(out[0, 100, 500:510], expect, rtol=1e-05)

        paddle.enable_static()


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()