#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
import paddle.fluid.core as core
from op_test import OpTest, skip_check_grad_ci
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard


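# dropout_prob=0.0 keeps every element, so Out equals X and Mask is all ones.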
class TestDropoutOp(OpTest):
    def setUp(self):
        self.op_type = "dropout"
        self.inputs = {'X': np.random.random((32, 64)).astype("float32")}
        self.attrs = {'dropout_prob': 0.0, 'fix_seed': True, 'is_test': False}
        self.outputs = {
            'Out': self.inputs['X'],
            'Mask': np.ones((32, 64)).astype('uint8')
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['X'], 'Out')


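# dropout_prob=1.0 drops every element, so Out and Mask are all zeros.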
class TestDropoutOp2(TestDropoutOp):
    def setUp(self):
        self.op_type = "dropout"
        self.inputs = {'X': np.random.random((32, 64)).astype("float32")}
        self.attrs = {'dropout_prob': 1.0, 'fix_seed': True, 'is_test': False}
        self.outputs = {
            'Out': np.zeros((32, 64)).astype('float32'),
            'Mask': np.zeros((32, 64)).astype('uint8')
        }


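# Same check as TestDropoutOp, but with a 3-D input.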
class TestDropoutOp3(TestDropoutOp):
    def setUp(self):
        self.op_type = "dropout"
        self.inputs = {'X': np.random.random((32, 64, 2)).astype("float32")}
        self.attrs = {'dropout_prob': 0.0, 'fix_seed': True, 'is_test': False}
        self.outputs = {
            'Out': self.inputs['X'],
            'Mask': np.ones((32, 64, 2)).astype('uint8')
        }


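# is_test=True with the default ('downgrade_in_infer') implementation:
# Out = X * (1 - dropout_prob) and no Mask is produced.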
@skip_check_grad_ci(reason="For inference, check_grad is not required.")
class TestDropoutOp4(OpTest):
    def setUp(self):
        self.op_type = "dropout"
        self.inputs = {'X': np.random.random((32, 64)).astype("float32")}
        self.attrs = {'dropout_prob': 0.35, 'fix_seed': True, 'is_test': True}
        self.outputs = {
            'Out': self.inputs['X'] * (1.0 - self.attrs['dropout_prob'])
        }

    def test_check_output(self):
        self.check_output()


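# Inference-mode check with a 3-D input and dropout_prob=0.75.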
@skip_check_grad_ci(reason="For inference, check_grad is not required.")
class TestDropoutOp5(OpTest):
    def setUp(self):
        self.op_type = "dropout"
        self.inputs = {'X': np.random.random((32, 64, 3)).astype("float32")}
        self.attrs = {'dropout_prob': 0.75, 'is_test': True}
        self.outputs = {
            'Out': self.inputs['X'] * (1.0 - self.attrs['dropout_prob'])
        }

    def test_check_output(self):
        self.check_output()


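# 'upscale_in_train' with dropout_prob=1.0: the training output is still all zeros.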
class TestDropoutOp6(TestDropoutOp):
    def setUp(self):
        self.op_type = "dropout"
        self.inputs = {'X': np.random.random((32, 64)).astype("float32")}
        self.attrs = {
            'dropout_prob': 1.0,
            'fix_seed': True,
            'is_test': False,
            'dropout_implementation': 'upscale_in_train'
        }
        self.outputs = {
            'Out': np.zeros((32, 64)).astype('float32'),
            'Mask': np.zeros((32, 64)).astype('uint8')
        }


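# 'upscale_in_train' with dropout_prob=0.0: nothing is dropped and no rescaling
# is needed, so Out equals X.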
class TestDropoutOp7(TestDropoutOp):
    def setUp(self):
        self.op_type = "dropout"
        self.inputs = {'X': np.random.random((32, 64, 2)).astype("float32")}
        self.attrs = {
            'dropout_prob': 0.0,
            'fix_seed': True,
            'is_test': False,
            'dropout_implementation': 'upscale_in_train'
        }
        self.outputs = {
            'Out': self.inputs['X'],
            'Mask': np.ones((32, 64, 2)).astype('uint8')
        }


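# 'upscale_in_train' at inference time is an identity mapping: Out equals X.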
@skip_check_grad_ci(reason="For inference, check_grad is not required.")
class TestDropoutOp8(OpTest):
    def setUp(self):
        self.op_type = "dropout"
        self.inputs = {'X': np.random.random((32, 64)).astype("float32")}
        self.attrs = {
            'dropout_prob': 0.35,
            'fix_seed': True,
            'is_test': True,
            'dropout_implementation': 'upscale_in_train'
        }
        self.outputs = {'Out': self.inputs['X']}

    def test_check_output(self):
        self.check_output()


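# Same identity check as TestDropoutOp8, with a 3-D input and dropout_prob=0.75.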
@skip_check_grad_ci(reason="For inference, check_grad is not required.")
class TestDropoutOp9(OpTest):
    def setUp(self):
        self.op_type = "dropout"
        self.inputs = {'X': np.random.random((32, 64, 3)).astype("float32")}
        self.attrs = {
            'dropout_prob': 0.75,
            'is_test': True,
            'dropout_implementation': 'upscale_in_train'
        }
        self.outputs = {'Out': self.inputs['X']}

    def test_check_output(self):
        self.check_output()


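# The random seed is supplied through the 'Seed' input tensor rather than an
# attribute; with dropout_prob=0.0 the result is deterministic either way.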
class TestDropoutOpWithSeed(OpTest):
    def setUp(self):
        self.op_type = "dropout"
        self.inputs = {
            "X": np.random.random((32, 64)).astype("float32"),
            "Seed": np.asarray(
                [125], dtype="int32")
        }
        self.attrs = {'dropout_prob': 0.0}
        self.outputs = {
            'Out': self.inputs['X'],
            'Mask': np.ones((32, 64)).astype('uint8')
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.05)


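# float16 dropout requires CUDA; outputs are compared on the GPU place with a
# looser tolerance (atol=1e-3).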
@unittest.skipIf(
    not core.is_compiled_with_cuda() or not core.op_support_gpu("dropout"),
    "core is not compiled with CUDA or core does not support dropout")
@skip_check_grad_ci(reason="For inference, check_grad is not required.")
class TestFP16DropoutOp(OpTest):
    def setUp(self):
        self.op_type = "dropout"
        self.init_test_case()

        x = np.random.random(self.input_size).astype("float16")
        out = x * (1.0 - self.prob)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {
            'dropout_prob': self.prob,
            'fix_seed': self.fix_seed,
            'is_test': True
        }
        self.outputs = {'Out': out}

    def init_test_case(self):
        self.input_size = [32, 64]
        self.prob = 0.35
        self.fix_seed = True

    def test_check_output(self):
        self.check_output_with_place(core.CUDAPlace(0), atol=1e-3)


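# float16 variant with a 3-D input, dropout_prob=0.75, and a non-fixed seed.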
@unittest.skipIf(
    not core.is_compiled_with_cuda() or not core.op_support_gpu("dropout"),
    "core is not compiled with CUDA or core does not support dropout")
@skip_check_grad_ci(reason="For inference, check_grad is not required.")
class TestFP16DropoutOp2(TestFP16DropoutOp):
    def init_test_case(self):
        self.input_size = [32, 64, 3]
        self.prob = 0.75
        self.fix_seed = False


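# Input validation: dropout should raise TypeError for non-Variable inputs and
# for unsupported input dtypes such as int32.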
class TestDropoutOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):

            def test_Variable():
                # the input of dropout must be a Variable.
                x1 = fluid.create_lod_tensor(
                    np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace())
                fluid.layers.dropout(x1, dropout_prob=0.5)

            self.assertRaises(TypeError, test_Variable)

            def test_dtype():
                # the input dtype of dropout must be float16, float32 or float64;
                # float16 can only be used on a GPU place
                x2 = fluid.layers.data(
                    name='x2', shape=[3, 4, 5, 6], dtype="int32")
                fluid.layers.dropout(x2, dropout_prob=0.5)

            self.assertRaises(TypeError, test_dtype)


if __name__ == '__main__':
    unittest.main()