test_squeeze_op.py 9.9 KB
Newer Older
1
#   Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
2 3 4 5 6 7 8 9 10 11 12 13 14 15
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
16

17
import numpy as np
18 19

import paddle
20
import paddle.fluid as fluid
21
from paddle.fluid import Program, program_guard
22 23
from op_test import OpTest, convert_float_to_uint16
import paddle.fluid.core as core
24 25 26
import gradient_checker
from decorator_helper import prog_scope
import paddle.fluid.layers as layers
27

28
# The OpTest-based cases below run under the static graph; dygraph tests
# re-enable dynamic mode themselves via paddle.disable_static().
paddle.enable_static()
29 30 31


# Correct: General.
class TestSqueezeOp(OpTest):
    """Base case: squeeze removes the size-1 dims listed in ``axes``."""

    def setUp(self):
        self.op_type = "squeeze"
        self.init_test_case()
        x = np.random.random(self.ori_shape).astype("float64")
        self.inputs = {"X": x}
        self.init_attrs()
        # Squeezing is a pure reshape, so the reference output is a reshape.
        self.outputs = {"Out": x.reshape(self.new_shape)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(["X"], "Out")

    def init_test_case(self):
        # Both unit dims (0 and 2) are squeezed away.
        self.ori_shape = (1, 3, 1, 40)
        self.axes = (0, 2)
        self.new_shape = (3, 40)

    def init_attrs(self):
        self.attrs = {"axes": self.axes}


class TestSqueezeBF16Op(OpTest):
    """Same shapes as TestSqueezeOp, but with bfloat16 (uint16-packed) data."""

    def setUp(self):
        self.op_type = "squeeze"
        self.dtype = np.uint16
        self.init_test_case()
        self.init_attrs()
        # Data is generated in float32 and packed to bf16 for both in and out.
        x = np.random.random(self.ori_shape).astype("float32")
        self.inputs = {"X": convert_float_to_uint16(x)}
        self.outputs = {"Out": convert_float_to_uint16(x.reshape(self.new_shape))}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(["X"], "Out")

    def init_test_case(self):
        # Both unit dims (0 and 2) are squeezed away.
        self.ori_shape = (1, 3, 1, 40)
        self.axes = (0, 2)
        self.new_shape = (3, 40)

    def init_attrs(self):
        self.attrs = {"axes": self.axes}


C
chenweihang 已提交
85 86
# Correct: There is a minus (negative) axis.
class TestSqueezeOp1(TestSqueezeOp):

    def init_test_case(self):
        # -2 indexes dim 2 of a rank-4 shape, so this matches the base case.
        self.ori_shape = (1, 3, 1, 40)
        self.axes = (0, -2)
        self.new_shape = (3, 40)
92 93 94


# Correct: No axes input.
class TestSqueezeOp2(TestSqueezeOp):

    def init_test_case(self):
        # Empty axes means "squeeze every size-1 dim".
        self.ori_shape = (1, 20, 1, 5)
        self.axes = ()
        self.new_shape = (20, 5)
101 102


103
# Correct: Just part of the unit axes are squeezed.
class TestSqueezeOp3(TestSqueezeOp):

    def init_test_case(self):
        # Only dims 1 and -1 are squeezed; the unit dim at index 3 survives.
        self.ori_shape = (6, 1, 5, 1, 4, 1)
        self.axes = (1, -1)
        self.new_shape = (6, 5, 1, 4)
110 111


L
Leo Chen 已提交
112 113
# Correct: A dimension named in axes whose size is not 1 remains unchanged.
class TestSqueezeOp4(TestSqueezeOp):

    def init_test_case(self):
        # axis 2 has size 5, so only the unit dim at axis 1 is removed.
        self.ori_shape = (6, 1, 5, 1, 4, 1)
        self.axes = (1, 2)
        self.new_shape = (6, 5, 1, 4, 1)


121
class TestSqueezeOpError(unittest.TestCase):
    """Static-graph input validation for paddle.squeeze."""

    def test_errors(self):
        paddle.enable_static()
        with program_guard(Program(), Program()):
            # The input of squeeze must be a Variable; a raw LoDTensor raises.
            lod_input = fluid.create_lod_tensor(np.array([[-1]]), [[1]],
                                                paddle.CPUPlace())
            self.assertRaises(TypeError, paddle.squeeze, lod_input)
            # The axes argument must be a list; a bare int raises.
            int_input = paddle.static.data(name='x2', shape=[4], dtype="int32")
            self.assertRaises(TypeError, paddle.squeeze, int_input, axes=0)
            # float16 input dtype is not supported by squeeze.
            fp16_input = paddle.static.data(name='x3',
                                            shape=[4],
                                            dtype="float16")
            self.assertRaises(TypeError, paddle.squeeze, fp16_input, axes=0)
136 137


138
class API_TestSqueeze(unittest.TestCase):
    """Static-graph API check; subclasses swap in the in-place variant."""

    def setUp(self):
        self.executed_api()

    def executed_api(self):
        # Hook point: subclasses override this to test paddle.squeeze_.
        self.squeeze = paddle.squeeze

    def test_out(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program(),
                                         paddle.static.Program()):
            data1 = paddle.static.data('data1',
                                       shape=[-1, 1, 10],
                                       dtype='float64')
            squeezed = self.squeeze(data1, axis=[1])
            exe = paddle.static.Executor(paddle.CPUPlace())
            x_np = np.random.random([5, 1, 10]).astype('float64')
            result, = exe.run(feed={"data1": x_np}, fetch_list=[squeezed])
            # numpy is the reference implementation.
            expected = np.squeeze(x_np, axis=1)
            np.testing.assert_allclose(expected, result, rtol=1e-05)
161 162


163
class API_TestStaticSqueeze_(API_TestSqueeze):
    """Re-run the static API test using the in-place paddle.squeeze_."""

    def executed_api(self):
        self.squeeze = paddle.squeeze_


169
class API_TestDygraphSqueeze(unittest.TestCase):
    """Dygraph API checks for paddle.squeeze across dtypes and axis forms.

    The five public test methods previously duplicated the same body; they
    now share ``_check_squeeze``. Subclasses override ``executed_api`` to
    exercise the in-place variant with the identical scenarios.
    """

    def setUp(self):
        self.executed_api()

    def executed_api(self):
        # Hook point: subclasses override this to test paddle.squeeze_.
        self.squeeze = paddle.squeeze

    def _check_squeeze(self, dtype, axis):
        """Squeeze a random [5, 1, 10] tensor of `dtype` along `axis` and
        compare against np.squeeze(..., axis=1), the expected result for
        every scenario below (non-unit axes are left unchanged)."""
        paddle.disable_static()
        input_1 = np.random.random([5, 1, 10]).astype(dtype)
        input = paddle.to_tensor(input_1)
        output = self.squeeze(input, axis=axis)
        out_np = output.numpy()
        expected_out = np.squeeze(input_1, axis=1)
        np.testing.assert_allclose(expected_out, out_np, rtol=1e-05)

    def test_out(self):
        self._check_squeeze("int32", [1])

    def test_out_int8(self):
        self._check_squeeze("int8", [1])

    def test_out_uint8(self):
        self._check_squeeze("uint8", [1])

    def test_axis_not_list(self):
        # A bare int axis is accepted in dygraph mode.
        self._check_squeeze("int32", 1)

    def test_dimension_not_1(self):
        # Axis 0 has size 5, so only axis 1 is actually squeezed.
        self._check_squeeze("int32", (1, 0))
221 222


223
class API_TestDygraphSqueezeInplace(API_TestDygraphSqueeze):
    """Re-run all dygraph API tests using the in-place paddle.squeeze_."""

    def executed_api(self):
        self.squeeze = paddle.squeeze_


229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302
class TestSqueezeDoubleGradCheck(unittest.TestCase):
    """Numeric double-gradient check for squeeze, static and dygraph."""

    def squeeze_wrapper(self, x):
        return paddle.squeeze(x[0])

    @prog_scope()
    def func(self, place):
        # The shape of the input variable must be fully specified (no -1).
        eps = 0.005
        dtype = np.float32

        data = layers.data('data', [2, 3], False, dtype)
        data.persistable = True
        out = paddle.squeeze(data)
        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)

        gradient_checker.double_grad_check([data],
                                           out,
                                           x_init=[data_arr],
                                           place=place,
                                           eps=eps)
        # Intermediate grads must be retained for the dygraph-side check.
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        gradient_checker.double_grad_check_for_dygraph(self.squeeze_wrapper,
                                                       [data],
                                                       out,
                                                       x_init=[data_arr],
                                                       place=place)

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for place in places:
            self.func(place)


class TestSqueezeTripleGradCheck(unittest.TestCase):
    """Numeric triple-gradient check for squeeze, static and dygraph."""

    def squeeze_wrapper(self, x):
        return paddle.squeeze(x[0])

    @prog_scope()
    def func(self, place):
        # The shape of the input variable must be fully specified (no -1).
        eps = 0.005
        dtype = np.float32

        data = layers.data('data', [2, 3], False, dtype)
        data.persistable = True
        out = paddle.squeeze(data)
        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)

        gradient_checker.triple_grad_check([data],
                                           out,
                                           x_init=[data_arr],
                                           place=place,
                                           eps=eps)
        # Intermediate grads must be retained for the dygraph-side check.
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        gradient_checker.triple_grad_check_for_dygraph(self.squeeze_wrapper,
                                                       [data],
                                                       out,
                                                       x_init=[data_arr],
                                                       place=place)

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for place in places:
            self.func(place)


303 304
# Run the whole suite when the file is executed directly.
if __name__ == "__main__":
    unittest.main()