#   Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import gradient_checker
import numpy as np
from decorator_helper import prog_scope
from op_test import OpTest, convert_float_to_uint16

import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.fluid.layers as layers
from paddle.fluid import Program, program_guard

paddle.enable_static()
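
# The OpTest-based operator checks below rely on the static graph mode
# enabled here; the dygraph API tests switch back via paddle.disable_static().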


# Correct: General.
class TestSqueezeOp(OpTest):
    def setUp(self):
        self.op_type = "squeeze"
        self.init_test_case()
        self.inputs = {"X": np.random.random(self.ori_shape).astype("float64")}
        self.init_attrs()
        self.outputs = {
            "Out": self.inputs["X"].reshape(self.new_shape),
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(["X"], "Out")

    def init_test_case(self):
        self.ori_shape = (1, 3, 1, 40)
        self.axes = (0, 2)
        self.new_shape = (3, 40)

    def init_attrs(self):
        self.attrs = {"axes": self.axes}
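

# For reference, the base case above mirrors NumPy semantics (a minimal
# sketch, not executed by this suite):
#
#   x = np.random.random((1, 3, 1, 40))
#   assert np.squeeze(x, axis=(0, 2)).shape == (3, 40)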


class TestSqueezeBF16Op(OpTest):
    def setUp(self):
        self.op_type = "squeeze"
        self.dtype = np.uint16
        self.init_test_case()
        x = np.random.random(self.ori_shape).astype("float32")
        out = x.reshape(self.new_shape)
        self.inputs = {"X": convert_float_to_uint16(x)}
        self.init_attrs()
        self.outputs = {"Out": convert_float_to_uint16(out)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(["X"], "Out")

    def init_test_case(self):
        self.ori_shape = (1, 3, 1, 40)
        self.axes = (0, 2)
        self.new_shape = (3, 40)

    def init_attrs(self):
        self.attrs = {"axes": self.axes}
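

# Note: convert_float_to_uint16 stores bfloat16 values in uint16 buffers
# (roughly the upper 16 bits of the float32 bit pattern), which is why
# TestSqueezeBF16Op sets self.dtype = np.uint16. A rough sketch of the idea:
#
#   bits = np.float32(1.0).view(np.uint32) >> np.uint32(16)  # bfloat16 bits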


# Correct: There is a minus axis.
class TestSqueezeOp1(TestSqueezeOp):
    def init_test_case(self):
        self.ori_shape = (1, 3, 1, 40)
        self.axes = (0, -2)
        self.new_shape = (3, 40)


# Correct: No axes input.
class TestSqueezeOp2(TestSqueezeOp):
    def init_test_case(self):
        self.ori_shape = (1, 20, 1, 5)
        self.axes = ()
        self.new_shape = (20, 5)


# Correct: Only part of the axes are squeezed.
class TestSqueezeOp3(TestSqueezeOp):
    def init_test_case(self):
        self.ori_shape = (6, 1, 5, 1, 4, 1)
        self.axes = (1, -1)
        self.new_shape = (6, 5, 1, 4)


# Correct: A dimension whose size is not 1 at the given axis remains unchanged.
class TestSqueezeOp4(TestSqueezeOp):
    def init_test_case(self):
        self.ori_shape = (6, 1, 5, 1, 4, 1)
        self.axes = (1, 2)
        self.new_shape = (6, 5, 1, 4, 1)
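

# Sketch of the case above: paddle.squeeze ignores an axis whose size is not
# 1 (np.squeeze would raise instead), so (6, 1, 5, 1, 4, 1) with axes (1, 2)
# becomes (6, 5, 1, 4, 1).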


class TestSqueezeOpError(unittest.TestCase):
    def test_errors(self):
        paddle.enable_static()
        with program_guard(Program(), Program()):
            # The input type of squeeze must be Variable.
            x1 = fluid.create_lod_tensor(
                np.array([[-1]]), [[1]], paddle.CPUPlace()
            )
            self.assertRaises(TypeError, paddle.squeeze, x1)
            # The input axes of squeeze must be a list.
            x2 = paddle.static.data(name='x2', shape=[4], dtype="int32")
            self.assertRaises(TypeError, paddle.squeeze, x2, axes=0)
            # The input dtype of squeeze does not support float16.
            x3 = paddle.static.data(name='x3', shape=[4], dtype="float16")
            self.assertRaises(TypeError, paddle.squeeze, x3, axes=0)


class API_TestSqueeze(unittest.TestCase):
    def setUp(self):
        self.executed_api()

    def executed_api(self):
        self.squeeze = paddle.squeeze

    def test_out(self):
        paddle.enable_static()
        with paddle.static.program_guard(
            paddle.static.Program(), paddle.static.Program()
        ):
            data1 = paddle.static.data(
                'data1', shape=[-1, 1, 10], dtype='float64'
            )
            result_squeeze = self.squeeze(data1, axis=[1])
            place = paddle.CPUPlace()
            exe = paddle.static.Executor(place)
            input1 = np.random.random([5, 1, 10]).astype('float64')
            (result,) = exe.run(
                feed={"data1": input1}, fetch_list=[result_squeeze]
            )
            expected_result = np.squeeze(input1, axis=1)
            np.testing.assert_allclose(expected_result, result, rtol=1e-05)


class API_TestStaticSqueeze_(API_TestSqueeze):
    def executed_api(self):
        self.squeeze = paddle.squeeze_


class API_TestDygraphSqueeze(unittest.TestCase):
    def setUp(self):
        self.executed_api()

    def executed_api(self):
        self.squeeze = paddle.squeeze

    def test_out(self):
        paddle.disable_static()
        input_1 = np.random.random([5, 1, 10]).astype("int32")
        input = paddle.to_tensor(input_1)
        output = self.squeeze(input, axis=[1])
        out_np = output.numpy()
        expected_out = np.squeeze(input_1, axis=1)
        np.testing.assert_allclose(expected_out, out_np, rtol=1e-05)

    def test_out_int8(self):
        paddle.disable_static()
        input_1 = np.random.random([5, 1, 10]).astype("int8")
        input = paddle.to_tensor(input_1)
        output = self.squeeze(input, axis=[1])
        out_np = output.numpy()
        expected_out = np.squeeze(input_1, axis=1)
        np.testing.assert_allclose(expected_out, out_np, rtol=1e-05)

    def test_out_uint8(self):
        paddle.disable_static()
        input_1 = np.random.random([5, 1, 10]).astype("uint8")
        input = paddle.to_tensor(input_1)
        output = self.squeeze(input, axis=[1])
        out_np = output.numpy()
        expected_out = np.squeeze(input_1, axis=1)
        np.testing.assert_allclose(expected_out, out_np, rtol=1e-05)

    def test_axis_not_list(self):
        paddle.disable_static()
        input_1 = np.random.random([5, 1, 10]).astype("int32")
        input = paddle.to_tensor(input_1)
        output = self.squeeze(input, axis=1)
        out_np = output.numpy()
        expected_out = np.squeeze(input_1, axis=1)
        np.testing.assert_allclose(expected_out, out_np, rtol=1e-05)

    def test_dimension_not_1(self):
        paddle.disable_static()
        input_1 = np.random.random([5, 1, 10]).astype("int32")
        input = paddle.to_tensor(input_1)
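        # Axis 0 has size 5, so only the size-1 axis 1 is actually squeezed;
        # the result should therefore match np.squeeze(input_1, axis=1).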
        output = self.squeeze(input, axis=(1, 0))
        out_np = output.numpy()
        expected_out = np.squeeze(input_1, axis=1)
        np.testing.assert_allclose(expected_out, out_np, rtol=1e-05)


class API_TestDygraphSqueezeInplace(API_TestDygraphSqueeze):
    def executed_api(self):
        self.squeeze = paddle.squeeze_
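

# The gradient checks below compare analytic gradients against finite
# differences (step eps) around x_init, in both static graph and dygraph
# modes.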


class TestSqueezeDoubleGradCheck(unittest.TestCase):
    def squeeze_wrapper(self, x):
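        # gradient_checker passes inputs as a list, so unwrap the tensor.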
        return paddle.squeeze(x[0])

    @prog_scope()
    def func(self, place):
        # The shape of the input variable must be specified explicitly and must not include -1.
        eps = 0.005
        dtype = np.float32

        data = layers.data('data', [2, 3], False, dtype)
        data.persistable = True
        out = paddle.squeeze(data)
        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)

        gradient_checker.double_grad_check(
            [data], out, x_init=[data_arr], place=place, eps=eps
        )
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        gradient_checker.double_grad_check_for_dygraph(
            self.squeeze_wrapper, [data], out, x_init=[data_arr], place=place
        )

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestSqueezeTripleGradCheck(unittest.TestCase):
    def squeeze_wrapper(self, x):
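        # gradient_checker passes inputs as a list, so unwrap the tensor.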
        return paddle.squeeze(x[0])

    @prog_scope()
    def func(self, place):
        # The shape of the input variable must be specified explicitly and must not include -1.
        eps = 0.005
        dtype = np.float32

        data = layers.data('data', [2, 3], False, dtype)
        data.persistable = True
        out = paddle.squeeze(data)
        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)

        gradient_checker.triple_grad_check(
            [data], out, x_init=[data_arr], place=place, eps=eps
        )
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        gradient_checker.triple_grad_check_for_dygraph(
            self.squeeze_wrapper, [data], out, x_init=[data_arr], place=place
        )

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


if __name__ == "__main__":
    unittest.main()