#   Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import gradient_checker
import numpy as np
from decorator_helper import prog_scope
from eager_op_test import OpTest, convert_float_to_uint16

import paddle
from paddle import fluid
from paddle.fluid import Program, core, program_guard

# The op tests below build static-graph programs, so switch to static mode
# once at import time (individual tests re-enable it where needed).
paddle.enable_static()


# Correct: General.
class TestSqueezeOp(OpTest):
    """Basic float64 squeeze test: removes the size-1 dims named in `axes`."""

    def setUp(self):
        self.op_type = "squeeze"
        self.init_test_case()
        x = np.random.random(self.ori_shape).astype("float64")
        self.inputs = {"X": x}
        self.init_attrs()
        # Squeeze is a pure metadata change, so the reference output is a reshape.
        self.outputs = {"Out": x.reshape(self.new_shape)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(["X"], "Out")

    def init_test_case(self):
        # Dims 0 and 2 have extent 1 and are squeezed away.
        self.ori_shape = (1, 3, 1, 40)
        self.axes = (0, 2)
        self.new_shape = (3, 40)

    def init_attrs(self):
        self.attrs = {"axes": self.axes}


class TestSqueezeFP16Op(OpTest):
    """Same scenario as TestSqueezeOp but exercises the float16 kernel."""

    def setUp(self):
        self.op_type = "squeeze"
        self.init_test_case()
        x = np.random.random(self.ori_shape).astype("float16")
        self.inputs = {"X": x}
        self.init_attrs()
        self.outputs = {"Out": x.reshape(self.new_shape)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(["X"], "Out")

    def init_test_case(self):
        # Dims 0 and 2 have extent 1 and are squeezed away.
        self.ori_shape = (1, 3, 1, 40)
        self.axes = (0, 2)
        self.new_shape = (3, 40)

    def init_attrs(self):
        self.attrs = {"axes": self.axes}


class TestSqueezeBF16Op(OpTest):
    """Squeeze test for bfloat16; tensors are stored as uint16 bit patterns."""

    def setUp(self):
        self.op_type = "squeeze"
        self.dtype = np.uint16
        self.init_test_case()
        x = np.random.random(self.ori_shape).astype("float32")
        self.inputs = {"X": convert_float_to_uint16(x)}
        self.init_attrs()
        # Reference output: same data reshaped, then converted to bf16 bits.
        self.outputs = {"Out": convert_float_to_uint16(x.reshape(self.new_shape))}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(["X"], "Out")

    def init_test_case(self):
        # Dims 0 and 2 have extent 1 and are squeezed away.
        self.ori_shape = (1, 3, 1, 40)
        self.axes = (0, 2)
        self.new_shape = (3, 40)

    def init_attrs(self):
        self.attrs = {"axes": self.axes}


# Correct: there is a minus (negative) axis.
class TestSqueezeOp1(TestSqueezeOp):
    def init_test_case(self):
        self.ori_shape = (1, 3, 1, 40)
        # -2 resolves to dim 2 of the 4-D input, which has extent 1.
        self.axes = (0, -2)
        self.new_shape = (3, 40)


# Correct: no axes given, so every size-1 dimension is squeezed.
class TestSqueezeOp2(TestSqueezeOp):
    def init_test_case(self):
        self.ori_shape = (1, 20, 1, 5)
        # Empty tuple means "squeeze all dims of extent 1".
        self.axes = ()
        self.new_shape = (20, 5)


# Correct: just part of the size-1 dims are squeezed (dims 1 and -1 only).
class TestSqueezeOp3(TestSqueezeOp):
    def init_test_case(self):
        self.ori_shape = (6, 1, 5, 1, 4, 1)
        # Dim 3 also has extent 1 but is not listed, so it survives.
        self.axes = (1, -1)
        self.new_shape = (6, 5, 1, 4)


# Correct: a listed axis whose dimension is not of size 1 remains unchanged.
class TestSqueezeOp4(TestSqueezeOp):
    def init_test_case(self):
        self.ori_shape = (6, 1, 5, 1, 4, 1)
        # Dim 2 has extent 5, so only dim 1 is actually removed.
        self.axes = (1, 2)
        self.new_shape = (6, 5, 1, 4, 1)


class TestSqueezeOpError(unittest.TestCase):
    """Static-graph type checks: invalid inputs to paddle.squeeze must raise."""

    def test_errors(self):
        paddle.enable_static()
        with program_guard(Program(), Program()):
            # The input of squeeze must be a Variable; a raw LoDTensor fails.
            x1 = fluid.create_lod_tensor(
                np.array([[-1]]), [[1]], paddle.CPUPlace()
            )
            self.assertRaises(TypeError, paddle.squeeze, x1)
            # int32 data rejects a non-list `axes`; float16 dtype is rejected
            # outright — both cases raise TypeError.
            for name, dtype in (('x2', "int32"), ('x3', "float16")):
                var = paddle.static.data(name=name, shape=[4], dtype=dtype)
                self.assertRaises(TypeError, paddle.squeeze, var, axes=0)


class API_TestSqueeze(unittest.TestCase):
    """Static-graph tests for the out-of-place paddle.squeeze API."""

    def setUp(self):
        self.executed_api()

    def executed_api(self):
        # Subclasses override this hook to swap in the inplace variant.
        self.squeeze = paddle.squeeze

    def test_out(self):
        paddle.enable_static()
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        with paddle.static.program_guard(main_prog, startup_prog):
            data1 = paddle.static.data(
                'data1', shape=[-1, 1, 10], dtype='float64'
            )
            result_squeeze = self.squeeze(data1, axis=[1])
            exe = paddle.static.Executor(paddle.CPUPlace())
            input1 = np.random.random([5, 1, 10]).astype('float64')
            (result,) = exe.run(
                feed={"data1": input1}, fetch_list=[result_squeeze]
            )
            # The graph output must match numpy's reference squeeze.
            np.testing.assert_allclose(
                np.squeeze(input1, axis=1), result, rtol=1e-05
            )


class API_TestStaticSqueeze_(API_TestSqueeze):
    # Re-run the parent's static-graph checks against the inplace variant.
    def executed_api(self):
        self.squeeze = paddle.squeeze_


class API_TestDygraphSqueeze(unittest.TestCase):
    """Dygraph tests for paddle.squeeze across dtypes and axis spellings."""

    def setUp(self):
        self.executed_api()

    def executed_api(self):
        # Subclasses override this hook to swap in the inplace variant.
        self.squeeze = paddle.squeeze

    def _check(self, np_dtype, axis):
        # Run self.squeeze in dygraph mode on a [5, 1, 10] input and compare
        # with numpy's reference squeeze of axis 1 (the only size-1 dim).
        paddle.disable_static()
        arr = np.random.random([5, 1, 10]).astype(np_dtype)
        output = self.squeeze(paddle.to_tensor(arr), axis=axis)
        np.testing.assert_allclose(
            np.squeeze(arr, axis=1), output.numpy(), rtol=1e-05
        )

    def test_out(self):
        self._check("int32", [1])

    def test_out_int8(self):
        self._check("int8", [1])

    def test_out_uint8(self):
        self._check("uint8", [1])

    def test_axis_not_list(self):
        # A bare int axis is accepted as well as a list.
        self._check("int32", 1)

    def test_dimension_not_1(self):
        # Axis 0 has extent 5, so only axis 1 is actually squeezed.
        self._check("int32", (1, 0))


class API_TestDygraphSqueezeInplace(API_TestDygraphSqueeze):
    # Re-run the parent's dygraph checks against the inplace variant.
    def executed_api(self):
        self.squeeze = paddle.squeeze_


class TestSqueezeDoubleGradCheck(unittest.TestCase):
    """Second-order gradient check for squeeze via finite differences."""

    def squeeze_wrapper(self, x):
        return paddle.squeeze(x[0])

    @prog_scope()
    def func(self, place):
        # The shape of the input variable must be fully specified (no -1).
        eps = 0.005
        dtype = np.float32

        data = paddle.static.data('data', [2, 3], dtype)
        data.persistable = True
        out = paddle.squeeze(data)
        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)

        gradient_checker.double_grad_check(
            [data], out, x_init=[data_arr], place=place, eps=eps
        )
        gradient_checker.double_grad_check_for_dygraph(
            self.squeeze_wrapper, [data], out, x_init=[data_arr], place=place
        )

    def test_grad(self):
        paddle.enable_static()
        # Check on CPU, and on GPU too when this build has CUDA support.
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for place in places:
            self.func(place)


class TestSqueezeTripleGradCheck(unittest.TestCase):
    """Third-order gradient check for squeeze via finite differences."""

    def squeeze_wrapper(self, x):
        return paddle.squeeze(x[0])

    @prog_scope()
    def func(self, place):
        # The shape of the input variable must be fully specified (no -1).
        eps = 0.005
        dtype = np.float32

        data = paddle.static.data('data', [2, 3], dtype)
        data.persistable = True
        out = paddle.squeeze(data)
        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)

        gradient_checker.triple_grad_check(
            [data], out, x_init=[data_arr], place=place, eps=eps
        )
        gradient_checker.triple_grad_check_for_dygraph(
            self.squeeze_wrapper, [data], out, x_init=[data_arr], place=place
        )

    def test_grad(self):
        paddle.enable_static()
        # Check on CPU, and on GPU too when this build has CUDA support.
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for place in places:
            self.func(place)


# Standard unittest entry point: run all test cases defined in this module.
if __name__ == "__main__":
    unittest.main()