#   Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function
import unittest
17

import numpy as np

import paddle
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
from op_test import OpTest, convert_float_to_uint16
import paddle.fluid.core as core
import gradient_checker
from decorator_helper import prog_scope
import paddle.fluid.layers as layers
28

29
paddle.enable_static()
30 31 32


# Correct: General.
C
chenweihang 已提交
33
class TestSqueezeOp(OpTest):
34

35
    def setUp(self):
36
        self.op_type = "squeeze"
C
chenweihang 已提交
37
        self.init_test_case()
38
        self.inputs = {"X": np.random.random(self.ori_shape).astype("float64")}
C
chenweihang 已提交
39
        self.init_attrs()
40 41 42
        self.outputs = {
            "Out": self.inputs["X"].reshape(self.new_shape),
        }
43 44

    def test_check_output(self):
45 46 47 48 49 50 51 52 53 54 55 56 57 58 59
        self.check_output()

    def test_check_grad(self):
        self.check_grad(["X"], "Out")

    def init_test_case(self):
        self.ori_shape = (1, 3, 1, 40)
        self.axes = (0, 2)
        self.new_shape = (3, 40)

    def init_attrs(self):
        self.attrs = {"axes": self.axes}


class TestSqueezeBF16Op(OpTest):
60

61 62 63 64 65 66 67 68 69 70 71
    def setUp(self):
        self.op_type = "squeeze"
        self.dtype = np.uint16
        self.init_test_case()
        x = np.random.random(self.ori_shape).astype("float32")
        out = x.reshape(self.new_shape)
        self.inputs = {"X": convert_float_to_uint16(x)}
        self.init_attrs()
        self.outputs = {"Out": convert_float_to_uint16(out)}

    def test_check_output(self):
72
        self.check_output()
73 74 75 76

    def test_check_grad(self):
        self.check_grad(["X"], "Out")

C
chenweihang 已提交
77
    def init_test_case(self):
Z
zhupengyang 已提交
78
        self.ori_shape = (1, 3, 1, 40)
C
chenweihang 已提交
79
        self.axes = (0, 2)
Z
zhupengyang 已提交
80
        self.new_shape = (3, 40)
81

C
chenweihang 已提交
82
    def init_attrs(self):
83
        self.attrs = {"axes": self.axes}
84 85


C
chenweihang 已提交
86 87
# Correct: There is mins axis.
class TestSqueezeOp1(TestSqueezeOp):
88

C
chenweihang 已提交
89
    def init_test_case(self):
Z
zhupengyang 已提交
90
        self.ori_shape = (1, 3, 1, 40)
C
chenweihang 已提交
91
        self.axes = (0, -2)
Z
zhupengyang 已提交
92
        self.new_shape = (3, 40)
93 94 95


# Correct: No axes input.
C
chenweihang 已提交
96
class TestSqueezeOp2(TestSqueezeOp):
97

C
chenweihang 已提交
98
    def init_test_case(self):
Z
zhupengyang 已提交
99
        self.ori_shape = (1, 20, 1, 5)
C
chenweihang 已提交
100
        self.axes = ()
Z
zhupengyang 已提交
101
        self.new_shape = (20, 5)
102 103


104
# Correct: Just part of axes be squeezed.
C
chenweihang 已提交
105
class TestSqueezeOp3(TestSqueezeOp):
106

C
chenweihang 已提交
107
    def init_test_case(self):
Z
zhupengyang 已提交
108
        self.ori_shape = (6, 1, 5, 1, 4, 1)
C
chenweihang 已提交
109
        self.axes = (1, -1)
Z
zhupengyang 已提交
110
        self.new_shape = (6, 5, 1, 4)
111 112


L
Leo Chen 已提交
113 114
# Correct: The demension of axis is not of size 1 remains unchanged.
class TestSqueezeOp4(TestSqueezeOp):
115

L
Leo Chen 已提交
116 117 118 119 120 121
    def init_test_case(self):
        self.ori_shape = (6, 1, 5, 1, 4, 1)
        self.axes = (1, 2)
        self.new_shape = (6, 5, 1, 4, 1)


122
class TestSqueezeOpError(unittest.TestCase):
123

124
    def test_errors(self):
125
        paddle.enable_static()
126 127
        with program_guard(Program(), Program()):
            # The input type of softmax_op must be Variable.
128 129
            x1 = fluid.create_lod_tensor(np.array([[-1]]), [[1]],
                                         paddle.CPUPlace())
130
            self.assertRaises(TypeError, paddle.squeeze, x1)
131
            # The input axes of squeeze must be list.
132 133
            x2 = paddle.static.data(name='x2', shape=[4], dtype="int32")
            self.assertRaises(TypeError, paddle.squeeze, x2, axes=0)
134
            # The input dtype of squeeze not support float16.
135 136
            x3 = paddle.static.data(name='x3', shape=[4], dtype="float16")
            self.assertRaises(TypeError, paddle.squeeze, x3, axes=0)
137 138


139
class API_TestSqueeze(unittest.TestCase):
140

141 142 143 144 145 146
    def setUp(self):
        self.executed_api()

    def executed_api(self):
        self.squeeze = paddle.squeeze

147
    def test_out(self):
148 149 150
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program(),
                                         paddle.static.Program()):
151 152 153
            data1 = paddle.static.data('data1',
                                       shape=[-1, 1, 10],
                                       dtype='float64')
154
            result_squeeze = self.squeeze(data1, axis=[1])
155 156
            place = paddle.CPUPlace()
            exe = paddle.static.Executor(place)
157 158 159 160
            input1 = np.random.random([5, 1, 10]).astype('float64')
            result, = exe.run(feed={"data1": input1},
                              fetch_list=[result_squeeze])
            expected_result = np.squeeze(input1, axis=1)
161
            np.testing.assert_allclose(expected_result, result, rtol=1e-05)
162 163


164
class API_TestStaticSqueeze_(API_TestSqueeze):
165

166 167 168 169
    def executed_api(self):
        self.squeeze = paddle.squeeze_


170
class API_TestDygraphSqueeze(unittest.TestCase):
171

172 173 174 175 176 177
    def setUp(self):
        self.executed_api()

    def executed_api(self):
        self.squeeze = paddle.squeeze

178
    def test_out(self):
179 180 181
        paddle.disable_static()
        input_1 = np.random.random([5, 1, 10]).astype("int32")
        input = paddle.to_tensor(input_1)
182
        output = self.squeeze(input, axis=[1])
183 184
        out_np = output.numpy()
        expected_out = np.squeeze(input_1, axis=1)
185
        np.testing.assert_allclose(expected_out, out_np, rtol=1e-05)
186 187 188 189 190

    def test_out_int8(self):
        paddle.disable_static()
        input_1 = np.random.random([5, 1, 10]).astype("int8")
        input = paddle.to_tensor(input_1)
191
        output = self.squeeze(input, axis=[1])
192 193
        out_np = output.numpy()
        expected_out = np.squeeze(input_1, axis=1)
194
        np.testing.assert_allclose(expected_out, out_np, rtol=1e-05)
195 196 197 198 199

    def test_out_uint8(self):
        paddle.disable_static()
        input_1 = np.random.random([5, 1, 10]).astype("uint8")
        input = paddle.to_tensor(input_1)
200
        output = self.squeeze(input, axis=[1])
201 202
        out_np = output.numpy()
        expected_out = np.squeeze(input_1, axis=1)
203
        np.testing.assert_allclose(expected_out, out_np, rtol=1e-05)
L
Leo Chen 已提交
204 205

    def test_axis_not_list(self):
206 207 208
        paddle.disable_static()
        input_1 = np.random.random([5, 1, 10]).astype("int32")
        input = paddle.to_tensor(input_1)
209
        output = self.squeeze(input, axis=1)
210 211
        out_np = output.numpy()
        expected_out = np.squeeze(input_1, axis=1)
212
        np.testing.assert_allclose(expected_out, out_np, rtol=1e-05)
L
Leo Chen 已提交
213 214

    def test_dimension_not_1(self):
215 216 217
        paddle.disable_static()
        input_1 = np.random.random([5, 1, 10]).astype("int32")
        input = paddle.to_tensor(input_1)
218
        output = self.squeeze(input, axis=(1, 0))
219 220
        out_np = output.numpy()
        expected_out = np.squeeze(input_1, axis=1)
221
        np.testing.assert_allclose(expected_out, out_np, rtol=1e-05)


224
class API_TestDygraphSqueezeInplace(API_TestDygraphSqueeze):
225

226 227 228 229
    def executed_api(self):
        self.squeeze = paddle.squeeze_


class TestSqueezeDoubleGradCheck(unittest.TestCase):

    def squeeze_wrapper(self, x):
        return paddle.squeeze(x[0])

    @prog_scope()
    def func(self, place):
        # the shape of input variable should be clearly specified, not inlcude -1.
        eps = 0.005
        dtype = np.float32

        data = layers.data('data', [2, 3], False, dtype)
        data.persistable = True
        out = paddle.squeeze(data)
        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)

        gradient_checker.double_grad_check([data],
                                           out,
                                           x_init=[data_arr],
                                           place=place,
                                           eps=eps)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        gradient_checker.double_grad_check_for_dygraph(self.squeeze_wrapper,
                                                       [data],
                                                       out,
                                                       x_init=[data_arr],
                                                       place=place)

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestSqueezeTripleGradCheck(unittest.TestCase):

    def squeeze_wrapper(self, x):
        return paddle.squeeze(x[0])

    @prog_scope()
    def func(self, place):
        # the shape of input variable should be clearly specified, not inlcude -1.
        eps = 0.005
        dtype = np.float32

        data = layers.data('data', [2, 3], False, dtype)
        data.persistable = True
        out = paddle.squeeze(data)
        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)

        gradient_checker.triple_grad_check([data],
                                           out,
                                           x_init=[data_arr],
                                           place=place,
                                           eps=eps)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        gradient_checker.triple_grad_check_for_dygraph(self.squeeze_wrapper,
                                                       [data],
                                                       out,
                                                       x_init=[data_arr],
                                                       place=place)

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


if __name__ == "__main__":
    unittest.main()