# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import gradient_checker
import numpy as np
from decorator_helper import prog_scope
from op_test import OpTest, convert_float_to_uint16

import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core

paddle.enable_static()


# Correct: General.
class TestUnsqueezeOp(OpTest):
    def setUp(self):
        self.init_test_case()
        self.op_type = "unsqueeze"
        self.inputs = {"X": np.random.random(self.ori_shape).astype("float64")}
        self.init_attrs()
        self.outputs = {"Out": self.inputs["X"].reshape(self.new_shape)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(["X"], "Out")

    def init_test_case(self):
        self.ori_shape = (3, 40)
        self.axes = (1, 2)
        self.new_shape = (3, 1, 1, 40)

    def init_attrs(self):
        self.attrs = {"axes": self.axes}


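# Correct: General, with bfloat16 input (represented as uint16).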
class TestUnsqueezeBF16Op(OpTest):
    def setUp(self):
        self.init_test_case()
        self.op_type = "unsqueeze"
        self.dtype = np.uint16
        x = np.random.random(self.ori_shape).astype("float32")
        out = x.reshape(self.new_shape)
        self.inputs = {"X": convert_float_to_uint16(x)}
        self.init_attrs()
        self.outputs = {"Out": convert_float_to_uint16(out)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(["X"], "Out")

    def init_test_case(self):
        self.ori_shape = (3, 40)
        self.axes = (1, 2)
        self.new_shape = (3, 1, 1, 40)

    def init_attrs(self):
        self.attrs = {"axes": self.axes}


# Correct: Single input index.
class TestUnsqueezeOp1(TestUnsqueezeOp):
    def init_test_case(self):
        self.ori_shape = (20, 5)
        self.axes = (-1,)
        self.new_shape = (20, 5, 1)


# Correct: Mixed input axes.
class TestUnsqueezeOp2(TestUnsqueezeOp):
    def init_test_case(self):
        self.ori_shape = (20, 5)
        self.axes = (0, -1)
        self.new_shape = (1, 20, 5, 1)


# Correct: There is a duplicated axis.
class TestUnsqueezeOp3(TestUnsqueezeOp):
    def init_test_case(self):
        self.ori_shape = (10, 2, 5)
        self.axes = (0, 3, 3)
        self.new_shape = (1, 10, 2, 1, 1, 5)


# Correct: Reversed axes.
class TestUnsqueezeOp4(TestUnsqueezeOp):
    def init_test_case(self):
        self.ori_shape = (10, 2, 5)
        self.axes = (3, 1, 1)
        self.new_shape = (10, 1, 1, 2, 5, 1)


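# Correct: 0-D (scalar) input.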
class TestUnsqueezeOp_ZeroDim1(TestUnsqueezeOp):
    def init_test_case(self):
        self.ori_shape = ()
        self.axes = (-1,)
        self.new_shape = 1


class TestUnsqueezeOp_ZeroDim2(TestUnsqueezeOp):
    def init_test_case(self):
        self.ori_shape = ()
        self.axes = (-1, 1)
        self.new_shape = (1, 1)


class TestUnsqueezeOp_ZeroDim3(TestUnsqueezeOp):
    def init_test_case(self):
        self.ori_shape = ()
        self.axes = (0, 1, 2)
        self.new_shape = (1, 1, 1)


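# Static-graph API: axis given as a Python list.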
class API_TestUnsqueeze(unittest.TestCase):
    def test_out(self):
        paddle.enable_static()
        with paddle.static.program_guard(
            paddle.static.Program(), paddle.static.Program()
        ):
            data1 = paddle.static.data('data1', shape=[-1, 10], dtype='float64')
            result_squeeze = paddle.unsqueeze(data1, axis=[1])
            place = paddle.CPUPlace()
            exe = paddle.static.Executor(place)
            input1 = np.random.random([5, 1, 10]).astype('float64')
            input = np.squeeze(input1, axis=1)
            (result,) = exe.run(
                feed={"data1": input}, fetch_list=[result_squeeze]
            )
            np.testing.assert_allclose(input1, result, rtol=1e-05)


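# Invalid axis type (a float) should raise TypeError.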
class TestUnsqueezeOpError(unittest.TestCase):
    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(
            paddle.static.Program(), paddle.static.Program()
        ):
            # The type of axis in unsqueeze should be int or Variable.
            def test_axes_type():
                x6 = paddle.static.data(
                    shape=[-1, 10], dtype='float16', name='x3'
                )
                paddle.unsqueeze(x6, axis=3.2)

            self.assertRaises(TypeError, test_axes_type)


class API_TestUnsqueeze2(unittest.TestCase):
    def test_out(self):
        paddle.enable_static()
        with paddle.static.program_guard(
            paddle.static.Program(), paddle.static.Program()
        ):
            data1 = paddle.static.data('data1', shape=[-1, 10], dtype='float64')
            data2 = paddle.static.data('data2', shape=[1], dtype='int32')
            result_squeeze = paddle.unsqueeze(data1, axis=data2)
            place = paddle.CPUPlace()
            exe = paddle.static.Executor(place)
            input1 = np.random.random([5, 1, 10]).astype('float64')
            input2 = np.array([1]).astype('int32')
            input = np.squeeze(input1, axis=1)
            (result1,) = exe.run(
                feed={"data1": input, "data2": input2},
                fetch_list=[result_squeeze],
            )
            np.testing.assert_allclose(input1, result1, rtol=1e-05)


class API_TestUnsqueeze3(unittest.TestCase):
    def test_out(self):
        paddle.enable_static()
        with paddle.static.program_guard(
            paddle.static.Program(), paddle.static.Program()
        ):
            data1 = paddle.static.data('data1', shape=[-1, 10], dtype='float64')
            data2 = paddle.static.data('data2', shape=[1], dtype='int32')
            result_squeeze = paddle.unsqueeze(data1, axis=[data2, 3])
            place = paddle.CPUPlace()
            exe = paddle.static.Executor(place)
            input1 = np.random.random([5, 1, 10, 1]).astype('float64')
            input2 = np.array([1]).astype('int32')
            input = np.squeeze(input1)
            (result1,) = exe.run(
                feed={"data1": input, "data2": input2},
                fetch_list=[result_squeeze],
            )
            np.testing.assert_array_equal(input1, result1)
            self.assertEqual(input1.shape, result1.shape)


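# Dygraph API: axis given as a Python list.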
class API_TestDyUnsqueeze(unittest.TestCase):
    def test_out(self):
        paddle.disable_static()
        input_1 = np.random.random([5, 1, 10]).astype("int32")
        input1 = np.expand_dims(input_1, axis=1)
        input = paddle.to_tensor(input_1)
        output = paddle.unsqueeze(input, axis=[1])
        out_np = output.numpy()
        np.testing.assert_array_equal(input1, out_np)
        self.assertEqual(input1.shape, out_np.shape)


class API_TestDyUnsqueeze2(unittest.TestCase):
    def test_out(self):
        paddle.disable_static()
        input1 = np.random.random([5, 10]).astype("int32")
        out1 = np.expand_dims(input1, axis=1)
        input = paddle.to_tensor(input1)
        output = paddle.unsqueeze(input, axis=1)
        out_np = output.numpy()
        np.testing.assert_array_equal(out1, out_np)
        self.assertEqual(out1.shape, out_np.shape)


class API_TestDyUnsqueezeAxisTensor(unittest.TestCase):
    def test_out(self):
        paddle.disable_static()
        input1 = np.random.random([5, 10]).astype("int32")
        out1 = np.expand_dims(input1, axis=1)
        out1 = np.expand_dims(out1, axis=2)
        input = paddle.to_tensor(input1)
        output = paddle.unsqueeze(input, axis=paddle.to_tensor([1, 2]))
        out_np = output.numpy()
        np.testing.assert_array_equal(out1, out_np)
        self.assertEqual(out1.shape, out_np.shape)


class API_TestDyUnsqueezeAxisTensorList(unittest.TestCase):
    def test_out(self):
        paddle.disable_static()
        input1 = np.random.random([5, 10]).astype("int32")
        # Actually, expand_dims supports tuple since version 1.18.0
        out1 = np.expand_dims(input1, axis=1)
        out1 = np.expand_dims(out1, axis=2)
        input = paddle.to_tensor(input1)
        output = paddle.unsqueeze(
            paddle.to_tensor(input1),
            axis=[paddle.to_tensor([1]), paddle.to_tensor([2])],
        )
        out_np = output.numpy()
        np.testing.assert_array_equal(out1, out_np)
        self.assertEqual(out1.shape, out_np.shape)


class API_TestDygraphUnSqueeze(unittest.TestCase):
    def setUp(self):
        self.executed_api()

    def executed_api(self):
        self.unsqueeze = paddle.unsqueeze

    def test_out(self):
        paddle.disable_static()
        input_1 = np.random.random([5, 1, 10]).astype("int32")
        input = paddle.to_tensor(input_1)
        output = self.unsqueeze(input, axis=[1])
        out_np = output.numpy()
        expected_out = np.expand_dims(input_1, axis=1)
        np.testing.assert_allclose(expected_out, out_np, rtol=1e-05)

    def test_out_int8(self):
        paddle.disable_static()
        input_1 = np.random.random([5, 1, 10]).astype("int8")
        input = paddle.to_tensor(input_1)
        output = self.unsqueeze(input, axis=[1])
        out_np = output.numpy()
        expected_out = np.expand_dims(input_1, axis=1)
        np.testing.assert_allclose(expected_out, out_np, rtol=1e-05)

    def test_out_uint8(self):
        paddle.disable_static()
        input_1 = np.random.random([5, 1, 10]).astype("uint8")
        input = paddle.to_tensor(input_1)
        output = self.unsqueeze(input, axis=1)
        out_np = output.numpy()
        expected_out = np.expand_dims(input_1, axis=1)
        np.testing.assert_allclose(expected_out, out_np, rtol=1e-05)

    def test_axis_not_list(self):
        paddle.disable_static()
        input_1 = np.random.random([5, 1, 10]).astype("int32")
        input = paddle.to_tensor(input_1)
        output = self.unsqueeze(input, axis=1)
        out_np = output.numpy()
        expected_out = np.expand_dims(input_1, axis=1)
        np.testing.assert_allclose(expected_out, out_np, rtol=1e-05)

    def test_dimension_not_1(self):
        paddle.disable_static()
        input_1 = np.random.random([5, 1, 10]).astype("int32")
        input = paddle.to_tensor(input_1)
        output = self.unsqueeze(input, axis=(1, 2))
        out_np = output.numpy()
        expected_out = np.expand_dims(input_1, axis=(1, 2))
        np.testing.assert_allclose(expected_out, out_np, rtol=1e-05)


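# Same coverage as above, but with the in-place API paddle.unsqueeze_.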
class API_TestDygraphUnSqueezeInplace(API_TestDygraphUnSqueeze):
    def executed_api(self):
        self.unsqueeze = paddle.unsqueeze_


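# Second-order (double) gradient check for unsqueeze.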
class TestUnsqueezeDoubleGradCheck(unittest.TestCase):
    def unsqueeze_wrapper(self, x):
        return paddle.unsqueeze(x[0], [0, 2])

    @prog_scope()
    def func(self, place):
        # The shape of the input variable should be clearly specified, not include -1.
        eps = 0.005
        dtype = np.float32

        data = paddle.static.data('data', [2, 3, 4], dtype)
        data.persistable = True
        out = paddle.unsqueeze(data, [0, 2])
        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)

        gradient_checker.double_grad_check(
            [data], out, x_init=[data_arr], place=place, eps=eps
        )
        gradient_checker.double_grad_check_for_dygraph(
            self.unsqueeze_wrapper, [data], out, x_init=[data_arr], place=place
        )

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


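# Third-order (triple) gradient check for unsqueeze.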
class TestUnsqueezeTripleGradCheck(unittest.TestCase):
    def unsqueeze_wrapper(self, x):
        return paddle.unsqueeze(x[0], [0, 2])

    @prog_scope()
    def func(self, place):
        # The shape of the input variable should be clearly specified, not include -1.
        eps = 0.005
        dtype = np.float32

        data = paddle.static.data('data', [2, 3, 4], dtype)
        data.persistable = True
        out = paddle.unsqueeze(data, [0, 2])
        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)

        gradient_checker.triple_grad_check(
            [data], out, x_init=[data_arr], place=place, eps=eps
        )
        gradient_checker.triple_grad_check_for_dygraph(
            self.unsqueeze_wrapper, [data], out, x_init=[data_arr], place=place
        )

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


if __name__ == "__main__":
    unittest.main()