# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import gradient_checker
import numpy as np
from decorator_helper import prog_scope
from op_test import OpTest, convert_float_to_uint16

import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.fluid.layers as layers

paddle.enable_static()


# Correct: General.
class TestUnsqueezeOp(OpTest):
    """Base case for the unsqueeze op: insert size-1 dims at `axes`."""

    def setUp(self):
        self.op_type = "unsqueeze"
        self.init_test_case()
        self.inputs = {"X": np.random.random(self.ori_shape).astype("float64")}
        self.init_attrs()
        # Unsqueeze is a pure reshape, so the reference output is a reshape.
        self.outputs = {"Out": self.inputs["X"].reshape(self.new_shape)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(["X"], "Out")

    def init_test_case(self):
        # Subclasses override this to cover other axis combinations.
        self.axes = (1, 2)
        self.ori_shape = (3, 40)
        self.new_shape = (3, 1, 1, 40)

    def init_attrs(self):
        self.attrs = {"axes": self.axes}


class TestUnsqueezeBF16Op(OpTest):
    """Unsqueeze exercised with bfloat16 inputs (stored as uint16)."""

    def setUp(self):
        self.op_type = "unsqueeze"
        self.dtype = np.uint16
        self.init_test_case()
        # Build fp32 data first, then pack both input and reference output
        # into the uint16 bfloat16 representation.
        x_fp32 = np.random.random(self.ori_shape).astype("float32")
        self.inputs = {"X": convert_float_to_uint16(x_fp32)}
        self.init_attrs()
        self.outputs = {
            "Out": convert_float_to_uint16(x_fp32.reshape(self.new_shape))
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(["X"], "Out")

    def init_test_case(self):
        self.axes = (1, 2)
        self.ori_shape = (3, 40)
        self.new_shape = (3, 1, 1, 40)

    def init_attrs(self):
        self.attrs = {"axes": self.axes}


# Correct: Single input index.
class TestUnsqueezeOp1(TestUnsqueezeOp):
    """A single negative axis appends one size-1 dim at the end."""

    def init_test_case(self):
        self.axes = (-1,)
        self.ori_shape = (20, 5)
        self.new_shape = (20, 5, 1)


# Correct: Mixed input axis.
class TestUnsqueezeOp2(TestUnsqueezeOp):
    """Mixes a leading positive axis with a trailing negative axis."""

    def init_test_case(self):
        self.axes = (0, -1)
        self.ori_shape = (20, 5)
        self.new_shape = (1, 20, 5, 1)


# Correct: There is duplicated axis.
class TestUnsqueezeOp3(TestUnsqueezeOp):
    """Repeating an axis value inserts one size-1 dim per occurrence."""

    def init_test_case(self):
        self.axes = (0, 3, 3)
        self.ori_shape = (10, 2, 5)
        self.new_shape = (1, 10, 2, 1, 1, 5)


# Correct: Reversed axes.
class TestUnsqueezeOp4(TestUnsqueezeOp):
    """Axes given out of order (and duplicated) are still honored."""

    def init_test_case(self):
        self.axes = (3, 1, 1)
        self.ori_shape = (10, 2, 5)
        self.new_shape = (10, 1, 1, 2, 5, 1)


class TestUnsqueezeOp_ZeroDim1(TestUnsqueezeOp):
    """Unsqueeze a 0-d tensor into a 1-d tensor of shape (1,)."""

    def init_test_case(self):
        self.ori_shape = ()
        self.axes = (-1,)
        # Use a shape tuple for consistency with the other cases; the
        # original bare `1` is accepted by ndarray.reshape but (1,) is the
        # conventional form used everywhere else in this file.
        self.new_shape = (1,)


class TestUnsqueezeOp_ZeroDim2(TestUnsqueezeOp):
    """Unsqueeze a 0-d tensor twice, mixing negative and positive axes."""

    def init_test_case(self):
        self.axes = (-1, 1)
        self.ori_shape = ()
        self.new_shape = (1, 1)


class TestUnsqueezeOp_ZeroDim3(TestUnsqueezeOp):
    """Unsqueeze a 0-d tensor three times with consecutive axes."""

    def init_test_case(self):
        self.axes = (0, 1, 2)
        self.ori_shape = ()
        self.new_shape = (1, 1, 1)


class API_TestUnsqueeze(unittest.TestCase):
    def test_out(self):
        """Static-graph unsqueeze with a literal axis list round-trips squeeze."""
        paddle.enable_static()
        with paddle.static.program_guard(
            paddle.static.Program(), paddle.static.Program()
        ):
            data1 = paddle.static.data('data1', shape=[-1, 10], dtype='float64')
            result_squeeze = paddle.unsqueeze(data1, axis=[1])
            exe = paddle.static.Executor(paddle.CPUPlace())
            # Feed the squeezed array; unsqueeze should reconstruct it.
            expected = np.random.random([5, 1, 10]).astype('float64')
            feed_arr = np.squeeze(expected, axis=1)
            (result,) = exe.run(
                feed={"data1": feed_arr}, fetch_list=[result_squeeze]
            )
            np.testing.assert_allclose(expected, result, rtol=1e-05)


class TestUnsqueezeOpError(unittest.TestCase):
    def test_errors(self):
        """A non-integer axis must be rejected with TypeError."""
        paddle.enable_static()
        with paddle.static.program_guard(
            paddle.static.Program(), paddle.static.Program()
        ):
            # axis must be an int, a Variable, or a list of ints/Variables;
            # a float such as 3.2 is invalid.
            def test_axes_type():
                x6 = paddle.static.data(
                    shape=[-1, 10], dtype='float16', name='x3'
                )
                paddle.unsqueeze(x6, axis=3.2)

            self.assertRaises(TypeError, test_axes_type)


class API_TestUnsqueeze2(unittest.TestCase):
    def test_out(self):
        """Static-graph unsqueeze where the axis comes from an int32 Tensor."""
        paddle.enable_static()
        with paddle.static.program_guard(
            paddle.static.Program(), paddle.static.Program()
        ):
            data1 = paddle.static.data('data1', shape=[-1, 10], dtype='float64')
            data2 = paddle.static.data('data2', shape=[1], dtype='int32')
            result_squeeze = paddle.unsqueeze(data1, axis=data2)
            exe = paddle.static.Executor(paddle.CPUPlace())
            # Feed the squeezed array plus the axis tensor; unsqueeze should
            # reconstruct the original shape.
            expected = np.random.random([5, 1, 10]).astype('float64')
            axis_arr = np.array([1]).astype('int32')
            feed_arr = np.squeeze(expected, axis=1)
            (result1,) = exe.run(
                feed={"data1": feed_arr, "data2": axis_arr},
                fetch_list=[result_squeeze],
            )
            np.testing.assert_allclose(expected, result1, rtol=1e-05)


class API_TestUnsqueeze3(unittest.TestCase):
    def test_out(self):
        """Static-graph unsqueeze with a mixed axis list (Tensor and int)."""
        paddle.enable_static()
        with paddle.static.program_guard(
            paddle.static.Program(), paddle.static.Program()
        ):
            data1 = paddle.static.data('data1', shape=[-1, 10], dtype='float64')
            data2 = paddle.static.data('data2', shape=[1], dtype='int32')
            result_squeeze = paddle.unsqueeze(data1, axis=[data2, 3])
            exe = paddle.static.Executor(paddle.CPUPlace())
            # Squeeze out both size-1 dims, then let the op restore them.
            expected = np.random.random([5, 1, 10, 1]).astype('float64')
            axis_arr = np.array([1]).astype('int32')
            feed_arr = np.squeeze(expected)
            (result1,) = exe.run(
                feed={"data1": feed_arr, "data2": axis_arr},
                fetch_list=[result_squeeze],
            )
            np.testing.assert_array_equal(expected, result1)
            self.assertEqual(expected.shape, result1.shape)


class API_TestDyUnsqueeze(unittest.TestCase):
    def test_out(self):
        """Dygraph unsqueeze with a one-element axis list matches numpy."""
        paddle.disable_static()
        arr = np.random.random([5, 1, 10]).astype("int32")
        expected = np.expand_dims(arr, axis=1)
        out_np = paddle.unsqueeze(paddle.to_tensor(arr), axis=[1]).numpy()
        np.testing.assert_array_equal(expected, out_np)
        self.assertEqual(expected.shape, out_np.shape)


class API_TestDyUnsqueeze2(unittest.TestCase):
    def test_out(self):
        """Dygraph unsqueeze with a bare int axis matches numpy."""
        paddle.disable_static()
        arr = np.random.random([5, 10]).astype("int32")
        expected = np.expand_dims(arr, axis=1)
        out_np = paddle.unsqueeze(paddle.to_tensor(arr), axis=1).numpy()
        np.testing.assert_array_equal(expected, out_np)
        self.assertEqual(expected.shape, out_np.shape)


class API_TestDyUnsqueezeAxisTensor(unittest.TestCase):
    def test_out(self):
        """Dygraph unsqueeze with the axes supplied as a Tensor."""
        paddle.disable_static()
        arr = np.random.random([5, 10]).astype("int32")
        expected = np.expand_dims(np.expand_dims(arr, axis=1), axis=2)
        out_np = paddle.unsqueeze(
            paddle.to_tensor(arr), axis=paddle.to_tensor([1, 2])
        ).numpy()
        np.testing.assert_array_equal(expected, out_np)
        self.assertEqual(expected.shape, out_np.shape)


class API_TestDyUnsqueezeAxisTensorList(unittest.TestCase):
    def test_out(self):
        """Dygraph unsqueeze with a list of 1-D axis Tensors.

        Fix: the original created `input = paddle.to_tensor(input1)` and then
        never used it (the op was called on a second, separate `to_tensor`);
        the dead local is removed.
        """
        paddle.disable_static()
        arr = np.random.random([5, 10]).astype("int32")
        # np.expand_dims also accepts a tuple of axes since numpy 1.18.0,
        # but two single-axis calls keep the reference explicit.
        expected = np.expand_dims(np.expand_dims(arr, axis=1), axis=2)
        output = paddle.unsqueeze(
            paddle.to_tensor(arr),
            axis=[paddle.to_tensor([1]), paddle.to_tensor([2])],
        )
        out_np = output.numpy()
        np.testing.assert_array_equal(expected, out_np)
        self.assertEqual(expected.shape, out_np.shape)


class API_TestDygraphUnSqueeze(unittest.TestCase):
    """Dygraph tests for the API bound in `executed_api` (out-of-place here).

    The inplace subclass overrides `executed_api` to point at `unsqueeze_`.
    """

    def setUp(self):
        self.executed_api()

    def executed_api(self):
        self.unsqueeze = paddle.unsqueeze

    def _run_and_compare(self, np_dtype, axis):
        # Shared driver: unsqueeze a [5, 1, 10] array at axis 1 and compare
        # against numpy's expand_dims.
        paddle.disable_static()
        arr = np.random.random([5, 1, 10]).astype(np_dtype)
        out_np = self.unsqueeze(paddle.to_tensor(arr), axis=axis).numpy()
        expected = np.expand_dims(arr, axis=1)
        np.testing.assert_allclose(expected, out_np, rtol=1e-05)

    def test_out(self):
        self._run_and_compare("int32", [1])

    def test_out_int8(self):
        self._run_and_compare("int8", [1])

    def test_out_uint8(self):
        self._run_and_compare("uint8", 1)

    def test_axis_not_list(self):
        self._run_and_compare("int32", 1)

    def test_dimension_not_1(self):
        # Two axes at once; expand_dims accepts the same tuple directly.
        paddle.disable_static()
        arr = np.random.random([5, 1, 10]).astype("int32")
        out_np = self.unsqueeze(paddle.to_tensor(arr), axis=(1, 2)).numpy()
        expected = np.expand_dims(arr, axis=(1, 2))
        np.testing.assert_allclose(expected, out_np, rtol=1e-05)


class API_TestDygraphUnSqueezeInplace(API_TestDygraphUnSqueeze):
    """Re-run the dygraph suite against the in-place variant."""

    def executed_api(self):
        self.unsqueeze = paddle.unsqueeze_


class TestUnsqueezeDoubleGradCheck(unittest.TestCase):
    """Second-order gradient check for unsqueeze at axes [0, 2]."""

    def unsqueeze_wrapper(self, x):
        return paddle.unsqueeze(x[0], [0, 2])

    @prog_scope()
    def func(self, place):
        # The input shape must be fully specified (no -1) for the checker.
        dtype = np.float32
        data = layers.data('data', [2, 3, 4], False, dtype)
        data.persistable = True
        out = paddle.unsqueeze(data, [0, 2])
        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)

        gradient_checker.double_grad_check(
            [data], out, x_init=[data_arr], place=place, eps=0.005
        )
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        gradient_checker.double_grad_check_for_dygraph(
            self.unsqueeze_wrapper, [data], out, x_init=[data_arr], place=place
        )

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for place in places:
            self.func(place)


class TestUnsqueezeTripleGradCheck(unittest.TestCase):
    """Third-order gradient check for unsqueeze at axes [0, 2]."""

    def unsqueeze_wrapper(self, x):
        return paddle.unsqueeze(x[0], [0, 2])

    @prog_scope()
    def func(self, place):
        # The input shape must be fully specified (no -1) for the checker.
        dtype = np.float32
        data = layers.data('data', [2, 3, 4], False, dtype)
        data.persistable = True
        out = paddle.unsqueeze(data, [0, 2])
        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)

        gradient_checker.triple_grad_check(
            [data], out, x_init=[data_arr], place=place, eps=0.005
        )
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        gradient_checker.triple_grad_check_for_dygraph(
            self.unsqueeze_wrapper, [data], out, x_init=[data_arr], place=place
        )

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for place in places:
            self.func(place)


if __name__ == "__main__":
    # Run the whole suite when executed directly.
    unittest.main()