# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np

import paddle
import paddle.fluid as fluid
from op_test import OpTest, convert_float_to_uint16
import paddle.fluid.core as core
import gradient_checker
from decorator_helper import prog_scope
import paddle.fluid.layers as layers

paddle.enable_static()


# Correct: General.
class TestUnsqueezeOp(OpTest):

    def setUp(self):
        self.init_test_case()
        self.op_type = "unsqueeze"
        self.inputs = {"X": np.random.random(self.ori_shape).astype("float64")}
        self.init_attrs()
        self.outputs = {"Out": self.inputs["X"].reshape(self.new_shape)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(["X"], "Out")

    def init_test_case(self):
        self.ori_shape = (3, 40)
        self.axes = (1, 2)
        self.new_shape = (3, 1, 1, 40)

    def init_attrs(self):
        self.attrs = {"axes": self.axes}


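# Correct: General, bfloat16 data type.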
class TestUnsqueezeBF16Op(OpTest):

    def setUp(self):
        self.init_test_case()
        self.op_type = "unsqueeze"
        self.dtype = np.uint16
        x = np.random.random(self.ori_shape).astype("float32")
        out = x.reshape(self.new_shape)
        self.inputs = {"X": convert_float_to_uint16(x)}
        self.init_attrs()
        self.outputs = {"Out": convert_float_to_uint16(out)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(["X"], "Out")

    def init_test_case(self):
        self.ori_shape = (3, 40)
        self.axes = (1, 2)
        self.new_shape = (3, 1, 1, 40)

    def init_attrs(self):
        self.attrs = {"axes": self.axes}


# Correct: Single input index.
class TestUnsqueezeOp1(TestUnsqueezeOp):

    def init_test_case(self):
        self.ori_shape = (20, 5)
        self.axes = (-1, )
        self.new_shape = (20, 5, 1)


# Correct: Mixed input axis.
class TestUnsqueezeOp2(TestUnsqueezeOp):

    def init_test_case(self):
        self.ori_shape = (20, 5)
        self.axes = (0, -1)
        self.new_shape = (1, 20, 5, 1)


# Correct: There is duplicated axis.
class TestUnsqueezeOp3(TestUnsqueezeOp):

    def init_test_case(self):
        self.ori_shape = (10, 2, 5)
        self.axes = (0, 3, 3)
        self.new_shape = (1, 10, 2, 1, 1, 5)


# Correct: Reversed axes.
class TestUnsqueezeOp4(TestUnsqueezeOp):

    def init_test_case(self):
        self.ori_shape = (10, 2, 5)
        self.axes = (3, 1, 1)
        self.new_shape = (10, 1, 1, 2, 5, 1)


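# Correct: Unsqueeze a 0-D tensor.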
class TestUnsqueezeOp_ZeroDim1(TestUnsqueezeOp):

    def init_test_case(self):
        self.ori_shape = ()
        self.axes = (-1, )
        self.new_shape = (1, )


class TestUnsqueezeOp_ZeroDim2(TestUnsqueezeOp):

    def init_test_case(self):
        self.ori_shape = ()
        self.axes = (-1, 1)
        self.new_shape = (1, 1)


class TestUnsqueezeOp_ZeroDim3(TestUnsqueezeOp):

    def init_test_case(self):
        self.ori_shape = ()
        self.axes = (0, 1, 2)
        self.new_shape = (1, 1, 1)


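# Test the paddle.unsqueeze API in static graph mode.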
class API_TestUnsqueeze(unittest.TestCase):

    def test_out(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program(),
                                         paddle.static.Program()):
            data1 = paddle.static.data('data1', shape=[-1, 10], dtype='float64')
            result_squeeze = paddle.unsqueeze(data1, axis=[1])
            place = paddle.CPUPlace()
            exe = paddle.static.Executor(place)
            input1 = np.random.random([5, 1, 10]).astype('float64')
            input = np.squeeze(input1, axis=1)
            result, = exe.run(feed={"data1": input},
                              fetch_list=[result_squeeze])
            np.testing.assert_allclose(input1, result, rtol=1e-05)


class TestUnsqueezeOpError(unittest.TestCase):

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program(),
                                         paddle.static.Program()):

            # The type of axis in unsqueeze should be int, list of int, or Tensor.
            def test_axes_type():
                x6 = paddle.static.data(shape=[-1, 10],
                                        dtype='float16',
                                        name='x6')
                paddle.unsqueeze(x6, axis=3.2)

            self.assertRaises(TypeError, test_axes_type)


class API_TestUnsqueeze2(unittest.TestCase):

    def test_out(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program(),
                                         paddle.static.Program()):
            data1 = paddle.static.data('data1', shape=[-1, 10], dtype='float64')
            data2 = paddle.static.data('data2', shape=[1], dtype='int32')
            result_squeeze = paddle.unsqueeze(data1, axis=data2)
            place = paddle.CPUPlace()
            exe = paddle.static.Executor(place)
            input1 = np.random.random([5, 1, 10]).astype('float64')
            input2 = np.array([1]).astype('int32')
            input = np.squeeze(input1, axis=1)
            result1, = exe.run(feed={
                "data1": input,
                "data2": input2
            },
                               fetch_list=[result_squeeze])
            np.testing.assert_allclose(input1, result1, rtol=1e-05)


class API_TestUnsqueeze3(unittest.TestCase):

    def test_out(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program(),
                                         paddle.static.Program()):
            data1 = paddle.static.data('data1', shape=[-1, 10], dtype='float64')
            data2 = paddle.static.data('data2', shape=[1], dtype='int32')
            result_squeeze = paddle.unsqueeze(data1, axis=[data2, 3])
            place = paddle.CPUPlace()
            exe = paddle.static.Executor(place)
            input1 = np.random.random([5, 1, 10, 1]).astype('float64')
            input2 = np.array([1]).astype('int32')
            input = np.squeeze(input1)
            result1, = exe.run(feed={
                "data1": input,
                "data2": input2
            },
                               fetch_list=[result_squeeze])
            np.testing.assert_array_equal(input1, result1)
            self.assertEqual(input1.shape, result1.shape)


class API_TestDyUnsqueeze(unittest.TestCase):

    def test_out(self):
        paddle.disable_static()
        input_1 = np.random.random([5, 1, 10]).astype("int32")
        input1 = np.expand_dims(input_1, axis=1)
        input = paddle.to_tensor(input_1)
        output = paddle.unsqueeze(input, axis=[1])
        out_np = output.numpy()
        np.testing.assert_array_equal(input1, out_np)
        self.assertEqual(input1.shape, out_np.shape)


class API_TestDyUnsqueeze2(unittest.TestCase):

    def test_out(self):
        paddle.disable_static()
        input1 = np.random.random([5, 10]).astype("int32")
        out1 = np.expand_dims(input1, axis=1)
        input = paddle.to_tensor(input1)
        output = paddle.unsqueeze(input, axis=1)
        out_np = output.numpy()
        np.testing.assert_array_equal(out1, out_np)
        self.assertEqual(out1.shape, out_np.shape)


class API_TestDyUnsqueezeAxisTensor(unittest.TestCase):

    def test_out(self):
        paddle.disable_static()
        input1 = np.random.random([5, 10]).astype("int32")
        out1 = np.expand_dims(input1, axis=1)
        out1 = np.expand_dims(out1, axis=2)
        input = paddle.to_tensor(input1)
        output = paddle.unsqueeze(input, axis=paddle.to_tensor([1, 2]))
        out_np = output.numpy()
        np.testing.assert_array_equal(out1, out_np)
        self.assertEqual(out1.shape, out_np.shape)


class API_TestDyUnsqueezeAxisTensorList(unittest.TestCase):

    def test_out(self):
        paddle.disable_static()
        input1 = np.random.random([5, 10]).astype("int32")
        # np.expand_dims has supported a tuple of axes since NumPy 1.18.0.
        out1 = np.expand_dims(input1, axis=1)
        out1 = np.expand_dims(out1, axis=2)
        input = paddle.to_tensor(input1)
        output = paddle.unsqueeze(input,
                                  axis=[paddle.to_tensor([1]),
                                        paddle.to_tensor([2])])
        out_np = output.numpy()
        np.testing.assert_array_equal(out1, out_np)
        self.assertEqual(out1.shape, out_np.shape)


class API_TestDygraphUnSqueeze(unittest.TestCase):

    def setUp(self):
        self.executed_api()

    def executed_api(self):
        self.unsqueeze = paddle.unsqueeze

    def test_out(self):
        paddle.disable_static()
        input_1 = np.random.random([5, 1, 10]).astype("int32")
        input = paddle.to_tensor(input_1)
        output = self.unsqueeze(input, axis=[1])
        out_np = output.numpy()
        expected_out = np.expand_dims(input_1, axis=1)
        np.testing.assert_allclose(expected_out, out_np, rtol=1e-05)

    def test_out_int8(self):
        paddle.disable_static()
        input_1 = np.random.random([5, 1, 10]).astype("int8")
        input = paddle.to_tensor(input_1)
        output = self.unsqueeze(input, axis=[1])
        out_np = output.numpy()
        expected_out = np.expand_dims(input_1, axis=1)
        np.testing.assert_allclose(expected_out, out_np, rtol=1e-05)

    def test_out_uint8(self):
        paddle.disable_static()
        input_1 = np.random.random([5, 1, 10]).astype("uint8")
        input = paddle.to_tensor(input_1)
        output = self.unsqueeze(input, axis=1)
        out_np = output.numpy()
        expected_out = np.expand_dims(input_1, axis=1)
        np.testing.assert_allclose(expected_out, out_np, rtol=1e-05)

    def test_axis_not_list(self):
        paddle.disable_static()
        input_1 = np.random.random([5, 1, 10]).astype("int32")
        input = paddle.to_tensor(input_1)
        output = self.unsqueeze(input, axis=1)
        out_np = output.numpy()
        expected_out = np.expand_dims(input_1, axis=1)
        np.testing.assert_allclose(expected_out, out_np, rtol=1e-05)

    def test_dimension_not_1(self):
        paddle.disable_static()
        input_1 = np.random.random([5, 1, 10]).astype("int32")
        input = paddle.to_tensor(input_1)
        output = self.unsqueeze(input, axis=(1, 2))
        out_np = output.numpy()
        expected_out = np.expand_dims(input_1, axis=(1, 2))
        np.testing.assert_allclose(expected_out, out_np, rtol=1e-05)


class API_TestDygraphUnSqueezeInplace(API_TestDygraphUnSqueeze):

    def executed_api(self):
        self.unsqueeze = paddle.unsqueeze_


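# Check high-order gradients of unsqueeze with gradient_checker.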
class TestUnsqueezeDoubleGradCheck(unittest.TestCase):

    def unsqueeze_wrapper(self, x):
        return paddle.unsqueeze(x[0], [0, 2])

    @prog_scope()
    def func(self, place):
        # The shape of the input variable should be specified explicitly and must not include -1.
        eps = 0.005
        dtype = np.float32

        data = layers.data('data', [2, 3, 4], False, dtype)
        data.persistable = True
        out = paddle.unsqueeze(data, [0, 2])
        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)

        gradient_checker.double_grad_check([data],
                                           out,
                                           x_init=[data_arr],
                                           place=place,
                                           eps=eps)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        gradient_checker.double_grad_check_for_dygraph(self.unsqueeze_wrapper,
                                                       [data],
                                                       out,
                                                       x_init=[data_arr],
                                                       place=place)

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestUnsqueezeTripleGradCheck(unittest.TestCase):

    def unsqueeze_wrapper(self, x):
        return paddle.unsqueeze(x[0], [0, 2])

    @prog_scope()
    def func(self, place):
        # The shape of the input variable should be specified explicitly and must not include -1.
        eps = 0.005
        dtype = np.float32

        data = layers.data('data', [2, 3, 4], False, dtype)
        data.persistable = True
        out = paddle.unsqueeze(data, [0, 2])
        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)

        gradient_checker.triple_grad_check([data],
                                           out,
                                           x_init=[data_arr],
                                           place=place,
                                           eps=eps)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        gradient_checker.triple_grad_check_for_dygraph(self.unsqueeze_wrapper,
                                                       [data],
                                                       out,
                                                       x_init=[data_arr],
                                                       place=place)

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


if __name__ == "__main__":
    unittest.main()