# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import gradient_checker
import numpy as np
from decorator_helper import prog_scope
from eager_op_test import OpTest, convert_float_to_uint16

import paddle
from paddle import fluid
from paddle.fluid import core

paddle.enable_static()


# Correct: General.
class TestUnsqueezeOp(OpTest):
    def setUp(self):
        self.init_test_case()
        self.op_type = "unsqueeze"
        self.inputs = {"X": np.random.random(self.ori_shape).astype("float64")}
        self.init_attrs()
        self.outputs = {"Out": self.inputs["X"].reshape(self.new_shape)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(["X"], "Out")

    def init_test_case(self):
        self.ori_shape = (3, 40)
        self.axes = (1, 2)
        self.new_shape = (3, 1, 1, 40)

    def init_attrs(self):
        self.attrs = {"axes": self.axes}


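# Correct: General, bfloat16 input (stored as uint16 via convert_float_to_uint16).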
class TestUnsqueezeBF16Op(OpTest):
    def setUp(self):
        self.init_test_case()
        self.op_type = "unsqueeze"
        self.dtype = np.uint16
        x = np.random.random(self.ori_shape).astype("float32")
        out = x.reshape(self.new_shape)
        self.inputs = {"X": convert_float_to_uint16(x)}
        self.init_attrs()
        self.outputs = {"Out": convert_float_to_uint16(out)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(["X"], "Out")

    def init_test_case(self):
        self.ori_shape = (3, 40)
        self.axes = (1, 2)
        self.new_shape = (3, 1, 1, 40)

    def init_attrs(self):
        self.attrs = {"axes": self.axes}


# Correct: Single input index.
class TestUnsqueezeOp1(TestUnsqueezeOp):
    def init_test_case(self):
        self.ori_shape = (20, 5)
        self.axes = (-1,)
        self.new_shape = (20, 5, 1)


# Correct: Mixed input axis.
class TestUnsqueezeOp2(TestUnsqueezeOp):
    def init_test_case(self):
        self.ori_shape = (20, 5)
        self.axes = (0, -1)
        self.new_shape = (1, 20, 5, 1)


# Correct: There are duplicated axes.
class TestUnsqueezeOp3(TestUnsqueezeOp):
    def init_test_case(self):
        self.ori_shape = (10, 2, 5)
        self.axes = (0, 3, 3)
        self.new_shape = (1, 10, 2, 1, 1, 5)


# Correct: Reversed axes.
class TestUnsqueezeOp4(TestUnsqueezeOp):
    def init_test_case(self):
        self.ori_shape = (10, 2, 5)
        self.axes = (3, 1, 1)
        self.new_shape = (10, 1, 1, 2, 5, 1)


# axis is empty, x is 0D
class TestUnsqueezeOp5(TestUnsqueezeOp):
    def init_test_case(self):
        self.ori_shape = ()
        self.axes = ()
        self.new_shape = ()


# axis is empty, x is ND
class TestUnsqueezeOp6(TestUnsqueezeOp):
    def init_test_case(self):
        self.ori_shape = (10, 2, 5)
        self.axes = ()
        self.new_shape = (10, 2, 5)


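# Correct: 0-D input; the ZeroDim cases below unsqueeze it to 1-D, 2-D and 3-D.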
class TestUnsqueezeOp_ZeroDim1(TestUnsqueezeOp):
    def init_test_case(self):
        self.ori_shape = ()
        self.axes = (-1,)
        self.new_shape = (1,)


class TestUnsqueezeOp_ZeroDim2(TestUnsqueezeOp):
    def init_test_case(self):
        self.ori_shape = ()
        self.axes = (-1, 1)
        self.new_shape = (1, 1)


class TestUnsqueezeOp_ZeroDim3(TestUnsqueezeOp):
    def init_test_case(self):
        self.ori_shape = ()
        self.axes = (0, 1, 2)
        self.new_shape = (1, 1, 1)


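# Static-graph API: axis given as a Python list.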
class API_TestUnsqueeze(unittest.TestCase):
    def test_out(self):
        paddle.enable_static()
        with paddle.static.program_guard(
            paddle.static.Program(), paddle.static.Program()
        ):
            data1 = paddle.static.data('data1', shape=[-1, 10], dtype='float64')
            result_squeeze = paddle.unsqueeze(data1, axis=[1])
            place = paddle.CPUPlace()
            exe = paddle.static.Executor(place)
            input1 = np.random.random([5, 1, 10]).astype('float64')
            input = np.squeeze(input1, axis=1)
            (result,) = exe.run(
                feed={"data1": input}, fetch_list=[result_squeeze]
            )
            np.testing.assert_allclose(input1, result, rtol=1e-05)


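# Error case: a non-integer axis should raise TypeError.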
class TestUnsqueezeOpError(unittest.TestCase):
    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(
            paddle.static.Program(), paddle.static.Program()
        ):
            # The type of axis in unsqueeze should be int or Variable.
            def test_axes_type():
                x6 = paddle.static.data(
                    shape=[-1, 10], dtype='float16', name='x6'
                )
                paddle.unsqueeze(x6, axis=3.2)

            self.assertRaises(TypeError, test_axes_type)


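# Static-graph API: axis given as an int32 Tensor.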
class API_TestUnsqueeze2(unittest.TestCase):
    def test_out(self):
        paddle.enable_static()
        with paddle.static.program_guard(
            paddle.static.Program(), paddle.static.Program()
        ):
            data1 = paddle.static.data('data1', shape=[-1, 10], dtype='float64')
            data2 = paddle.static.data('data2', shape=[1], dtype='int32')
            result_squeeze = paddle.unsqueeze(data1, axis=data2)
            place = paddle.CPUPlace()
            exe = paddle.static.Executor(place)
            input1 = np.random.random([5, 1, 10]).astype('float64')
            input2 = np.array([1]).astype('int32')
            input = np.squeeze(input1, axis=1)
            (result1,) = exe.run(
                feed={"data1": input, "data2": input2},
                fetch_list=[result_squeeze],
            )
            np.testing.assert_allclose(input1, result1, rtol=1e-05)


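# Static-graph API: axis given as a list mixing a Tensor and a Python int.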
class API_TestUnsqueeze3(unittest.TestCase):
    def test_out(self):
        paddle.enable_static()
        with paddle.static.program_guard(
            paddle.static.Program(), paddle.static.Program()
        ):
            data1 = paddle.static.data('data1', shape=[-1, 10], dtype='float64')
            data2 = paddle.static.data('data2', shape=[1], dtype='int32')
            result_squeeze = paddle.unsqueeze(data1, axis=[data2, 3])
            place = paddle.CPUPlace()
            exe = paddle.static.Executor(place)
            input1 = np.random.random([5, 1, 10, 1]).astype('float64')
            input2 = np.array([1]).astype('int32')
            input = np.squeeze(input1)
            (result1,) = exe.run(
                feed={"data1": input, "data2": input2},
                fetch_list=[result_squeeze],
            )
            np.testing.assert_array_equal(input1, result1)
            self.assertEqual(input1.shape, result1.shape)


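# Dygraph API: axis given as a Python list.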
class API_TestDyUnsqueeze(unittest.TestCase):
    def test_out(self):
        paddle.disable_static()
        input_1 = np.random.random([5, 1, 10]).astype("int32")
        input1 = np.expand_dims(input_1, axis=1)
        input = paddle.to_tensor(input_1)
        output = paddle.unsqueeze(input, axis=[1])
        out_np = output.numpy()
        np.testing.assert_array_equal(input1, out_np)
        self.assertEqual(input1.shape, out_np.shape)


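# Dygraph API: axis given as a Python int.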
class API_TestDyUnsqueeze2(unittest.TestCase):
    def test_out(self):
        paddle.disable_static()
        input1 = np.random.random([5, 10]).astype("int32")
        out1 = np.expand_dims(input1, axis=1)
        input = paddle.to_tensor(input1)
        output = paddle.unsqueeze(input, axis=1)
        out_np = output.numpy()
        np.testing.assert_array_equal(out1, out_np)
        self.assertEqual(out1.shape, out_np.shape)


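# Dygraph API: axis given as a Tensor.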
class API_TestDyUnsqueezeAxisTensor(unittest.TestCase):
    def test_out(self):
        paddle.disable_static()
        input1 = np.random.random([5, 10]).astype("int32")
        out1 = np.expand_dims(input1, axis=1)
        out1 = np.expand_dims(out1, axis=2)
        input = paddle.to_tensor(input1)
        output = paddle.unsqueeze(input, axis=paddle.to_tensor([1, 2]))
        out_np = output.numpy()
        np.testing.assert_array_equal(out1, out_np)
        self.assertEqual(out1.shape, out_np.shape)


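# Dygraph API: axis given as a list of Tensors.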
class API_TestDyUnsqueezeAxisTensorList(unittest.TestCase):
    def test_out(self):
        paddle.disable_static()
        input1 = np.random.random([5, 10]).astype("int32")
        # np.expand_dims accepts a tuple axis only since NumPy 1.18, so expand twice.
        out1 = np.expand_dims(input1, axis=1)
        out1 = np.expand_dims(out1, axis=2)
        input = paddle.to_tensor(input1)
        output = paddle.unsqueeze(
            input,
            axis=[paddle.to_tensor([1]), paddle.to_tensor([2])],
        )
        out_np = output.numpy()
        np.testing.assert_array_equal(out1, out_np)
        self.assertEqual(out1.shape, out_np.shape)


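# Dygraph API: int8/uint8 inputs and int, list, and tuple axis forms.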
class API_TestDygraphUnSqueeze(unittest.TestCase):
    def setUp(self):
        self.executed_api()

    def executed_api(self):
        self.unsqueeze = paddle.unsqueeze

    def test_out(self):
        paddle.disable_static()
        input_1 = np.random.random([5, 1, 10]).astype("int32")
        input = paddle.to_tensor(input_1)
        output = self.unsqueeze(input, axis=[1])
        out_np = output.numpy()
        expected_out = np.expand_dims(input_1, axis=1)
        np.testing.assert_allclose(expected_out, out_np, rtol=1e-05)

    def test_out_int8(self):
        paddle.disable_static()
        input_1 = np.random.random([5, 1, 10]).astype("int8")
        input = paddle.to_tensor(input_1)
        output = self.unsqueeze(input, axis=[1])
        out_np = output.numpy()
        expected_out = np.expand_dims(input_1, axis=1)
        np.testing.assert_allclose(expected_out, out_np, rtol=1e-05)

    def test_out_uint8(self):
        paddle.disable_static()
        input_1 = np.random.random([5, 1, 10]).astype("uint8")
        input = paddle.to_tensor(input_1)
        output = self.unsqueeze(input, axis=1)
        out_np = output.numpy()
        expected_out = np.expand_dims(input_1, axis=1)
        np.testing.assert_allclose(expected_out, out_np, rtol=1e-05)

    def test_axis_not_list(self):
        paddle.disable_static()
        input_1 = np.random.random([5, 1, 10]).astype("int32")
        input = paddle.to_tensor(input_1)
        output = self.unsqueeze(input, axis=1)
        out_np = output.numpy()
        expected_out = np.expand_dims(input_1, axis=1)
        np.testing.assert_allclose(expected_out, out_np, rtol=1e-05)

    def test_dimension_not_1(self):
        paddle.disable_static()
        input_1 = np.random.random([5, 1, 10]).astype("int32")
        input = paddle.to_tensor(input_1)
        output = self.unsqueeze(input, axis=(1, 2))
        out_np = output.numpy()
        expected_out = np.expand_dims(input_1, axis=(1, 2))
        np.testing.assert_allclose(expected_out, out_np, rtol=1e-05)


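# Run the same cases as above through the in-place API paddle.unsqueeze_.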
class API_TestDygraphUnSqueezeInplace(API_TestDygraphUnSqueeze):
    def executed_api(self):
        self.unsqueeze = paddle.unsqueeze_


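# Double gradient check for unsqueeze using gradient_checker.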
class TestUnsqueezeDoubleGradCheck(unittest.TestCase):
    def unsqueeze_wrapper(self, x):
        return paddle.unsqueeze(x[0], [0, 2])

    @prog_scope()
    def func(self, place):
        # The shape of the input variable should be fully specified; it must not include -1.
        eps = 0.005
        dtype = np.float32

        data = paddle.static.data('data', [2, 3, 4], dtype)
        data.persistable = True
        out = paddle.unsqueeze(data, [0, 2])
        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)

        gradient_checker.double_grad_check(
            [data], out, x_init=[data_arr], place=place, eps=eps
        )
        gradient_checker.double_grad_check_for_dygraph(
            self.unsqueeze_wrapper, [data], out, x_init=[data_arr], place=place
        )

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


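# Triple gradient check for unsqueeze using gradient_checker.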
class TestUnsqueezeTripleGradCheck(unittest.TestCase):
    def unsqueeze_wrapper(self, x):
        return paddle.unsqueeze(x[0], [0, 2])

    @prog_scope()
    def func(self, place):
        # The shape of the input variable should be fully specified; it must not include -1.
        eps = 0.005
        dtype = np.float32

        data = paddle.static.data('data', [2, 3, 4], dtype)
        data.persistable = True
        out = paddle.unsqueeze(data, [0, 2])
        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)

        gradient_checker.triple_grad_check(
            [data], out, x_init=[data_arr], place=place, eps=eps
        )
        gradient_checker.triple_grad_check_for_dygraph(
            self.unsqueeze_wrapper, [data], out, x_init=[data_arr], place=place
        )

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


if __name__ == "__main__":
    unittest.main()