# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import gradient_checker
import numpy as np
from decorator_helper import prog_scope
from eager_op_test import OpTest, convert_float_to_uint16

import paddle
from paddle import fluid
from paddle.fluid import core

paddle.enable_static()
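

# paddle.unsqueeze inserts size-1 dimensions at the given axes; for example, an
# input of shape (3, 40) unsqueezed at axes (1, 2) has shape (3, 1, 1, 40).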


# Correct: General.
class TestUnsqueezeOp(OpTest):
    def setUp(self):
        self.init_test_case()
        self.op_type = "unsqueeze"
        self.inputs = {"X": np.random.random(self.ori_shape).astype("float64")}
        self.init_attrs()
        self.outputs = {"Out": self.inputs["X"].reshape(self.new_shape)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(["X"], "Out")

    def init_test_case(self):
        self.ori_shape = (3, 40)
        self.axes = (1, 2)
        self.new_shape = (3, 1, 1, 40)

    def init_attrs(self):
        self.attrs = {"axes": self.axes}


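# Correct: General, float16.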
class TestUnsqueezeFP16Op(OpTest):
    def setUp(self):
        self.init_test_case()
        self.op_type = "unsqueeze"
        self.inputs = {"X": np.random.random(self.ori_shape).astype("float16")}
        self.init_attrs()
        self.outputs = {"Out": self.inputs["X"].reshape(self.new_shape)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(["X"], "Out")

    def init_test_case(self):
        self.ori_shape = (3, 40)
        self.axes = (1, 2)
        self.new_shape = (3, 1, 1, 40)

    def init_attrs(self):
        self.attrs = {"axes": self.axes}


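# Correct: General, bfloat16 (stored as uint16 via convert_float_to_uint16).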
class TestUnsqueezeBF16Op(OpTest):
    def setUp(self):
        self.init_test_case()
        self.op_type = "unsqueeze"
        self.dtype = np.uint16
        x = np.random.random(self.ori_shape).astype("float32")
        out = x.reshape(self.new_shape)
        self.inputs = {"X": convert_float_to_uint16(x)}
        self.init_attrs()
        self.outputs = {"Out": convert_float_to_uint16(out)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(["X"], "Out")

    def init_test_case(self):
        self.ori_shape = (3, 40)
        self.axes = (1, 2)
        self.new_shape = (3, 1, 1, 40)

    def init_attrs(self):
        self.attrs = {"axes": self.axes}


# Correct: Single axis, given as a negative index.
class TestUnsqueezeOp1(TestUnsqueezeOp):
    def init_test_case(self):
        self.ori_shape = (20, 5)
        self.axes = (-1,)
        self.new_shape = (20, 5, 1)


# Correct: Mixed positive and negative axes.
class TestUnsqueezeOp2(TestUnsqueezeOp):
    def init_test_case(self):
        self.ori_shape = (20, 5)
        self.axes = (0, -1)
        self.new_shape = (1, 20, 5, 1)


# Correct: There is a duplicated axis.
class TestUnsqueezeOp3(TestUnsqueezeOp):
    def init_test_case(self):
        self.ori_shape = (10, 2, 5)
        self.axes = (0, 3, 3)
        self.new_shape = (1, 10, 2, 1, 1, 5)


# Correct: Reversed axes.
class TestUnsqueezeOp4(TestUnsqueezeOp):
    def init_test_case(self):
        self.ori_shape = (10, 2, 5)
        self.axes = (3, 1, 1)
        self.new_shape = (10, 1, 1, 2, 5, 1)


# Correct: axes is empty, x is 0D.
class TestUnsqueezeOp5(TestUnsqueezeOp):
    def init_test_case(self):
        self.ori_shape = ()
        self.axes = ()
        self.new_shape = ()


# Correct: axes is empty, x is ND.
class TestUnsqueezeOp6(TestUnsqueezeOp):
    def init_test_case(self):
        self.ori_shape = (10, 2, 5)
        self.axes = ()
        self.new_shape = (10, 2, 5)


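# Correct: 0-D input unsqueezed by explicit axes.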
class TestUnsqueezeOp_ZeroDim1(TestUnsqueezeOp):
    def init_test_case(self):
        self.ori_shape = ()
        self.axes = (-1,)
        self.new_shape = (1,)


class TestUnsqueezeOp_ZeroDim2(TestUnsqueezeOp):
    def init_test_case(self):
        self.ori_shape = ()
        self.axes = (-1, 1)
        self.new_shape = (1, 1)


class TestUnsqueezeOp_ZeroDim3(TestUnsqueezeOp):
    def init_test_case(self):
        self.ori_shape = ()
        self.axes = (0, 1, 2)
        self.new_shape = (1, 1, 1)


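# Static-graph API tests for paddle.unsqueeze.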
class API_TestUnsqueeze(unittest.TestCase):
    def test_out(self):
        paddle.enable_static()
        with paddle.static.program_guard(
            paddle.static.Program(), paddle.static.Program()
        ):
            data1 = paddle.static.data('data1', shape=[-1, 10], dtype='float64')
            result_squeeze = paddle.unsqueeze(data1, axis=[1])
            place = paddle.CPUPlace()
            exe = paddle.static.Executor(place)
            input1 = np.random.random([5, 1, 10]).astype('float64')
            input = np.squeeze(input1, axis=1)
            (result,) = exe.run(
                feed={"data1": input}, fetch_list=[result_squeeze]
            )
            np.testing.assert_allclose(input1, result, rtol=1e-05)


class TestUnsqueezeOpError(unittest.TestCase):
    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(
            paddle.static.Program(), paddle.static.Program()
        ):
            # The type of axis in unsqueeze should be int, list of int, or Tensor.
            def test_axes_type():
                x6 = paddle.static.data(
                    shape=[-1, 10], dtype='float16', name='x3'
                )
                paddle.unsqueeze(x6, axis=3.2)

            self.assertRaises(TypeError, test_axes_type)


class API_TestUnsqueeze2(unittest.TestCase):
    def test_out(self):
        paddle.enable_static()
        with paddle.static.program_guard(
            paddle.static.Program(), paddle.static.Program()
        ):
            data1 = paddle.static.data('data1', shape=[-1, 10], dtype='float64')
            data2 = paddle.static.data('data2', shape=[1], dtype='int32')
            result_squeeze = paddle.unsqueeze(data1, axis=data2)
            place = paddle.CPUPlace()
            exe = paddle.static.Executor(place)
            input1 = np.random.random([5, 1, 10]).astype('float64')
            input2 = np.array([1]).astype('int32')
            input = np.squeeze(input1, axis=1)
            (result1,) = exe.run(
                feed={"data1": input, "data2": input2},
                fetch_list=[result_squeeze],
            )
            np.testing.assert_allclose(input1, result1, rtol=1e-05)


class API_TestUnsqueeze3(unittest.TestCase):
    def test_out(self):
        paddle.enable_static()
        with paddle.static.program_guard(
            paddle.static.Program(), paddle.static.Program()
        ):
            data1 = paddle.static.data('data1', shape=[-1, 10], dtype='float64')
            data2 = paddle.static.data('data2', shape=[1], dtype='int32')
            result_squeeze = paddle.unsqueeze(data1, axis=[data2, 3])
            place = paddle.CPUPlace()
            exe = paddle.static.Executor(place)
            input1 = np.random.random([5, 1, 10, 1]).astype('float64')
            input2 = np.array([1]).astype('int32')
            input = np.squeeze(input1)
            (result1,) = exe.run(
                feed={"data1": input, "data2": input2},
                fetch_list=[result_squeeze],
            )
            np.testing.assert_array_equal(input1, result1)
            self.assertEqual(input1.shape, result1.shape)


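# Dygraph (imperative) API tests for paddle.unsqueeze.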
class API_TestDyUnsqueeze(unittest.TestCase):
    def test_out(self):
        paddle.disable_static()
        input_1 = np.random.random([5, 1, 10]).astype("int32")
        input1 = np.expand_dims(input_1, axis=1)
        input = paddle.to_tensor(input_1)
        output = paddle.unsqueeze(input, axis=[1])
        out_np = output.numpy()
        np.testing.assert_array_equal(input1, out_np)
        self.assertEqual(input1.shape, out_np.shape)


class API_TestDyUnsqueeze2(unittest.TestCase):
    def test_out(self):
        paddle.disable_static()
        input1 = np.random.random([5, 10]).astype("int32")
        out1 = np.expand_dims(input1, axis=1)
        input = paddle.to_tensor(input1)
        output = paddle.unsqueeze(input, axis=1)
        out_np = output.numpy()
        np.testing.assert_array_equal(out1, out_np)
        self.assertEqual(out1.shape, out_np.shape)


class API_TestDyUnsqueezeAxisTensor(unittest.TestCase):
    def test_out(self):
        paddle.disable_static()
        input1 = np.random.random([5, 10]).astype("int32")
        out1 = np.expand_dims(input1, axis=1)
        out1 = np.expand_dims(out1, axis=2)
        input = paddle.to_tensor(input1)
        output = paddle.unsqueeze(input, axis=paddle.to_tensor([1, 2]))
        out_np = output.numpy()
        np.testing.assert_array_equal(out1, out_np)
        self.assertEqual(out1.shape, out_np.shape)


class API_TestDyUnsqueezeAxisTensorList(unittest.TestCase):
    def test_out(self):
        paddle.disable_static()
        input1 = np.random.random([5, 10]).astype("int32")
        # np.expand_dims accepts a tuple of axes since NumPy 1.18.0; it is
        # applied twice here for compatibility with older NumPy versions.
        out1 = np.expand_dims(input1, axis=1)
        out1 = np.expand_dims(out1, axis=2)
        input = paddle.to_tensor(input1)
        output = paddle.unsqueeze(
            paddle.to_tensor(input1),
            axis=[paddle.to_tensor([1]), paddle.to_tensor([2])],
        )
        out_np = output.numpy()
        np.testing.assert_array_equal(out1, out_np)
        self.assertEqual(out1.shape, out_np.shape)


class API_TestDygraphUnSqueeze(unittest.TestCase):
    def setUp(self):
        self.executed_api()

    def executed_api(self):
        self.unsqueeze = paddle.unsqueeze

    def test_out(self):
        paddle.disable_static()
        input_1 = np.random.random([5, 1, 10]).astype("int32")
        input = paddle.to_tensor(input_1)
        output = self.unsqueeze(input, axis=[1])
        out_np = output.numpy()
        expected_out = np.expand_dims(input_1, axis=1)
        np.testing.assert_allclose(expected_out, out_np, rtol=1e-05)

    def test_out_int8(self):
        paddle.disable_static()
        input_1 = np.random.random([5, 1, 10]).astype("int8")
        input = paddle.to_tensor(input_1)
        output = self.unsqueeze(input, axis=[1])
        out_np = output.numpy()
        expected_out = np.expand_dims(input_1, axis=1)
        np.testing.assert_allclose(expected_out, out_np, rtol=1e-05)

    def test_out_uint8(self):
        paddle.disable_static()
        input_1 = np.random.random([5, 1, 10]).astype("uint8")
        input = paddle.to_tensor(input_1)
        output = self.unsqueeze(input, axis=1)
        out_np = output.numpy()
        expected_out = np.expand_dims(input_1, axis=1)
        np.testing.assert_allclose(expected_out, out_np, rtol=1e-05)

    def test_axis_not_list(self):
        paddle.disable_static()
        input_1 = np.random.random([5, 1, 10]).astype("int32")
        input = paddle.to_tensor(input_1)
        output = self.unsqueeze(input, axis=1)
        out_np = output.numpy()
        expected_out = np.expand_dims(input_1, axis=1)
        np.testing.assert_allclose(expected_out, out_np, rtol=1e-05)

    def test_dimension_not_1(self):
        paddle.disable_static()
        input_1 = np.random.random([5, 1, 10]).astype("int32")
        input = paddle.to_tensor(input_1)
        output = self.unsqueeze(input, axis=(1, 2))
        out_np = output.numpy()
        expected_out = np.expand_dims(input_1, axis=(1, 2))
        np.testing.assert_allclose(expected_out, out_np, rtol=1e-05)


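# Re-run the dygraph cases above with the in-place variant paddle.unsqueeze_.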
class API_TestDygraphUnSqueezeInplace(API_TestDygraphUnSqueeze):
    def executed_api(self):
        self.unsqueeze = paddle.unsqueeze_


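# Higher-order (double/triple) gradient checks via gradient_checker.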
class TestUnsqueezeDoubleGradCheck(unittest.TestCase):
    def unsqueeze_wrapper(self, x):
        return paddle.unsqueeze(x[0], [0, 2])

    @prog_scope()
    def func(self, place):
        # The input shape must be fully specified and must not include -1.
        eps = 0.005
        dtype = np.float32

        data = paddle.static.data('data', [2, 3, 4], dtype)
        data.persistable = True
        out = paddle.unsqueeze(data, [0, 2])
        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)

        gradient_checker.double_grad_check(
            [data], out, x_init=[data_arr], place=place, eps=eps
        )
        gradient_checker.double_grad_check_for_dygraph(
            self.unsqueeze_wrapper, [data], out, x_init=[data_arr], place=place
        )

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestUnsqueezeTripleGradCheck(unittest.TestCase):
    def unsqueeze_wrapper(self, x):
        return paddle.unsqueeze(x[0], [0, 2])

    @prog_scope()
    def func(self, place):
        # The input shape must be fully specified and must not include -1.
        eps = 0.005
        dtype = np.float32

        data = paddle.static.data('data', [2, 3, 4], dtype)
        data.persistable = True
        out = paddle.unsqueeze(data, [0, 2])
        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)

        gradient_checker.triple_grad_check(
            [data], out, x_init=[data_arr], place=place, eps=eps
        )
        gradient_checker.triple_grad_check_for_dygraph(
            self.unsqueeze_wrapper, [data], out, x_init=[data_arr], place=place
        )

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


if __name__ == "__main__":
    unittest.main()