test_unsqueeze_op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function
import unittest

import numpy as np

import paddle
import paddle.fluid as fluid
from op_test import OpTest, convert_float_to_uint16
import paddle.fluid.core as core
import gradient_checker
from decorator_helper import prog_scope
import paddle.fluid.layers as layers

paddle.enable_static()


# Correct: General.
class TestUnsqueezeOp(OpTest):

    def setUp(self):
        self.init_test_case()
        self.op_type = "unsqueeze"
        self.inputs = {"X": np.random.random(self.ori_shape).astype("float64")}
        self.init_attrs()
        self.outputs = {"Out": self.inputs["X"].reshape(self.new_shape)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(["X"], "Out")

    def init_test_case(self):
        self.ori_shape = (3, 40)
        self.axes = (1, 2)
        self.new_shape = (3, 1, 1, 40)

    def init_attrs(self):
        self.attrs = {"axes": self.axes}


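# Same as TestUnsqueezeOp, but exercises the bfloat16 path: float32 data is
# packed into uint16 via convert_float_to_uint16 before being fed to the op.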
class TestUnsqueezeBF16Op(OpTest):

    def setUp(self):
        self.init_test_case()
        self.op_type = "unsqueeze"
        self.dtype = np.uint16
        x = np.random.random(self.ori_shape).astype("float32")
        out = x.reshape(self.new_shape)
        self.inputs = {"X": convert_float_to_uint16(x)}
        self.init_attrs()
        self.outputs = {"Out": convert_float_to_uint16(out)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(["X"], "Out")

    def init_test_case(self):
        self.ori_shape = (3, 40)
        self.axes = (1, 2)
        self.new_shape = (3, 1, 1, 40)

    def init_attrs(self):
        self.attrs = {"axes": self.axes}


# Correct: Single input index.
class TestUnsqueezeOp1(TestUnsqueezeOp):

    def init_test_case(self):
        self.ori_shape = (20, 5)
        self.axes = (-1, )
        self.new_shape = (20, 5, 1)


# Correct: Mixed input axis.
class TestUnsqueezeOp2(TestUnsqueezeOp):

    def init_test_case(self):
        self.ori_shape = (20, 5)
        self.axes = (0, -1)
        self.new_shape = (1, 20, 5, 1)


# Correct: There are duplicated axes.
class TestUnsqueezeOp3(TestUnsqueezeOp):

    def init_test_case(self):
        self.ori_shape = (10, 2, 5)
        self.axes = (0, 3, 3)
        self.new_shape = (1, 10, 2, 1, 1, 5)


# Correct: Reversed axes.
class TestUnsqueezeOp4(TestUnsqueezeOp):

    def init_test_case(self):
        self.ori_shape = (10, 2, 5)
        self.axes = (3, 1, 1)
        self.new_shape = (10, 1, 1, 2, 5, 1)


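# Static-graph API: axis passed as a plain Python list.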
class API_TestUnsqueeze(unittest.TestCase):

    def test_out(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program(),
                                         paddle.static.Program()):
            data1 = paddle.static.data('data1', shape=[-1, 10], dtype='float64')
            result_squeeze = paddle.unsqueeze(data1, axis=[1])
            place = paddle.CPUPlace()
            exe = paddle.static.Executor(place)
            input1 = np.random.random([5, 1, 10]).astype('float64')
            input = np.squeeze(input1, axis=1)
            result, = exe.run(feed={"data1": input},
                              fetch_list=[result_squeeze])
            np.testing.assert_allclose(input1, result, rtol=1e-05)


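# Error path: a non-integer axis should raise TypeError.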
class TestUnsqueezeOpError(unittest.TestCase):

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program(),
                                         paddle.static.Program()):
            # The type of axis in unsqueeze should be int or Variable.
            def test_axes_type():
                x6 = paddle.static.data(shape=[-1, 10],
                                        dtype='float16',
                                        name='x3')
                paddle.unsqueeze(x6, axis=3.2)

            self.assertRaises(TypeError, test_axes_type)


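# Static-graph API: axis passed as a 1-D int32 Tensor.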
class API_TestUnsqueeze2(unittest.TestCase):

    def test_out(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program(),
                                         paddle.static.Program()):
            data1 = paddle.static.data('data1', shape=[-1, 10], dtype='float64')
            data2 = paddle.static.data('data2', shape=[1], dtype='int32')
            result_squeeze = paddle.unsqueeze(data1, axis=data2)
            place = paddle.CPUPlace()
            exe = paddle.static.Executor(place)
            input1 = np.random.random([5, 1, 10]).astype('float64')
            input2 = np.array([1]).astype('int32')
            input = np.squeeze(input1, axis=1)
            result1, = exe.run(feed={
                "data1": input,
                "data2": input2
            },
                               fetch_list=[result_squeeze])
            np.testing.assert_allclose(input1, result1, rtol=1e-05)


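# Static-graph API: axis passed as a list mixing a Tensor and a Python int.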
class API_TestUnsqueeze3(unittest.TestCase):

    def test_out(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program(),
                                         paddle.static.Program()):
            data1 = paddle.static.data('data1', shape=[-1, 10], dtype='float64')
            data2 = paddle.static.data('data2', shape=[1], dtype='int32')
            result_squeeze = paddle.unsqueeze(data1, axis=[data2, 3])
            place = paddle.CPUPlace()
            exe = paddle.static.Executor(place)
            input1 = np.random.random([5, 1, 10, 1]).astype('float64')
            input2 = np.array([1]).astype('int32')
            input = np.squeeze(input1)
            result1, = exe.run(feed={
                "data1": input,
                "data2": input2
            },
                               fetch_list=[result_squeeze])
            np.testing.assert_array_equal(input1, result1)
            self.assertEqual(input1.shape, result1.shape)


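# Dygraph API: axis passed as a Python list.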
class API_TestDyUnsqueeze(unittest.TestCase):

    def test_out(self):
        paddle.disable_static()
        input_1 = np.random.random([5, 1, 10]).astype("int32")
        input1 = np.expand_dims(input_1, axis=1)
        input = paddle.to_tensor(input_1)
        output = paddle.unsqueeze(input, axis=[1])
        out_np = output.numpy()
        np.testing.assert_array_equal(input1, out_np)
        self.assertEqual(input1.shape, out_np.shape)


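# Dygraph API: axis passed as a single Python int.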
class API_TestDyUnsqueeze2(unittest.TestCase):

    def test_out(self):
        paddle.disable_static()
        input1 = np.random.random([5, 10]).astype("int32")
        out1 = np.expand_dims(input1, axis=1)
        input = paddle.to_tensor(input1)
        output = paddle.unsqueeze(input, axis=1)
        out_np = output.numpy()
        np.testing.assert_array_equal(out1, out_np)
        self.assertEqual(out1.shape, out_np.shape)


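# Dygraph API: axis passed as a Tensor.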
class API_TestDyUnsqueezeAxisTensor(unittest.TestCase):

    def test_out(self):
        paddle.disable_static()
        input1 = np.random.random([5, 10]).astype("int32")
        out1 = np.expand_dims(input1, axis=1)
        out1 = np.expand_dims(out1, axis=2)
        input = paddle.to_tensor(input1)
        output = paddle.unsqueeze(input, axis=paddle.to_tensor([1, 2]))
        out_np = output.numpy()
        np.testing.assert_array_equal(out1, out_np)
        self.assertEqual(out1.shape, out_np.shape)


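# Dygraph API: axis passed as a list of Tensors.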
class API_TestDyUnsqueezeAxisTensorList(unittest.TestCase):

    def test_out(self):
        paddle.disable_static()
        input1 = np.random.random([5, 10]).astype("int32")
        # np.expand_dims has accepted a tuple of axes since NumPy 1.18.0.
        out1 = np.expand_dims(input1, axis=1)
        out1 = np.expand_dims(out1, axis=2)
        input = paddle.to_tensor(input1)
        output = paddle.unsqueeze(
            paddle.to_tensor(input1),
            axis=[paddle.to_tensor([1]),
                  paddle.to_tensor([2])])
        out_np = output.numpy()
        np.testing.assert_array_equal(out1, out_np)
        self.assertEqual(out1.shape, out_np.shape)


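# Dygraph API tests over several dtypes; executed_api() is a hook that lets
# the in-place subclass below swap in paddle.unsqueeze_.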
class API_TestDygraphUnSqueeze(unittest.TestCase):

    def setUp(self):
        self.executed_api()

    def executed_api(self):
        self.unsqueeze = paddle.unsqueeze

    def test_out(self):
        paddle.disable_static()
        input_1 = np.random.random([5, 1, 10]).astype("int32")
        input = paddle.to_tensor(input_1)
        output = self.unsqueeze(input, axis=[1])
        out_np = output.numpy()
        expected_out = np.expand_dims(input_1, axis=1)
        np.testing.assert_allclose(expected_out, out_np, rtol=1e-05)

    def test_out_int8(self):
        paddle.disable_static()
        input_1 = np.random.random([5, 1, 10]).astype("int8")
        input = paddle.to_tensor(input_1)
        output = self.unsqueeze(input, axis=[1])
        out_np = output.numpy()
        expected_out = np.expand_dims(input_1, axis=1)
        np.testing.assert_allclose(expected_out, out_np, rtol=1e-05)

    def test_out_uint8(self):
        paddle.disable_static()
        input_1 = np.random.random([5, 1, 10]).astype("uint8")
        input = paddle.to_tensor(input_1)
        output = self.unsqueeze(input, axis=1)
        out_np = output.numpy()
        expected_out = np.expand_dims(input_1, axis=1)
        np.testing.assert_allclose(expected_out, out_np, rtol=1e-05)

    def test_axis_not_list(self):
        paddle.disable_static()
        input_1 = np.random.random([5, 1, 10]).astype("int32")
        input = paddle.to_tensor(input_1)
        output = self.unsqueeze(input, axis=1)
        out_np = output.numpy()
        expected_out = np.expand_dims(input_1, axis=1)
        np.testing.assert_allclose(expected_out, out_np, rtol=1e-05)

    def test_dimension_not_1(self):
        paddle.disable_static()
        input_1 = np.random.random([5, 1, 10]).astype("int32")
        input = paddle.to_tensor(input_1)
        output = self.unsqueeze(input, axis=(1, 2))
        out_np = output.numpy()
        expected_out = np.expand_dims(input_1, axis=(1, 2))
        np.testing.assert_allclose(expected_out, out_np, rtol=1e-05)


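# Reruns the tests above with the in-place variant paddle.unsqueeze_.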
class API_TestDygraphUnSqueezeInplace(API_TestDygraphUnSqueeze):

    def executed_api(self):
        self.unsqueeze = paddle.unsqueeze_


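# Double-gradient check via finite differences (gradient_checker), run in
# both static graph and dygraph modes, on CPU and CUDA when available.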
class TestUnsqueezeDoubleGradCheck(unittest.TestCase):

    def unsqueeze_wrapper(self, x):
        return paddle.unsqueeze(x[0], [0, 2])

    @prog_scope()
    def func(self, place):
        # The shape of the input variable should be fully specified and must not include -1.
        eps = 0.005
        dtype = np.float32

        data = layers.data('data', [2, 3, 4], False, dtype)
        data.persistable = True
        out = paddle.unsqueeze(data, [0, 2])
        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)

        gradient_checker.double_grad_check([data],
                                           out,
                                           x_init=[data_arr],
                                           place=place,
                                           eps=eps)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        gradient_checker.double_grad_check_for_dygraph(self.unsqueeze_wrapper,
                                                       [data],
                                                       out,
                                                       x_init=[data_arr],
                                                       place=place)

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


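# Same as above, but checks the triple gradient.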
class TestUnsqueezeTripleGradCheck(unittest.TestCase):

    def unsqueeze_wrapper(self, x):
        return paddle.unsqueeze(x[0], [0, 2])

    @prog_scope()
    def func(self, place):
        # The shape of the input variable should be fully specified and must not include -1.
        eps = 0.005
        dtype = np.float32

        data = layers.data('data', [2, 3, 4], False, dtype)
        data.persistable = True
        out = paddle.unsqueeze(data, [0, 2])
        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)

        gradient_checker.triple_grad_check([data],
                                           out,
                                           x_init=[data_arr],
                                           place=place,
                                           eps=eps)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        gradient_checker.triple_grad_check_for_dygraph(self.unsqueeze_wrapper,
                                                       [data],
                                                       out,
                                                       x_init=[data_arr],
                                                       place=place)

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


if __name__ == "__main__":
    unittest.main()