# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
16

17
import numpy as np
18

19 20
import paddle
import paddle.fluid as fluid
21 22
from op_test import OpTest, convert_float_to_uint16
import paddle.fluid.core as core
23 24 25
import gradient_checker
from decorator_helper import prog_scope
import paddle.fluid.layers as layers
26

27
paddle.enable_static()
28 29 30


# Correct: General.
31
class TestUnsqueezeOp(OpTest):
32

33
    def setUp(self):
34
        self.init_test_case()
35
        self.op_type = "unsqueeze"
36
        self.inputs = {"X": np.random.random(self.ori_shape).astype("float64")}
C
chenweihang 已提交
37
        self.init_attrs()
38
        self.outputs = {"Out": self.inputs["X"].reshape(self.new_shape)}
39 40

    def test_check_output(self):
41
        self.check_output()
42 43 44 45

    def test_check_grad(self):
        self.check_grad(["X"], "Out")

46
    def init_test_case(self):
Z
zhupengyang 已提交
47
        self.ori_shape = (3, 40)
48
        self.axes = (1, 2)
Z
zhupengyang 已提交
49
        self.new_shape = (3, 1, 1, 40)
50

C
chenweihang 已提交
51
    def init_attrs(self):
52
        self.attrs = {"axes": self.axes}
C
chenweihang 已提交
53

54

55
class TestUnsqueezeBF16Op(OpTest):
56

57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81
    def setUp(self):
        self.init_test_case()
        self.op_type = "unsqueeze"
        self.dtype = np.uint16
        x = np.random.random(self.ori_shape).astype("float32")
        out = x.reshape(self.new_shape)
        self.inputs = {"X": convert_float_to_uint16(x)}
        self.init_attrs()
        self.outputs = {"Out": convert_float_to_uint16(out)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(["X"], "Out")

    def init_test_case(self):
        self.ori_shape = (3, 40)
        self.axes = (1, 2)
        self.new_shape = (3, 1, 1, 40)

    def init_attrs(self):
        self.attrs = {"axes": self.axes}


82 83
# Correct: Single input index.
class TestUnsqueezeOp1(TestUnsqueezeOp):
84

85
    def init_test_case(self):
Z
zhupengyang 已提交
86
        self.ori_shape = (20, 5)
87
        self.axes = (-1, )
Z
zhupengyang 已提交
88
        self.new_shape = (20, 5, 1)
89 90 91


# Correct: Mixed input axis.
92
class TestUnsqueezeOp2(TestUnsqueezeOp):
93

94
    def init_test_case(self):
Z
zhupengyang 已提交
95
        self.ori_shape = (20, 5)
96
        self.axes = (0, -1)
Z
zhupengyang 已提交
97
        self.new_shape = (1, 20, 5, 1)
98 99


100
# Correct: There is duplicated axis.
101
class TestUnsqueezeOp3(TestUnsqueezeOp):
102

103
    def init_test_case(self):
Z
zhupengyang 已提交
104
        self.ori_shape = (10, 2, 5)
105
        self.axes = (0, 3, 3)
Z
zhupengyang 已提交
106
        self.new_shape = (1, 10, 2, 1, 1, 5)
107 108


109 110
# Correct: Reversed axes.
class TestUnsqueezeOp4(TestUnsqueezeOp):
111

112
    def init_test_case(self):
Z
zhupengyang 已提交
113
        self.ori_shape = (10, 2, 5)
114
        self.axes = (3, 1, 1)
Z
zhupengyang 已提交
115
        self.new_shape = (10, 1, 1, 2, 5, 1)
116 117


118
class API_TestUnsqueeze(unittest.TestCase):
119

120
    def test_out(self):
121 122 123 124
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program(),
                                         paddle.static.Program()):
            data1 = paddle.static.data('data1', shape=[-1, 10], dtype='float64')
125
            result_squeeze = paddle.unsqueeze(data1, axis=[1])
126 127
            place = paddle.CPUPlace()
            exe = paddle.static.Executor(place)
128 129 130 131
            input1 = np.random.random([5, 1, 10]).astype('float64')
            input = np.squeeze(input1, axis=1)
            result, = exe.run(feed={"data1": input},
                              fetch_list=[result_squeeze])
132
            np.testing.assert_allclose(input1, result, rtol=1e-05)
133 134 135


class TestUnsqueezeOpError(unittest.TestCase):
136

137
    def test_errors(self):
138 139 140
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program(),
                                         paddle.static.Program()):
141 142
            # The type of axis in split_op should be int or Variable.
            def test_axes_type():
143 144 145
                x6 = paddle.static.data(shape=[-1, 10],
                                        dtype='float16',
                                        name='x3')
146
                paddle.unsqueeze(x6, axis=3.2)
147 148 149 150 151

            self.assertRaises(TypeError, test_axes_type)


class API_TestUnsqueeze2(unittest.TestCase):
152

153
    def test_out(self):
154 155 156 157 158
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program(),
                                         paddle.static.Program()):
            data1 = paddle.static.data('data1', shape=[-1, 10], dtype='float64')
            data2 = paddle.static.data('data2', shape=[1], dtype='int32')
159
            result_squeeze = paddle.unsqueeze(data1, axis=data2)
160 161
            place = paddle.CPUPlace()
            exe = paddle.static.Executor(place)
162 163 164
            input1 = np.random.random([5, 1, 10]).astype('float64')
            input2 = np.array([1]).astype('int32')
            input = np.squeeze(input1, axis=1)
165 166 167 168
            result1, = exe.run(feed={
                "data1": input,
                "data2": input2
            },
169
                               fetch_list=[result_squeeze])
170
            np.testing.assert_allclose(input1, result1, rtol=1e-05)
171 172 173


class API_TestUnsqueeze3(unittest.TestCase):
174

175
    def test_out(self):
176 177 178 179 180
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program(),
                                         paddle.static.Program()):
            data1 = paddle.static.data('data1', shape=[-1, 10], dtype='float64')
            data2 = paddle.static.data('data2', shape=[1], dtype='int32')
181
            result_squeeze = paddle.unsqueeze(data1, axis=[data2, 3])
182 183
            place = paddle.CPUPlace()
            exe = paddle.static.Executor(place)
184 185 186
            input1 = np.random.random([5, 1, 10, 1]).astype('float64')
            input2 = np.array([1]).astype('int32')
            input = np.squeeze(input1)
187 188 189 190
            result1, = exe.run(feed={
                "data1": input,
                "data2": input2
            },
191
                               fetch_list=[result_squeeze])
192
            np.testing.assert_array_equal(input1, result1)
L
Leo Chen 已提交
193
            self.assertEqual(input1.shape, result1.shape)
194 195 196


class API_TestDyUnsqueeze(unittest.TestCase):
197

198
    def test_out(self):
199 200 201 202 203 204
        paddle.disable_static()
        input_1 = np.random.random([5, 1, 10]).astype("int32")
        input1 = np.expand_dims(input_1, axis=1)
        input = paddle.to_tensor(input_1)
        output = paddle.unsqueeze(input, axis=[1])
        out_np = output.numpy()
205
        np.testing.assert_array_equal(input1, out_np)
206
        self.assertEqual(input1.shape, out_np.shape)
207 208 209


class API_TestDyUnsqueeze2(unittest.TestCase):
210

211
    def test_out(self):
212 213 214 215 216 217
        paddle.disable_static()
        input1 = np.random.random([5, 10]).astype("int32")
        out1 = np.expand_dims(input1, axis=1)
        input = paddle.to_tensor(input1)
        output = paddle.unsqueeze(input, axis=1)
        out_np = output.numpy()
218
        np.testing.assert_array_equal(out1, out_np)
219
        self.assertEqual(out1.shape, out_np.shape)
L
Leo Chen 已提交
220 221 222


class API_TestDyUnsqueezeAxisTensor(unittest.TestCase):
223

L
Leo Chen 已提交
224
    def test_out(self):
225 226 227 228 229 230 231
        paddle.disable_static()
        input1 = np.random.random([5, 10]).astype("int32")
        out1 = np.expand_dims(input1, axis=1)
        out1 = np.expand_dims(out1, axis=2)
        input = paddle.to_tensor(input1)
        output = paddle.unsqueeze(input, axis=paddle.to_tensor([1, 2]))
        out_np = output.numpy()
232
        np.testing.assert_array_equal(out1, out_np)
233
        self.assertEqual(out1.shape, out_np.shape)
L
Leo Chen 已提交
234 235 236


class API_TestDyUnsqueezeAxisTensorList(unittest.TestCase):
    """Dygraph check: axis given as a list of one-element paddle Tensors."""

    def test_out(self):
        paddle.disable_static()
        input1 = np.random.random([5, 10]).astype("int32")
        # np.expand_dims accepts a tuple axis since NumPy 1.18; two
        # single-axis calls are kept for compatibility with older NumPy.
        out1 = np.expand_dims(input1, axis=1)
        out1 = np.expand_dims(out1, axis=2)
        # Fixed: the original also built `input = paddle.to_tensor(input1)`
        # but never used it; only one tensor is created now.
        output = paddle.unsqueeze(
            paddle.to_tensor(input1),
            axis=[paddle.to_tensor([1]),
                  paddle.to_tensor([2])])
        out_np = output.numpy()
        np.testing.assert_array_equal(out1, out_np)
        self.assertEqual(out1.shape, out_np.shape)


class API_TestDygraphUnSqueeze(unittest.TestCase):
255

256 257 258 259 260 261
    def setUp(self):
        self.executed_api()

    def executed_api(self):
        self.unsqueeze = paddle.unsqueeze

262 263 264 265
    def test_out(self):
        paddle.disable_static()
        input_1 = np.random.random([5, 1, 10]).astype("int32")
        input = paddle.to_tensor(input_1)
266
        output = self.unsqueeze(input, axis=[1])
267 268
        out_np = output.numpy()
        expected_out = np.expand_dims(input_1, axis=1)
269
        np.testing.assert_allclose(expected_out, out_np, rtol=1e-05)
270 271 272 273 274

    def test_out_int8(self):
        paddle.disable_static()
        input_1 = np.random.random([5, 1, 10]).astype("int8")
        input = paddle.to_tensor(input_1)
275
        output = self.unsqueeze(input, axis=[1])
276 277
        out_np = output.numpy()
        expected_out = np.expand_dims(input_1, axis=1)
278
        np.testing.assert_allclose(expected_out, out_np, rtol=1e-05)
279 280 281 282 283

    def test_out_uint8(self):
        paddle.disable_static()
        input_1 = np.random.random([5, 1, 10]).astype("uint8")
        input = paddle.to_tensor(input_1)
284
        output = self.unsqueeze(input, axis=1)
285 286
        out_np = output.numpy()
        expected_out = np.expand_dims(input_1, axis=1)
287
        np.testing.assert_allclose(expected_out, out_np, rtol=1e-05)
288 289 290 291 292

    def test_axis_not_list(self):
        paddle.disable_static()
        input_1 = np.random.random([5, 1, 10]).astype("int32")
        input = paddle.to_tensor(input_1)
293
        output = self.unsqueeze(input, axis=1)
294 295
        out_np = output.numpy()
        expected_out = np.expand_dims(input_1, axis=1)
296
        np.testing.assert_allclose(expected_out, out_np, rtol=1e-05)
297 298 299 300 301

    def test_dimension_not_1(self):
        paddle.disable_static()
        input_1 = np.random.random([5, 1, 10]).astype("int32")
        input = paddle.to_tensor(input_1)
302
        output = self.unsqueeze(input, axis=(1, 2))
303
        out_np = output.numpy()
304 305
        expected_out = np.expand_dims(input_1, axis=(1, 2))
        np.testing.assert_allclose(expected_out, out_np, rtol=1e-05)
306 307


308
class API_TestDygraphUnSqueezeInplace(API_TestDygraphUnSqueeze):
309

310 311 312 313
    def executed_api(self):
        self.unsqueeze = paddle.unsqueeze_


314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387
class TestUnsqueezeDoubleGradCheck(unittest.TestCase):

    def unsqueeze_wrapper(self, x):
        return paddle.unsqueeze(x[0], [0, 2])

    @prog_scope()
    def func(self, place):
        # the shape of input variable should be clearly specified, not inlcude -1.
        eps = 0.005
        dtype = np.float32

        data = layers.data('data', [2, 3, 4], False, dtype)
        data.persistable = True
        out = paddle.unsqueeze(data, [0, 2])
        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)

        gradient_checker.double_grad_check([data],
                                           out,
                                           x_init=[data_arr],
                                           place=place,
                                           eps=eps)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        gradient_checker.double_grad_check_for_dygraph(self.unsqueeze_wrapper,
                                                       [data],
                                                       out,
                                                       x_init=[data_arr],
                                                       place=place)

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestUnsqueezeTripleGradCheck(unittest.TestCase):

    def unsqueeze_wrapper(self, x):
        return paddle.unsqueeze(x[0], [0, 2])

    @prog_scope()
    def func(self, place):
        # the shape of input variable should be clearly specified, not inlcude -1.
        eps = 0.005
        dtype = np.float32

        data = layers.data('data', [2, 3, 4], False, dtype)
        data.persistable = True
        out = paddle.unsqueeze(data, [0, 2])
        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)

        gradient_checker.triple_grad_check([data],
                                           out,
                                           x_init=[data_arr],
                                           place=place,
                                           eps=eps)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        gradient_checker.triple_grad_check_for_dygraph(self.unsqueeze_wrapper,
                                                       [data],
                                                       out,
                                                       x_init=[data_arr],
                                                       place=place)

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


388 389
if __name__ == "__main__":
    # Discover and run every TestCase defined in this module.
    unittest.main()