#   Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
from eager_op_test import OpTest, convert_float_to_uint16

import paddle
from paddle.fluid import core


class TestFlattenOp(OpTest):
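    # Base OpTest for the flatten_contiguous_range op: checks the forward
    # output and the gradient of paddle.flatten over the configured
    # [start_axis, stop_axis] range.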
    def setUp(self):
        self.python_api = paddle.flatten
        self.public_python_api = paddle.flatten
        self.python_out_sig = ["Out"]
        self.op_type = "flatten_contiguous_range"
        self.prim_op_type = "comp"
        self.start_axis = 0
        self.stop_axis = -1
        self.if_enable_cinn()
        self.init_test_case()
        self.init_test_dtype()
        self.init_input_data()
        self.init_attrs()
        self.outputs = {
            "Out": self.inputs["X"].reshape(self.new_shape),
            "XShape": np.random.random(self.in_shape).astype("float32"),
        }

    def if_enable_cinn(self):
        pass

    def test_check_output(self):
        if str(self.dtype) in {"float16", "uint16"}:
            self.check_output_with_place(
                core.CUDAPlace(0), no_check_set=["XShape"], check_prim=True
            )
        else:
            self.check_output(no_check_set=["XShape"], check_prim=True)

    def test_check_grad(self):
        if str(self.dtype) in {"float16", "uint16"}:
            self.check_grad_with_place(
                core.CUDAPlace(0), ["X"], "Out", check_prim=True
            )
        else:
            self.check_grad(["X"], "Out", check_prim=True)

    def init_test_case(self):
        self.in_shape = (3, 2, 5, 4)
        self.start_axis = 0
        self.stop_axis = -1
        self.new_shape = 120

    def init_attrs(self):
        self.attrs = {
            "start_axis": self.start_axis,
            "stop_axis": self.stop_axis,
        }

    def init_test_dtype(self):
        self.dtype = "float64"

    def init_input_data(self):
        if str(self.dtype) != "uint16":
            x = np.random.random(self.in_shape).astype(self.dtype)
        else:
            x = np.random.random(self.in_shape).astype("float32")
            x = convert_float_to_uint16(x)

        self.inputs = {"X": x}


class TestFlattenFP32Op(TestFlattenOp):
    def init_test_dtype(self):
        self.dtype = "float32"


@unittest.skipIf(
    not core.is_compiled_with_cuda(),
    "core is not complied with CUDA",
)
class TestFlattenFP16Op(TestFlattenOp):
    def init_test_dtype(self):
        self.dtype = "float16"


@unittest.skipIf(
    not core.is_compiled_with_cuda()
    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
    "core is not complied with CUDA and not support the bfloat16",
)
class TestFlattenBF16Op(TestFlattenOp):
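    # bfloat16 data is carried as uint16 by the OpTest framework
    # (see convert_float_to_uint16), hence the "uint16" dtype below.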
    def if_enable_cinn(self):
        pass

    def init_test_dtype(self):
        self.dtype = "uint16"


class TestFlattenOp_1(TestFlattenOp):
    def init_test_case(self):
        self.in_shape = (3, 2, 5, 4)
        self.start_axis = 1
        self.stop_axis = 2
        self.new_shape = (3, 10, 4)

    def init_attrs(self):
        self.attrs = {
            "start_axis": self.start_axis,
            "stop_axis": self.stop_axis,
        }


class TestFlattenFP32Op_1(TestFlattenOp_1):
    def init_test_dtype(self):
        self.dtype = "float32"


@unittest.skipIf(
    not core.is_compiled_with_cuda(),
    "core is not complied with CUDA",
)
class TestFlattenFP16Op_1(TestFlattenOp_1):
    def init_test_dtype(self):
        self.dtype = "float16"


@unittest.skipIf(
    not core.is_compiled_with_cuda()
    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
    "core is not complied with CUDA and not support the bfloat16",
)
class TestFlattenBF16Op_1(TestFlattenOp_1):
    def if_enable_cinn(self):
        pass

    def init_test_dtype(self):
        self.dtype = "uint16"


class TestFlattenOp_2(TestFlattenOp):
    def init_test_case(self):
        self.in_shape = (3, 2, 5, 4)
        self.start_axis = 0
        self.stop_axis = 1
        self.new_shape = (6, 5, 4)

    def init_attrs(self):
        self.attrs = {
            "start_axis": self.start_axis,
            "stop_axis": self.stop_axis,
        }


class TestFlattenFP32Op_2(TestFlattenOp_2):
    def init_test_dtype(self):
        self.dtype = "float32"


@unittest.skipIf(
    not core.is_compiled_with_cuda(),
    "core is not complied with CUDA",
)
class TestFlattenFP16Op_2(TestFlattenOp_2):
    def init_test_dtype(self):
        self.dtype = "float16"


@unittest.skipIf(
    not core.is_compiled_with_cuda()
    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
    "core is not complied with CUDA and not support the bfloat16",
)
class TestFlattenBF16Op_2(TestFlattenOp_2):
    def if_enable_cinn(self):
        pass

    def init_test_dtype(self):
        self.dtype = "uint16"


class TestFlattenOp_3(TestFlattenOp):
    def init_test_case(self):
        self.in_shape = (3, 2, 5, 4)
        self.start_axis = 0
        self.stop_axis = 2
        self.new_shape = (30, 4)

    def init_attrs(self):
        self.attrs = {
            "start_axis": self.start_axis,
            "stop_axis": self.stop_axis,
        }


class TestFlattenFP32Op_3(TestFlattenOp_3):
    def init_test_dtype(self):
        self.dtype = "float32"


@unittest.skipIf(
    not core.is_compiled_with_cuda(),
    "core is not complied with CUDA",
)
class TestFlattenFP16Op_3(TestFlattenOp_3):
    def init_test_dtype(self):
        self.dtype = "float16"


@unittest.skipIf(
    not core.is_compiled_with_cuda()
    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
    "core is not complied with CUDA and not support the bfloat16",
)
class TestFlattenBF16Op_3(TestFlattenOp_3):
    def if_enable_cinn(self):
        pass

    def init_test_dtype(self):
        self.dtype = "uint16"


class TestFlattenOp_4(TestFlattenOp):
    def init_test_case(self):
        self.in_shape = (3, 2, 5, 4)
        self.start_axis = -2
        self.stop_axis = -1
        self.new_shape = (3, 2, 20)

    def init_attrs(self):
        self.attrs = {
            "start_axis": self.start_axis,
            "stop_axis": self.stop_axis,
        }


class TestFlattenFP32Op_4(TestFlattenOp_4):
    def init_test_dtype(self):
        self.dtype = "float32"


@unittest.skipIf(
    not core.is_compiled_with_cuda(),
    "core is not complied with CUDA",
)
class TestFlattenFP16Op_4(TestFlattenOp_4):
    def init_test_dtype(self):
        self.dtype = "float16"


@unittest.skipIf(
    not core.is_compiled_with_cuda()
    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
    "core is not complied with CUDA and not support the bfloat16",
)
class TestFlattenBF16Op_4(TestFlattenOp_4):
    def if_enable_cinn(self):
        pass

    def init_test_dtype(self):
        self.dtype = "uint16"


class TestFlattenOp_5(TestFlattenOp):
    def init_test_case(self):
        self.in_shape = (3, 2, 5, 4)
        self.start_axis = 2
        self.stop_axis = 2
        self.new_shape = (3, 2, 5, 4)

    def init_attrs(self):
        self.attrs = {
            "start_axis": self.start_axis,
            "stop_axis": self.stop_axis,
        }


class TestFlattenFP32Op_5(TestFlattenOp_5):
    def init_test_dtype(self):
        self.dtype = "float32"


@unittest.skipIf(
    not core.is_compiled_with_cuda(),
    "core is not complied with CUDA",
)
class TestFlattenFP16Op_5(TestFlattenOp_5):
    def init_test_dtype(self):
        self.dtype = "float16"


@unittest.skipIf(
    not core.is_compiled_with_cuda()
    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
    "core is not complied with CUDA and not support the bfloat16",
)
class TestFlattenBF16Op_5(TestFlattenOp_5):
    def if_enable_cinn(self):
        pass

    def init_test_dtype(self):
        self.dtype = "uint16"


class TestFlattenOp_ZeroDim(TestFlattenOp):
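    # 0-D input: a scalar tensor flattens to shape (1,); CINN is disabled here.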
    def init_test_case(self):
        self.in_shape = ()
        self.start_axis = 0
        self.stop_axis = -1
        self.new_shape = (1,)

    def if_enable_cinn(self):
        self.enable_cinn = False

    def init_attrs(self):
        self.attrs = {
            "start_axis": self.start_axis,
            "stop_axis": self.stop_axis,
        }


class TestFlattenFP32Op_ZeroDim(TestFlattenOp_ZeroDim):
    def init_test_dtype(self):
        self.dtype = "float32"


@unittest.skipIf(
    not core.is_compiled_with_cuda(),
    "core is not complied with CUDA",
)
class TestFlattenFP16Op_ZeroDim(TestFlattenOp_ZeroDim):
    def init_test_dtype(self):
        self.dtype = "float16"


class TestFlattenOpSixDims(TestFlattenOp):
    def init_test_case(self):
        self.in_shape = (3, 2, 3, 2, 4, 4)
        self.start_axis = 3
        self.stop_axis = 5
        self.new_shape = (3, 2, 3, 32)

    def init_attrs(self):
        self.attrs = {
            "start_axis": self.start_axis,
            "stop_axis": self.stop_axis,
        }


class TestFlattenFP32OpSixDims(TestFlattenOpSixDims):
    def init_test_dtype(self):
        self.dtype = "float32"


@unittest.skipIf(
    not core.is_compiled_with_cuda(),
    "core is not complied with CUDA",
)
class TestFlattenFP16OpSixDims(TestFlattenOpSixDims):
    def init_test_dtype(self):
        self.dtype = "float16"


@unittest.skipIf(
    not core.is_compiled_with_cuda()
    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
    "core is not complied with CUDA and not support the bfloat16",
)
class TestFlattenBF16OpSixDims(TestFlattenOpSixDims):
    def if_enable_cinn(self):
        pass

    def init_test_dtype(self):
        self.dtype = "uint16"


class TestFlatten2OpError(unittest.TestCase):
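    # Invalid axes (out of order, out of range, or non-integer) and a
    # non-Tensor input should raise ValueError.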
    def test_errors(self):
        image_shape = (2, 3, 4, 4)
        x = (
            np.arange(
                image_shape[0]
                * image_shape[1]
                * image_shape[2]
                * image_shape[3]
            ).reshape(image_shape)
            / 100.0
        )
        x = x.astype('float32')

        def test_ValueError1():
            x_var = paddle.static.data(
                name="x", shape=image_shape, dtype='float32'
            )
            out = paddle.flatten(x_var, start_axis=3, stop_axis=1)

        self.assertRaises(ValueError, test_ValueError1)

        def test_ValueError2():
            x_var = paddle.static.data(
                name="x", shape=image_shape, dtype='float32'
            )
            paddle.flatten(x_var, start_axis=10, stop_axis=1)

        self.assertRaises(ValueError, test_ValueError2)

        def test_ValueError3():
            x_var = paddle.static.data(
                name="x", shape=image_shape, dtype='float32'
            )
            paddle.flatten(x_var, start_axis=2, stop_axis=10)

        self.assertRaises(ValueError, test_ValueError3)

        def test_ValueError4():
            x_var = paddle.static.data(
                name="x", shape=image_shape, dtype='float32'
            )
            paddle.flatten(x_var, start_axis=2.0, stop_axis=10)

        self.assertRaises(ValueError, test_ValueError4)

        def test_ValueError5():
            x_var = paddle.static.data(
                name="x", shape=image_shape, dtype='float32'
            )
            paddle.flatten(x_var, start_axis=2, stop_axis=10.0)

        self.assertRaises(ValueError, test_ValueError5)

        def test_InputError():
            out = paddle.flatten(x)

        self.assertRaises(ValueError, test_InputError)


class TestStaticFlattenPythonAPI(unittest.TestCase):
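    # Static-graph API: flattening the last two dims of a (2, 3, 4, 4) input
    # should yield shape (2, 3, 16).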
    def execute_api(self, x, start_axis=0, stop_axis=-1):
        return paddle.flatten(x, start_axis, stop_axis)

    def test_static_api(self):
        paddle.enable_static()
        np_x = np.random.rand(2, 3, 4, 4).astype('float32')

        main_prog = paddle.static.Program()
        with paddle.static.program_guard(main_prog, paddle.static.Program()):
            x = paddle.static.data(
                name="x", shape=[2, 3, 4, 4], dtype='float32'
            )
            out = self.execute_api(x, start_axis=-2, stop_axis=-1)

        exe = paddle.static.Executor(place=paddle.CPUPlace())
        fetch_out = exe.run(main_prog, feed={"x": np_x}, fetch_list=[out])
        self.assertTrue((2, 3, 16) == fetch_out[0].shape)


class TestStaticFlattenInferShapePythonAPI(unittest.TestCase):
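    # Shape inference with unknown (-1) dims: flattening axes 2..3 of
    # [-1, 3, -1, -1] should infer the static shape (-1, 3, -1).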
    def execute_api(self, x, start_axis=0, stop_axis=-1):
        return paddle.flatten(x, start_axis, stop_axis)

    def test_static_api(self):
        paddle.enable_static()
        main_prog = paddle.static.Program()
        with paddle.static.program_guard(main_prog, paddle.static.Program()):
            x = paddle.static.data(
                name="x", shape=[-1, 3, -1, -1], dtype='float32'
            )
            out = self.execute_api(x, start_axis=2, stop_axis=3)
        self.assertTrue((-1, 3, -1) == out.shape)


class TestStaticInplaceFlattenPythonAPI(TestStaticFlattenPythonAPI):
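    # Same checks as TestStaticFlattenPythonAPI, but through the in-place
    # Tensor.flatten_ API.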
    def execute_api(self, x, start_axis=0, stop_axis=-1):
        return x.flatten_(start_axis, stop_axis)


class TestFlattenPython(unittest.TestCase):
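    # Dygraph API: negative start/stop axes work and a non-Tensor input
    # raises ValueError.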
    def test_python_api(self):
        image_shape = (2, 3, 4, 4)
        x = (
            np.arange(
                image_shape[0]
                * image_shape[1]
                * image_shape[2]
                * image_shape[3]
            ).reshape(image_shape)
            / 100.0
        )
        x = x.astype('float32')

        def test_InputError():
            out = paddle.flatten(x)

        self.assertRaises(ValueError, test_InputError)

        def test_Negative():
            paddle.disable_static()
            img = paddle.to_tensor(x)
            out = paddle.flatten(img, start_axis=-2, stop_axis=-1)
            return out.numpy().shape

        res_shape = test_Negative()
        self.assertTrue((2, 3, 16) == res_shape)


class TestDygraphInplaceFlattenPython(unittest.TestCase):
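    # Dygraph in-place flatten_ with negative axes.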
    def test_python_api(self):
        image_shape = (2, 3, 4, 4)
        x = (
            np.arange(
                image_shape[0]
                * image_shape[1]
                * image_shape[2]
                * image_shape[3]
            ).reshape(image_shape)
            / 100.0
        )
        x = x.astype('float32')

        def test_Negative():
            paddle.disable_static()
            img = paddle.to_tensor(x)
            out = img.flatten_(start_axis=-2, stop_axis=-1)
            return out.numpy().shape

        res_shape = test_Negative()
        self.assertTrue((2, 3, 16) == res_shape)
        paddle.enable_static()


class TestFlatten0DTensorOpError(unittest.TestCase):
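    # 0-D input in static graph: out-of-range axes should raise ValueError.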
    def test_errors(self):
        image_shape = ()
        x = np.random.uniform(-1.0, 1.0, []).astype('float32')

        def test_ValueError1():
            x_var = paddle.static.data(
                name="x", shape=image_shape, dtype='float32'
            )
            out = paddle.flatten(x_var, start_axis=10, stop_axis=0)

        self.assertRaises(ValueError, test_ValueError1)

        def test_ValueError2():
            x_var = paddle.static.data(
                name="x", shape=image_shape, dtype='float32'
            )
            out = paddle.flatten(x_var, start_axis=0, stop_axis=10)

        self.assertRaises(ValueError, test_ValueError2)


if __name__ == "__main__":
    unittest.main()