#   Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
from eager_op_test import OpTest, convert_float_to_uint16

import paddle
from paddle.fluid import core


class TestFlattenOp(OpTest):
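    # Base test for the flatten_contiguous_range op: dims in
    # [start_axis, stop_axis] are collapsed into one, and the result is
    # checked against a plain numpy reshape.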
    def setUp(self):
        self.python_api = paddle.flatten
        self.public_python_api = paddle.flatten
        self.python_out_sig = ["Out"]
        self.op_type = "flatten_contiguous_range"
        self.prim_op_type = "comp"
        self.start_axis = 0
        self.stop_axis = -1
        self.skip_cinn()
        self.init_test_case()
        self.init_test_dtype()
        self.init_input_data()
        self.init_attrs()
        self.outputs = {
            "Out": self.inputs["X"].reshape(self.new_shape),
            "XShape": np.random.random(self.in_shape).astype("float32"),
        }

    def skip_cinn(self):
        self.enable_cinn = True

    def test_check_output(self):
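        # float16 and bfloat16 (stored as uint16) results are verified
        # explicitly on a CUDA place; other dtypes use the default places.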
        if str(self.dtype) in {"float16", "uint16"}:
            self.check_output_with_place(
                core.CUDAPlace(0), no_check_set=["XShape"], check_prim=True
            )
        else:
            self.check_output(no_check_set=["XShape"], check_prim=True)

    def test_check_grad(self):
        if str(self.dtype) in {"float16", "uint16"}:
            self.check_grad_with_place(
                core.CUDAPlace(0), ["X"], "Out", check_prim=True
            )
        else:
            self.check_grad(["X"], "Out", check_prim=True)

    def init_test_case(self):
        self.in_shape = (3, 2, 5, 4)
        self.start_axis = 0
        self.stop_axis = -1
        self.new_shape = 120

    def init_attrs(self):
        self.attrs = {
            "start_axis": self.start_axis,
            "stop_axis": self.stop_axis,
        }

    def init_test_dtype(self):
        self.dtype = "float64"

    def init_input_data(self):
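        # bfloat16 data is generated as float32 and converted to its uint16
        # storage format with convert_float_to_uint16; other dtypes are
        # sampled directly.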
        if str(self.dtype) != "uint16":
            x = np.random.random(self.in_shape).astype(self.dtype)
        else:
            x = np.random.random(self.in_shape).astype("float32")
            x = convert_float_to_uint16(x)

        self.inputs = {"X": x}


class TestFlattenFP32Op(TestFlattenOp):
    def init_test_dtype(self):
        self.dtype = "float32"


@unittest.skipIf(
    not core.is_compiled_with_cuda(),
    "core is not complied with CUDA",
)
class TestFlattenFP16Op(TestFlattenOp):
    def init_test_dtype(self):
        self.dtype = "float16"


@unittest.skipIf(
    not core.is_compiled_with_cuda()
    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
    "core is not complied with CUDA and not support the bfloat16",
)
class TestFlattenBF16Op(TestFlattenOp):
    def init_test_dtype(self):
        self.dtype = "uint16"


class TestFlattenOp_1(TestFlattenOp):
    def init_test_case(self):
        self.in_shape = (3, 2, 5, 4)
        self.start_axis = 1
        self.stop_axis = 2
        self.new_shape = (3, 10, 4)

    def init_attrs(self):
        self.attrs = {
            "start_axis": self.start_axis,
            "stop_axis": self.stop_axis,
        }


class TestFlattenFP32Op_1(TestFlattenOp_1):
    def init_test_dtype(self):
        self.dtype = "float32"


@unittest.skipIf(
    not core.is_compiled_with_cuda(),
    "core is not complied with CUDA",
)
class TestFlattenFP16Op_1(TestFlattenOp_1):
    def init_test_dtype(self):
        self.dtype = "float16"


@unittest.skipIf(
    not core.is_compiled_with_cuda()
    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
    "core is not complied with CUDA and not support the bfloat16",
)
class TestFlattenBF16Op_1(TestFlattenOp_1):
    def init_test_dtype(self):
        self.dtype = "uint16"


class TestFlattenOp_2(TestFlattenOp):
    def init_test_case(self):
        self.in_shape = (3, 2, 5, 4)
        self.start_axis = 0
        self.stop_axis = 1
        self.new_shape = (6, 5, 4)

    def init_attrs(self):
        self.attrs = {
            "start_axis": self.start_axis,
            "stop_axis": self.stop_axis,
        }


class TestFlattenFP32Op_2(TestFlattenOp_2):
    def init_test_dtype(self):
        self.dtype = "float32"


@unittest.skipIf(
    not core.is_compiled_with_cuda(),
    "core is not complied with CUDA",
)
class TestFlattenFP16Op_2(TestFlattenOp_2):
    def init_test_dtype(self):
        self.dtype = "float16"


@unittest.skipIf(
    not core.is_compiled_with_cuda()
    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
    "core is not complied with CUDA and not support the bfloat16",
)
class TestFlattenBF16Op_2(TestFlattenOp_2):
    def init_test_dtype(self):
        self.dtype = "uint16"


class TestFlattenOp_3(TestFlattenOp):
    def init_test_case(self):
        self.in_shape = (3, 2, 5, 4)
        self.start_axis = 0
        self.stop_axis = 2
        self.new_shape = (30, 4)

    def init_attrs(self):
        self.attrs = {
            "start_axis": self.start_axis,
            "stop_axis": self.stop_axis,
        }


class TestFlattenFP32Op_3(TestFlattenOp_3):
    def init_test_dtype(self):
        self.dtype = "float32"


@unittest.skipIf(
    not core.is_compiled_with_cuda(),
    "core is not complied with CUDA",
)
class TestFlattenFP16Op_3(TestFlattenOp_3):
    def init_test_dtype(self):
        self.dtype = "float16"


@unittest.skipIf(
    not core.is_compiled_with_cuda()
    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
    "core is not complied with CUDA and not support the bfloat16",
)
class TestFlattenBF16Op_3(TestFlattenOp_3):
    def init_test_dtype(self):
        self.dtype = "uint16"


class TestFlattenOp_4(TestFlattenOp):
    def init_test_case(self):
        self.in_shape = (3, 2, 5, 4)
        self.start_axis = -2
        self.stop_axis = -1
        self.new_shape = (3, 2, 20)

    def init_attrs(self):
        self.attrs = {
            "start_axis": self.start_axis,
            "stop_axis": self.stop_axis,
        }


class TestFlattenFP32Op_4(TestFlattenOp_4):
    def init_test_dtype(self):
        self.dtype = "float32"


@unittest.skipIf(
    not core.is_compiled_with_cuda(),
    "core is not complied with CUDA",
)
class TestFlattenFP16Op_4(TestFlattenOp_4):
    def init_test_dtype(self):
        self.dtype = "float16"


@unittest.skipIf(
    not core.is_compiled_with_cuda()
    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
    "core is not complied with CUDA and not support the bfloat16",
)
class TestFlattenBF16Op_4(TestFlattenOp_4):
    def init_test_dtype(self):
        self.dtype = "uint16"


class TestFlattenOp_5(TestFlattenOp):
    def init_test_case(self):
        self.in_shape = (3, 2, 5, 4)
        self.start_axis = 2
        self.stop_axis = 2
        self.new_shape = (3, 2, 5, 4)

    def init_attrs(self):
        self.attrs = {
            "start_axis": self.start_axis,
            "stop_axis": self.stop_axis,
        }


class TestFlattenFP32Op_5(TestFlattenOp_5):
    def init_test_dtype(self):
        self.dtype = "float32"


@unittest.skipIf(
    not core.is_compiled_with_cuda(),
    "core is not complied with CUDA",
)
class TestFlattenFP16Op_5(TestFlattenOp_5):
    def init_test_dtype(self):
        self.dtype = "float16"


@unittest.skipIf(
    not core.is_compiled_with_cuda()
    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
    "core is not complied with CUDA and not support the bfloat16",
)
class TestFlattenBF16Op_5(TestFlattenOp_5):
    def init_test_dtype(self):
        self.dtype = "uint16"


class TestFlattenOp_ZeroDim(TestFlattenOp):
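    # 0-D input: flattening a scalar tensor yields a 1-D tensor of shape (1,).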
    def init_test_case(self):
        self.in_shape = ()
        self.start_axis = 0
        self.stop_axis = -1
        self.new_shape = (1,)

    def skip_cinn(self):
        self.enable_cinn = False

    def init_attrs(self):
        self.attrs = {
            "start_axis": self.start_axis,
            "stop_axis": self.stop_axis,
        }


class TestFlattenFP32Op_ZeroDim(TestFlattenOp_ZeroDim):
    def init_test_dtype(self):
        self.dtype = "float32"


@unittest.skipIf(
    not core.is_compiled_with_cuda(),
    "core is not complied with CUDA",
)
class TestFlattenFP16Op_ZeroDim(TestFlattenOp_ZeroDim):
    def init_test_dtype(self):
        self.dtype = "float16"


class TestFlattenOpSixDims(TestFlattenOp):
    def init_test_case(self):
        self.in_shape = (3, 2, 3, 2, 4, 4)
        self.start_axis = 3
        self.stop_axis = 5
        self.new_shape = (3, 2, 3, 32)

    def init_attrs(self):
        self.attrs = {
            "start_axis": self.start_axis,
            "stop_axis": self.stop_axis,
        }


class TestFlattenFP32OpSixDims(TestFlattenOpSixDims):
    def init_test_dtype(self):
        self.dtype = "float32"


@unittest.skipIf(
    not core.is_compiled_with_cuda(),
    "core is not complied with CUDA",
)
class TestFlattenFP16OpSixDims(TestFlattenOpSixDims):
    def init_test_dtype(self):
        self.dtype = "float16"


@unittest.skipIf(
    not core.is_compiled_with_cuda()
    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
    "core is not complied with CUDA and not support the bfloat16",
)
class TestFlattenBF16OpSixDims(TestFlattenOpSixDims):
    def init_test_dtype(self):
        self.dtype = "uint16"


class TestFlatten2OpError(unittest.TestCase):
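    # Invalid axis arguments (reversed order, out of range, non-integer) and
    # raw numpy inputs should raise ValueError.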
    def test_errors(self):
        image_shape = (2, 3, 4, 4)
        x = (
            np.arange(
                image_shape[0]
                * image_shape[1]
                * image_shape[2]
                * image_shape[3]
            ).reshape(image_shape)
            / 100.0
        )
        x = x.astype('float32')

        def test_ValueError1():
            x_var = paddle.static.data(
                name="x", shape=image_shape, dtype='float32'
            )
            out = paddle.flatten(x_var, start_axis=3, stop_axis=1)

        self.assertRaises(ValueError, test_ValueError1)

        def test_ValueError2():
            x_var = paddle.static.data(
                name="x", shape=image_shape, dtype='float32'
            )
            paddle.flatten(x_var, start_axis=10, stop_axis=1)

        self.assertRaises(ValueError, test_ValueError2)

        def test_ValueError3():
            x_var = paddle.static.data(
                name="x", shape=image_shape, dtype='float32'
            )
            paddle.flatten(x_var, start_axis=2, stop_axis=10)

        self.assertRaises(ValueError, test_ValueError3)

        def test_ValueError4():
            x_var = paddle.static.data(
                name="x", shape=image_shape, dtype='float32'
            )
            paddle.flatten(x_var, start_axis=2.0, stop_axis=10)

        self.assertRaises(ValueError, test_ValueError4)

        def test_ValueError5():
            x_var = paddle.static.data(
                name="x", shape=image_shape, dtype='float32'
            )
            paddle.flatten(x_var, start_axis=2, stop_axis=10.0)

        self.assertRaises(ValueError, test_ValueError5)

        def test_InputError():
            out = paddle.flatten(x)

        self.assertRaises(ValueError, test_InputError)


class TestStaticFlattenPythonAPI(unittest.TestCase):
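    # Static-graph API check: flattening the last two dims of a (2, 3, 4, 4)
    # input yields shape (2, 3, 16).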
    def execute_api(self, x, start_axis=0, stop_axis=-1):
        return paddle.flatten(x, start_axis, stop_axis)

    def test_static_api(self):
        paddle.enable_static()
        np_x = np.random.rand(2, 3, 4, 4).astype('float32')

        main_prog = paddle.static.Program()
        with paddle.static.program_guard(main_prog, paddle.static.Program()):
            x = paddle.static.data(
                name="x", shape=[2, 3, 4, 4], dtype='float32'
            )
            out = self.execute_api(x, start_axis=-2, stop_axis=-1)

        exe = paddle.static.Executor(place=paddle.CPUPlace())
        fetch_out = exe.run(main_prog, feed={"x": np_x}, fetch_list=[out])
        self.assertTrue((2, 3, 16) == fetch_out[0].shape)


class TestStaticFlattenInferShapePythonAPI(unittest.TestCase):
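    # Shape inference with unknown (-1) dims: (-1, 3, -1, -1) flattened over
    # axes 2..3 infers (-1, 3, -1).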
    def execute_api(self, x, start_axis=0, stop_axis=-1):
        return paddle.flatten(x, start_axis, stop_axis)

    def test_static_api(self):
        paddle.enable_static()
        main_prog = paddle.static.Program()
        with paddle.static.program_guard(main_prog, paddle.static.Program()):
            x = paddle.static.data(
                name="x", shape=[-1, 3, -1, -1], dtype='float32'
            )
            out = self.execute_api(x, start_axis=2, stop_axis=3)
        self.assertTrue((-1, 3, -1) == out.shape)


class TestStaticInplaceFlattenPythonAPI(TestStaticFlattenPythonAPI):
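    # Same checks as above, routed through the in-place x.flatten_() API.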
    def execute_api(self, x, start_axis=0, stop_axis=-1):
        return x.flatten_(start_axis, stop_axis)


class TestFlattenPython(unittest.TestCase):
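    # Dynamic-graph API checks: numpy inputs are rejected and negative axes
    # behave like their positive counterparts.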
    def test_python_api(self):
        image_shape = (2, 3, 4, 4)
        x = (
            np.arange(
                image_shape[0]
                * image_shape[1]
                * image_shape[2]
                * image_shape[3]
            ).reshape(image_shape)
            / 100.0
        )
        x = x.astype('float32')

        def test_InputError():
            out = paddle.flatten(x)

        self.assertRaises(ValueError, test_InputError)

        def test_Negative():
            paddle.disable_static()
            img = paddle.to_tensor(x)
            out = paddle.flatten(img, start_axis=-2, stop_axis=-1)
            return out.numpy().shape

        res_shape = test_Negative()
        self.assertTrue((2, 3, 16) == res_shape)


class TestDygraphInplaceFlattenPython(unittest.TestCase):
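    # Dygraph check through the in-place Tensor.flatten_() API.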
    def test_python_api(self):
        image_shape = (2, 3, 4, 4)
        x = (
            np.arange(
                image_shape[0]
                * image_shape[1]
                * image_shape[2]
                * image_shape[3]
            ).reshape(image_shape)
            / 100.0
        )
        x = x.astype('float32')

        def test_Negative():
            paddle.disable_static()
            img = paddle.to_tensor(x)
            out = img.flatten_(start_axis=-2, stop_axis=-1)
            return out.numpy().shape

        res_shape = test_Negative()
        self.assertTrue((2, 3, 16) == res_shape)
        paddle.enable_static()


class TestFlatten0DTensorOpError(unittest.TestCase):
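    # Out-of-range axes must also raise ValueError for 0-D inputs.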
    def test_errors(self):
        image_shape = ()
        x = np.random.uniform(-1.0, 1.0, []).astype('float32')

        def test_ValueError1():
            x_var = paddle.static.data(
                name="x", shape=image_shape, dtype='float32'
            )
            out = paddle.flatten(x_var, start_axis=10, stop_axis=0)

        self.assertRaises(ValueError, test_ValueError1)

        def test_ValueError2():
            x_var = paddle.static.data(
                name="x", shape=image_shape, dtype='float32'
            )
            out = paddle.flatten(x_var, start_axis=0, stop_axis=10)

        self.assertRaises(ValueError, test_ValueError2)


if __name__ == "__main__":
    unittest.main()