#   Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
from eager_op_test import OpTest, paddle_static_guard

import paddle
from paddle.fluid import core


class TestUniqueOp(OpTest):
    """Base test for the static-graph ``unique`` op.

    ``Out`` holds the unique values in first-occurrence order and ``Index``
    maps every input element to its position in ``Out``.
    """

    def setUp(self):
        self.op_type = "unique"
        self.init_dtype()
        self.init_config()

    def test_check_output(self):
        # unique returns sorted data in dygraph, so only check static graph.
        self.check_output(check_dygraph=False)

    def init_dtype(self):
        self.dtype = np.int64

    def init_config(self):
        self.inputs = {
            'X': np.array([2, 3, 3, 1, 5, 3], dtype=self.dtype),
        }
        self.attrs = {'dtype': int(core.VarDesc.VarType.INT32)}
        self.outputs = {
            'Out': np.array([2, 3, 1, 5], dtype=self.dtype),
            'Index': np.array([0, 1, 1, 2, 3, 1], dtype='int32'),
        }


class TestOne(TestUniqueOp):
    """Edge case: a single-element input is its own unique set."""

    def init_config(self):
        self.inputs = {
            'X': np.array([2], dtype=self.dtype),
        }
        self.attrs = {'dtype': int(core.VarDesc.VarType.INT32)}
        self.outputs = {
            'Out': np.array([2], dtype=self.dtype),
            'Index': np.array([0], dtype='int32'),
        }


class TestRandom(TestUniqueOp):
    """Random input: expected values rebuilt in first-occurrence order.

    ``np.unique`` sorts its output, while the op preserves first-occurrence
    order, so the reference is re-sorted by original index before comparing.
    """

    def init_config(self):
        self.inputs = {'X': np.random.randint(0, 100, (150,), dtype=self.dtype)}
        self.attrs = {'dtype': int(core.VarDesc.VarType.INT64)}
        np_unique, np_index, reverse_index = np.unique(
            self.inputs['X'], True, True
        )
        # Re-order unique values by their first occurrence in X.
        np_tuple = [(np_unique[i], np_index[i]) for i in range(len(np_unique))]
        np_tuple.sort(key=lambda x: x[1])
        target_out = np.array([i[0] for i in np_tuple], dtype=self.dtype)
        target_index = np.array(
            [list(target_out).index(i) for i in self.inputs['X']], dtype='int64'
        )

        self.outputs = {'Out': target_out, 'Index': target_index}


class TestUniqueRaiseError(unittest.TestCase):
    """``paddle.unique`` must raise TypeError for bad input type/dtype."""

    def test_errors(self):
        with paddle_static_guard():

            def test_type():
                # A plain Python list is not an accepted input type.
                paddle.unique([10])

            self.assertRaises(TypeError, test_type)

            def test_dtype():
                # float16 input is rejected by the static-graph op.
                data = paddle.static.data(
                    shape=[10], dtype="float16", name="input"
                )
                paddle.unique(data)

            self.assertRaises(TypeError, test_dtype)


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestOneGPU(TestUniqueOp):
    """Single-element case executed on a CUDA place."""

    def init_config(self):
        self.inputs = {
            'X': np.array([2], dtype=self.dtype),
        }
        self.attrs = {'dtype': int(core.VarDesc.VarType.INT32)}
        self.outputs = {
            'Out': np.array([2], dtype=self.dtype),
            'Index': np.array([0], dtype='int32'),
        }

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            # unique returns sorted data in dygraph, so only check static graph.
            self.check_output_with_place(
                place, atol=1e-5, check_dygraph=False
            )


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestRandomGPU(TestUniqueOp):
    """Random-input case executed on a CUDA place."""

    def init_config(self):
        self.inputs = {'X': np.random.randint(0, 100, (150,), dtype=self.dtype)}
        self.attrs = {'dtype': int(core.VarDesc.VarType.INT64)}
        np_unique, np_index, reverse_index = np.unique(
            self.inputs['X'], True, True
        )
        # Re-order unique values by their first occurrence in X.
        np_tuple = [(np_unique[i], np_index[i]) for i in range(len(np_unique))]
        np_tuple.sort(key=lambda x: x[1])
        target_out = np.array([i[0] for i in np_tuple], dtype=self.dtype)
        target_index = np.array(
            [list(target_out).index(i) for i in self.inputs['X']], dtype='int64'
        )

        self.outputs = {'Out': target_out, 'Index': target_index}

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            # unique returns sorted data in dygraph, so only check static graph.
            self.check_output_with_place(
                place, atol=1e-5, check_dygraph=False
            )


class TestSortedUniqueOp(TestUniqueOp):
    """Sorted-mode unique (``is_sorted=True``) with index/inverse/counts."""

    def init_dtype(self):
        self.dtype = np.float64

    def init_config(self):
        self.inputs = {'X': np.array([2, 3, 3, 1, 5, 3], dtype=self.dtype)}
        # In sorted mode the op matches np.unique directly.
        unique, indices, inverse, count = np.unique(
            self.inputs['X'],
            return_index=True,
            return_inverse=True,
            return_counts=True,
            axis=None,
        )
        self.attrs = {
            'dtype': int(core.VarDesc.VarType.INT32),
            "return_index": True,
            "return_inverse": True,
            "return_counts": True,
            "axis": None,
            "is_sorted": True,
        }
        self.outputs = {
            'Out': unique,
            'Indices': indices,
            "Index": inverse,
            "Counts": count,
        }


class TestSortedUniqueFP16Op(TestSortedUniqueOp):
    """Same as TestSortedUniqueOp but with float16 input."""

    def init_dtype(self):
        self.dtype = np.float16


@unittest.skipIf(
    not core.is_compiled_with_cuda()
    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
    "core is not compiled with CUDA or not support the bfloat16",
)
class TestSortedUniqueBF16Op(TestSortedUniqueOp):
    """Same as TestSortedUniqueOp but with bfloat16 input."""

    def init_dtype(self):
        # numpy has no native bfloat16; uint16 carries the raw bits
        # (presumably matching OpTest's bfloat16 convention).
        self.dtype = np.uint16

    def test_check_output(self):
        # unique returns sorted data in dygraph, so only check static graph.
        self.check_output_with_place(core.CUDAPlace(0), check_dygraph=False)


class TestUniqueOpAxisNone(TestUniqueOp):
    """Sorted unique over a flattened 3-D input (``axis=None``)."""

    def init_dtype(self):
        self.dtype = np.float64

    def init_config(self):
        self.inputs = {
            'X': np.random.randint(0, 100, (4, 7, 10)).astype(self.dtype)
        }
        unique, indices, inverse, counts = np.unique(
            self.inputs['X'],
            return_index=True,
            return_inverse=True,
            return_counts=True,
            axis=None,
        )
        self.attrs = {
            'dtype': int(core.VarDesc.VarType.INT32),
            "return_index": True,
            "return_inverse": True,
            "return_counts": True,
            "axis": None,
            "is_sorted": True,
        }
        self.outputs = {
            'Out': unique,
            'Indices': indices,
            "Index": inverse,
            "Counts": counts,
        }


class TestUniqueOpAxisNoneFP16Op(TestUniqueOpAxisNone):
    """Same as TestUniqueOpAxisNone but with float16 input."""

    def init_dtype(self):
        self.dtype = np.float16


@unittest.skipIf(
    not core.is_compiled_with_cuda()
    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
    "core is not compiled with CUDA or not support the bfloat16",
)
class TestUniqueOpAxisNoneBF16Op(TestUniqueOpAxisNone):
    """Same as TestUniqueOpAxisNone but with bfloat16 input."""

    def init_dtype(self):
        # numpy has no native bfloat16; uint16 carries the raw bits.
        self.dtype = np.uint16

    def test_check_output(self):
        # unique returns sorted data in dygraph, so only check static graph.
        self.check_output_with_place(core.CUDAPlace(0), check_dygraph=False)


class TestUniqueOpAxisNeg(TestUniqueOp):
    """Sorted unique along a negative axis (``axis=-1``)."""

    def init_dtype(self):
        self.dtype = np.float64

    def init_config(self):
        self.inputs = {
            'X': np.random.randint(0, 100, (6, 1, 8)).astype(self.dtype)
        }
        unique, indices, inverse, counts = np.unique(
            self.inputs['X'],
            return_index=True,
            return_inverse=True,
            return_counts=True,
            axis=-1,
        )
        self.attrs = {
            'dtype': int(core.VarDesc.VarType.INT32),
            "return_index": True,
            "return_inverse": True,
            "return_counts": True,
            "axis": [-1],
            "is_sorted": True,
        }
        self.outputs = {
            'Out': unique,
            'Indices': indices,
            "Index": inverse,
            "Counts": counts,
        }


class TestUniqueOpAxisNegFP16Op(TestUniqueOpAxisNeg):
    """Same as TestUniqueOpAxisNeg but with float16 input."""

    def init_dtype(self):
        self.dtype = np.float16


@unittest.skipIf(
    not core.is_compiled_with_cuda()
    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
    "core is not compiled with CUDA or not support the bfloat16",
)
class TestUniqueOpAxisNegBF16Op(TestUniqueOpAxisNeg):
    """Same as TestUniqueOpAxisNeg but with bfloat16 input."""

    def init_dtype(self):
        # numpy has no native bfloat16; uint16 carries the raw bits.
        self.dtype = np.uint16

    def test_check_output(self):
        # unique returns sorted data in dygraph, so only check static graph.
        self.check_output_with_place(core.CUDAPlace(0), check_dygraph=False)


class TestUniqueOpAxis1(TestUniqueOp):
    """Sorted unique along an interior axis (``axis=1``)."""

    def init_dtype(self):
        self.dtype = np.float64

    def init_config(self):
        self.inputs = {
            'X': np.random.randint(0, 100, (3, 8, 8)).astype(self.dtype)
        }
        unique, indices, inverse, counts = np.unique(
            self.inputs['X'],
            return_index=True,
            return_inverse=True,
            return_counts=True,
            axis=1,
        )
        self.attrs = {
            'dtype': int(core.VarDesc.VarType.INT32),
            "return_index": True,
            "return_inverse": True,
            "return_counts": True,
            "axis": [1],
            "is_sorted": True,
        }
        self.outputs = {
            'Out': unique,
            'Indices': indices,
            "Index": inverse,
            "Counts": counts,
        }


class TestUniqueOpAxis1FP16Op(TestUniqueOpAxis1):
    """Same as TestUniqueOpAxis1 but with float16 input."""

    def init_dtype(self):
        self.dtype = np.float16


@unittest.skipIf(
    not core.is_compiled_with_cuda()
    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
    "core is not compiled with CUDA or not support the bfloat16",
)
class TestUniqueOpAxis1BF16Op(TestUniqueOpAxis1):
    """Same as TestUniqueOpAxis1 but with bfloat16 input."""

    def init_dtype(self):
        # numpy has no native bfloat16; uint16 carries the raw bits.
        self.dtype = np.uint16

    def test_check_output(self):
        # unique returns sorted data in dygraph, so only check static graph.
        self.check_output_with_place(core.CUDAPlace(0), check_dygraph=False)


class TestUniqueAPI(unittest.TestCase):
    """Tests the ``paddle.unique`` Python API against ``np.unique``."""

    def test_dygraph_api_out(self):
        # Basic dygraph call: only the unique values are returned.
        paddle.disable_static()
        x_data = np.random.randint(0, 10, (120,))  # fixed duplicated `x_data = x_data =` typo
        x = paddle.to_tensor(x_data)
        out = paddle.unique(x)
        expected_out = np.unique(x_data)
        self.assertTrue((out.numpy() == expected_out).all(), True)

    def test_dygraph_api_attr(self):
        # Full attribute set (index / inverse / counts) along axis 0.
        paddle.disable_static()
        x_data = np.random.random((3, 5, 5)).astype("float32")
        x = paddle.to_tensor(x_data)
        out, index, inverse, counts = paddle.unique(
            x,
            return_index=True,
            return_inverse=True,
            return_counts=True,
            axis=0,
        )
        np_out, np_index, np_inverse, np_counts = np.unique(
            x_data,
            return_index=True,
            return_inverse=True,
            return_counts=True,
            axis=0,
        )
        self.assertTrue((out.numpy() == np_out).all(), True)
        self.assertTrue((index.numpy() == np_index).all(), True)
        self.assertTrue((inverse.numpy() == np_inverse).all(), True)
        self.assertTrue((counts.numpy() == np_counts).all(), True)

    def test_dygraph_attr_dtype(self):
        # The dtype argument controls the dtype of index/inverse/counts.
        paddle.disable_static()
        x_data = np.random.randint(0, 10, (120,))  # fixed duplicated `x_data = x_data =` typo
        x = paddle.to_tensor(x_data)
        out, indices, inverse, counts = paddle.unique(
            x,
            return_index=True,
            return_inverse=True,
            return_counts=True,
            dtype="int32",
        )
        expected_out, np_indices, np_inverse, np_counts = np.unique(
            x_data, return_index=True, return_inverse=True, return_counts=True
        )
        self.assertTrue((out.numpy() == expected_out).all(), True)
        self.assertTrue((indices.numpy() == np_indices).all(), True)
        self.assertTrue((inverse.numpy() == np_inverse).all(), True)
        self.assertTrue((counts.numpy() == np_counts).all(), True)

    def test_static_graph(self):
        # Static-graph path: build the program, run it with an Executor.
        with paddle_static_guard():
            with paddle.static.program_guard(
                paddle.static.Program(), paddle.static.Program()
            ):
                x = paddle.static.data(name='x', shape=[3, 2], dtype='float64')
                unique, inverse, counts = paddle.unique(
                    x, return_inverse=True, return_counts=True, axis=0
                )
                place = paddle.CPUPlace()
                exe = paddle.static.Executor(place)
                x_np = np.array([[1, 2], [3, 4], [1, 2]]).astype('float64')
                result = exe.run(
                    feed={"x": x_np}, fetch_list=[unique, inverse, counts]
                )


class TestUniqueError(unittest.TestCase):
    """Checks that ``paddle.unique`` raises TypeError for bad dtype/attrs."""

    def test_input_dtype(self):
        def test_x_dtype():
            with paddle_static_guard():
                with paddle.static.program_guard(
                    paddle.static.Program(), paddle.static.Program()
                ):
                    x = paddle.static.data(
                        name='x', shape=[10, 10], dtype='float16'
                    )
                    result = paddle.unique(x)

        # BUG FIX: this assertion was nested inside test_x_dtype itself,
        # so it never executed; it must run in the test method.
        self.assertRaises(TypeError, test_x_dtype)

    def test_attr(self):
        with paddle_static_guard():
            x = paddle.static.data(name='x', shape=[10, 10], dtype='float64')

            def test_return_index():
                result = paddle.unique(x, return_index=0)

            self.assertRaises(TypeError, test_return_index)

            def test_return_inverse():
                result = paddle.unique(x, return_inverse='s')

            self.assertRaises(TypeError, test_return_inverse)

            def test_return_counts():
                result = paddle.unique(x, return_counts=3)

            self.assertRaises(TypeError, test_return_counts)

            def test_axis():
                result = paddle.unique(x, axis='12')

            self.assertRaises(TypeError, test_axis)

            def test_dtype():
                result = paddle.unique(x, dtype='float64')

            # BUG FIX: test_dtype was defined but never asserted.
            self.assertRaises(TypeError, test_dtype)


if __name__ == "__main__":
    # Run all unit tests defined in this module.
    unittest.main()