#   Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
from op_test import OpTest

import paddle
from paddle.fluid import core


class TestUniqueWithCountsOp(OpTest):
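    """Checks the unique_with_counts op on a fixed int64 vector.

    For X = [2, 3, 3, 1, 5, 3] the expected outputs are the unique values in
    order of first appearance (Out = [2, 3, 1, 5]), the position of each input
    element within Out (Index = [0, 1, 1, 2, 3, 1]), and the number of
    occurrences of each unique value (Count = [1, 3, 1, 1]).
    """
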
    def setUp(self):
        self.op_type = "unique_with_counts"
        self.init_config()

    def test_check_output(self):
        self.check_output()

    def init_config(self):
        self.inputs = {
            'X': np.array([2, 3, 3, 1, 5, 3], dtype='int64'),
        }
        self.attrs = {'dtype': int(core.VarDesc.VarType.INT32)}
        self.outputs = {
            'Out': np.array([2, 3, 1, 5], dtype='int64'),
            'Index': np.array([0, 1, 1, 2, 3, 1], dtype='int32'),
            'Count': np.array([1, 3, 1, 1], dtype='int32'),
        }


class TestOne(TestUniqueWithCountsOp):
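    """Single-element input: every output degenerates to length one."""
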
    def init_config(self):
        self.inputs = {
            'X': np.array([2], dtype='int64'),
        }
        self.attrs = {'dtype': int(core.VarDesc.VarType.INT32)}
        self.outputs = {
            'Out': np.array([2], dtype='int64'),
            'Index': np.array([0], dtype='int32'),
            'Count': np.array([1], dtype='int32'),
        }


class TestRandom(TestUniqueWithCountsOp):
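    """Compares the op against a NumPy reference on 2000 random int64 values."""
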
    def init_config(self):
        input_data = np.random.randint(0, 100, (2000,), dtype='int64')
        self.inputs = {'X': input_data}
        self.attrs = {'dtype': int(core.VarDesc.VarType.INT64)}
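        # Build the reference result with np.unique, then re-sort the unique
        # values by their first-occurrence index so the ordering matches the
        # op's first-appearance order; Index and Count are derived from that
        # re-ordered array.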
        np_unique, np_index, reverse_index = np.unique(
            self.inputs['X'], True, True
        )
        np_tuple = [(np_unique[i], np_index[i]) for i in range(len(np_unique))]
        np_tuple.sort(key=lambda x: x[1])
        target_out = np.array([i[0] for i in np_tuple], dtype='int64')
        target_index = np.array(
            [list(target_out).index(i) for i in self.inputs['X']], dtype='int64'
        )
        count = [0 for i in range(len(np_unique))]
        for i in range(target_index.shape[0]):
            count[target_index[i]] += 1
        target_count = np.array(count, dtype='int64')
        self.outputs = {
            'Out': target_out,
            'Index': target_index,
            'Count': target_count,
        }


class TestUniqueWithCountsRaiseError(unittest.TestCase):
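    """paddle.unique should raise TypeError for a plain list input and for the
    unsupported float16 dtype."""
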
    def test_errors(self):
        def test_type():
            paddle.unique([10])

        self.assertRaises(TypeError, test_type)

        def test_dtype():
            data = paddle.static.data(shape=[10], dtype="float16", name="input")
            paddle.unique(data)

        self.assertRaises(TypeError, test_dtype)


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestOneGPU(TestUniqueWithCountsOp):
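    """Same single-element case as TestOne, executed on CUDAPlace(0)."""
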
    def init_config(self):
        self.inputs = {
            'X': np.array([2], dtype='int64'),
        }
        self.attrs = {'dtype': int(core.VarDesc.VarType.INT32)}
        self.outputs = {
            'Out': np.array([2], dtype='int64'),
            'Index': np.array([0], dtype='int32'),
            'Count': np.array([1], dtype='int32'),
        }

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            self.check_output_with_place(place, atol=1e-5)


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestRandomGPU(TestUniqueWithCountsOp):
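    """Same random-input case as TestRandom, executed on CUDAPlace(0)."""
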
    def init_config(self):
        input_data = np.random.randint(0, 100, (2000,), dtype='int64')
        self.inputs = {'X': input_data}
        self.attrs = {'dtype': int(core.VarDesc.VarType.INT64)}
        np_unique, np_index, reverse_index = np.unique(
            self.inputs['X'], True, True
        )
        np_tuple = [(np_unique[i], np_index[i]) for i in range(len(np_unique))]
        np_tuple.sort(key=lambda x: x[1])
        target_out = np.array([i[0] for i in np_tuple], dtype='int64')
        target_index = np.array(
            [list(target_out).index(i) for i in self.inputs['X']], dtype='int64'
        )
        count = [0 for i in range(len(np_unique))]
        for i in range(target_index.shape[0]):
            count[target_index[i]] += 1
        target_count = np.array(count, dtype='int64')
        self.outputs = {
            'Out': target_out,
            'Index': target_index,
            'Count': target_count,
        }

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            self.check_output_with_place(place, atol=1e-5)


if __name__ == "__main__":
    unittest.main()