From dbbe6e9cb61d74b45ada4efb78e44db1a0068105 Mon Sep 17 00:00:00 2001
From: wawltor
Date: Mon, 2 Dec 2019 16:31:14 +0800
Subject: [PATCH] fix the device supported of the op unique and
 unique_with_counts. (#21395)

* fix the device supported of the op unique and unique_with_counts.
test=develop
test=document_fix

* Fix the precision of test in the op of unique and unique_with_counts.
test=develop
test=document_fix
---
 paddle/fluid/operators/unique_op.cc              |  8 +++
 .../fluid/operators/unique_with_counts_op.cc     |  8 +++
 python/paddle/fluid/layers/nn.py                 |  2 +-
 .../fluid/tests/unittests/test_unique.py         | 42 +++++++++++++++
 .../unittests/test_unique_with_counts.py         | 52 +++++++++++++++++++
 5 files changed, 111 insertions(+), 1 deletion(-)

diff --git a/paddle/fluid/operators/unique_op.cc b/paddle/fluid/operators/unique_op.cc
index 08ce81d75e4..255d8ac4f3a 100644
--- a/paddle/fluid/operators/unique_op.cc
+++ b/paddle/fluid/operators/unique_op.cc
@@ -35,6 +35,14 @@ class UniqueOp : public framework::OperatorWithKernel {
     ctx->SetOutputDim("Out", {-1});
     ctx->SetOutputDim("Index", in_dims);
   }
+
+ protected:
+  framework::OpKernelType GetExpectedKernelType(
+      const framework::ExecutionContext& ctx) const override {
+    return framework::OpKernelType(
+        OperatorWithKernel::IndicateVarDataType(ctx, "X"),
+        platform::CPUPlace());
+  }
 };
 
 class UniqueOpMaker : public framework::OpProtoAndCheckerMaker {
diff --git a/paddle/fluid/operators/unique_with_counts_op.cc b/paddle/fluid/operators/unique_with_counts_op.cc
index 770bbefea15..0d7b3dc485c 100644
--- a/paddle/fluid/operators/unique_with_counts_op.cc
+++ b/paddle/fluid/operators/unique_with_counts_op.cc
@@ -40,6 +40,14 @@ class UniqueWithCountsOp : public framework::OperatorWithKernel {
     ctx->SetOutputDim("Index", in_dims);
     ctx->SetOutputDim("Count", {-1});
   }
+
+ protected:
+  framework::OpKernelType GetExpectedKernelType(
+      const framework::ExecutionContext& ctx) const override {
+    return framework::OpKernelType(
+        OperatorWithKernel::IndicateVarDataType(ctx, "X"),
+        platform::CPUPlace());
+  }
 };
 
 class UniqueWithCountsOpMaker : public framework::OpProtoAndCheckerMaker {
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index f7ec2453f2c..0386159b5bf 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -12746,7 +12746,7 @@ def unique_with_counts(x, dtype='int32'):
     This OP return a unique tensor for `x` , and count tensor that the count of unqiue result in raw input, \
         and an index tensor pointing to this unique tensor.
 
-    **NOTICE**: This op just be supported in device of CPU, and support the variable type of Tensor only.
+    **NOTICE**: This op supports the variable type of Tensor only.
 
     Args:
         x(Variable): A 1-D input tensor with input shape of :math:`[N]` , the input data type is float32, float64, int32, int64.
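The GetExpectedKernelType overrides above pin both operators to platform::CPUPlace(), so a program that otherwise runs on a CUDA device still dispatches unique and unique_with_counts to the CPU kernel, with the framework transferring inputs and outputs across devices. A minimal usage sketch of the Python API touched above, assuming the Paddle 1.x fluid graph-mode API (the expected values follow the op's documented first-occurrence ordering):

    import numpy as np
    import paddle.fluid as fluid

    # Minimal sketch, assuming the Paddle 1.x fluid graph-mode API; the op
    # itself always executes on CPU regardless of the program's device.
    x = fluid.layers.assign(np.array([2, 3, 3, 1, 5, 3], dtype='int32'))
    out, index, count = fluid.layers.unique_with_counts(x)
    # out   -> [2, 3, 1, 5]        unique values, first-occurrence order
    # index -> [0, 1, 1, 2, 3, 1]  position of each input element in `out`
    # count -> [1, 3, 1, 1]        occurrences of each unique value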
diff --git a/python/paddle/fluid/tests/unittests/test_unique.py b/python/paddle/fluid/tests/unittests/test_unique.py
index 2e91574954e..2a0ace246f5 100644
--- a/python/paddle/fluid/tests/unittests/test_unique.py
+++ b/python/paddle/fluid/tests/unittests/test_unique.py
@@ -68,5 +68,47 @@ class TestRandom(TestUniqueOp):
         self.outputs = {'Out': target_out, 'Index': target_index}
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
+class TestOneGPU(TestUniqueOp):
+    def init_config(self):
+        self.inputs = {'X': np.array([2], dtype='int64'), }
+        self.attrs = {'dtype': int(core.VarDesc.VarType.INT32)}
+        self.outputs = {
+            'Out': np.array(
+                [2], dtype='int64'),
+            'Index': np.array(
+                [0], dtype='int32')
+        }
+
+    def test_check_output(self):
+        if core.is_compiled_with_cuda():
+            place = core.CUDAPlace(0)
+            self.check_output_with_place(place, atol=1e-5)
+
+
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
+class TestRandomGPU(TestUniqueOp):
+    def init_config(self):
+        self.inputs = {'X': np.random.randint(0, 100, (150, ), dtype='int64')}
+        self.attrs = {'dtype': int(core.VarDesc.VarType.INT64)}
+        np_unique, np_index, reverse_index = np.unique(self.inputs['X'], True,
+                                                       True)
+        np_tuple = [(np_unique[i], np_index[i]) for i in range(len(np_unique))]
+        np_tuple.sort(key=lambda x: x[1])
+        target_out = np.array([i[0] for i in np_tuple], dtype='int64')
+        target_index = np.array(
+            [list(target_out).index(i) for i in self.inputs['X']],
+            dtype='int64')
+
+        self.outputs = {'Out': target_out, 'Index': target_index}
+
+    def test_check_output(self):
+        if core.is_compiled_with_cuda():
+            place = core.CUDAPlace(0)
+            self.check_output_with_place(place, atol=1e-5)
+
+
 if __name__ == "__main__":
     unittest.main()
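The GPU tests above build their expected outputs with the same NumPy reference as the existing CPU tests. As a standalone sketch (plain NumPy, no Paddle needed), the expected Out/Index pair reorders np.unique's sorted result into first-occurrence order:

    import numpy as np

    # Standalone sketch of the reference computation in TestRandomGPU:
    # np.unique returns sorted values, while the op emits them in order of
    # first occurrence, so the test re-sorts by first index of appearance.
    x = np.random.randint(0, 100, (150, ), dtype='int64')
    np_unique, np_index, _ = np.unique(x, return_index=True, return_inverse=True)
    pairs = sorted(zip(np_unique, np_index), key=lambda p: p[1])
    target_out = np.array([v for v, _ in pairs], dtype='int64')
    target_index = np.array([list(target_out).index(v) for v in x], dtype='int64')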
diff --git a/python/paddle/fluid/tests/unittests/test_unique_with_counts.py b/python/paddle/fluid/tests/unittests/test_unique_with_counts.py
index 80056422a2a..a1e9709ed93 100644
--- a/python/paddle/fluid/tests/unittests/test_unique_with_counts.py
+++ b/python/paddle/fluid/tests/unittests/test_unique_with_counts.py
@@ -80,5 +80,57 @@ class TestRandom(TestUniqueWithCountsOp):
         }
 
 
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
+class TestOneGPU(TestUniqueWithCountsOp):
+    def init_config(self):
+        self.inputs = {'X': np.array([2], dtype='int64'), }
+        self.attrs = {'dtype': int(core.VarDesc.VarType.INT32)}
+        self.outputs = {
+            'Out': np.array(
+                [2], dtype='int64'),
+            'Index': np.array(
+                [0], dtype='int32'),
+            'Count': np.array(
+                [1], dtype='int32')
+        }
+
+    def test_check_output(self):
+        if core.is_compiled_with_cuda():
+            place = core.CUDAPlace(0)
+            self.check_output_with_place(place, atol=1e-5)
+
+
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
+class TestRandomGPU(TestUniqueWithCountsOp):
+    def init_config(self):
+        input_data = np.random.randint(0, 100, (2000, ), dtype='int64')
+        self.inputs = {'X': input_data}
+        self.attrs = {'dtype': int(core.VarDesc.VarType.INT64)}
+        np_unique, np_index, reverse_index = np.unique(self.inputs['X'], True,
+                                                       True)
+        np_tuple = [(np_unique[i], np_index[i]) for i in range(len(np_unique))]
+        np_tuple.sort(key=lambda x: x[1])
+        target_out = np.array([i[0] for i in np_tuple], dtype='int64')
+        target_index = np.array(
+            [list(target_out).index(i) for i in self.inputs['X']],
+            dtype='int64')
+        count = [0 for i in range(len(np_unique))]
+        for i in range(target_index.shape[0]):
+            count[target_index[i]] += 1
+        target_count = np.array(count, dtype='int64')
+        self.outputs = {
+            'Out': target_out,
+            'Index': target_index,
+            'Count': target_count
+        }
+
+    def test_check_output(self):
+        if core.is_compiled_with_cuda():
+            place = core.CUDAPlace(0)
+            self.check_output_with_place(place, atol=1e-5)
+
+
 if __name__ == "__main__":
     unittest.main()
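The manual counting loop in TestRandomGPU above is equivalent to a single np.bincount over the index tensor; a minimal equivalence sketch (using np.unique's sorted order for brevity, rather than the test's first-occurrence order):

    import numpy as np

    # Equivalence sketch: tallying occurrences per unique value with a loop,
    # as the test does, matches one bincount over the inverse-index array.
    x = np.array([2, 3, 3, 1, 5, 3], dtype='int64')
    _, target_index = np.unique(x, return_inverse=True)  # indices into sorted uniques
    target_count = np.bincount(target_index)
    assert (target_count == np.array([1, 1, 3, 1])).all()  # counts for [1, 2, 3, 5]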