diff --git a/python/paddle/fluid/layers/metric_op.py b/python/paddle/fluid/layers/metric_op.py
index 1962a4ed6a6eeea076be8df394586eabdf7766b2..bf046b89322bbf53ee771979cad4852509fe8ca7 100644
--- a/python/paddle/fluid/layers/metric_op.py
+++ b/python/paddle/fluid/layers/metric_op.py
@@ -84,7 +84,9 @@ def accuracy(input, label, k=1, correct=None, total=None):
         if total is None:
             total = _varbase_creator(dtype="int32")
 
-        topk_out, topk_indices = nn.topk(input, k=k)
+        _k = k.numpy().item(0) if isinstance(k, Variable) else k
+        topk_out, topk_indices = _C_ops.top_k_v2(input, 'k', _k, 'sorted',
+                                                 False)
         _acc, _, _ = _C_ops.accuracy(topk_out, topk_indices, label, correct,
                                      total)
         return _acc
@@ -92,7 +94,20 @@ def accuracy(input, label, k=1, correct=None, total=None):
     helper = LayerHelper("accuracy", **locals())
     check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'],
                              'accuracy')
-    topk_out, topk_indices = nn.topk(input, k=k)
+    topk_out = helper.create_variable_for_type_inference(dtype=input.dtype)
+    topk_indices = helper.create_variable_for_type_inference(dtype="int64")
+    inputs = {"X": [input]}
+    if isinstance(k, Variable):
+        inputs['K'] = [k]
+    else:
+        attrs = {'k': k}
+    attrs['sorted'] = False
+    helper.append_op(
+        type="top_k_v2",
+        inputs=inputs,
+        attrs=attrs,
+        outputs={"Out": [topk_out],
+                 "Indices": [topk_indices]})
     acc_out = helper.create_variable_for_type_inference(dtype="float32")
     if correct is None:
         correct = helper.create_variable_for_type_inference(dtype="int32")
diff --git a/python/paddle/fluid/tests/unittests/test_accuracy_op.py b/python/paddle/fluid/tests/unittests/test_accuracy_op.py
index 00cf7d5e9877b80a9bc1036e686a31f19a00a48a..10ab76e4bfb1581cb631e1ac1410fba76e562923 100755
--- a/python/paddle/fluid/tests/unittests/test_accuracy_op.py
+++ b/python/paddle/fluid/tests/unittests/test_accuracy_op.py
@@ -78,6 +78,42 @@ class TestAccuracyOpError(unittest.TestCase):
             paddle.metric.accuracy(input=x3, label=label)
 
 
+class TestAccuracyAPI1(unittest.TestCase):
+    def setUp(self):
+        self.predictions = paddle.static.data(
+            shape=[2, 5], name="predictions", dtype="float32")
+        self.label = paddle.static.data(
+            shape=[2, 1], name="labels", dtype="int64")
+        self.result = paddle.static.accuracy(
+            input=self.predictions, label=self.label, k=1)
+        self.input_predictions = np.array(
+            [[0.2, 0.1, 0.4, 0.1, 0.1], [0.2, 0.3, 0.1, 0.15, 0.25]],
+            dtype="float32")
+        self.input_labels = np.array([[2], [0]], dtype="int64")
+        self.expect_value = np.array([0.5], dtype='float32')
+
+    def test_api(self):
+        exe = paddle.static.Executor()
+        result, = exe.run(feed={
+            "predictions": self.input_predictions,
+            'labels': self.input_labels
+        },
+                          fetch_list=[self.result.name])
+        self.assertEqual((result == self.expect_value).all(), True)
+
+
+class TestAccuracyAPI2(unittest.TestCase):
+    def test_api(self):
+        with fluid.dygraph.guard():
+            predictions = paddle.to_tensor(
+                [[0.2, 0.1, 0.4, 0.1, 0.1], [0.2, 0.3, 0.1, 0.15, 0.25]],
+                dtype='float32')
+            label = paddle.to_tensor([[2], [0]], dtype="int64")
+            result = paddle.static.accuracy(input=predictions, label=label, k=1)
+            expect_value = np.array([0.5], dtype='float32')
+            self.assertEqual((result.numpy() == expect_value).all(), True)
+
+
 class TestAccuracyAPI(unittest.TestCase):
     def test_api(self):
         with fluid.dygraph.guard():