From ef15544ee0a01a2b4e88f51e490fb28a0770a3a2 Mon Sep 17 00:00:00 2001
From: OleNet
Date: Wed, 17 Mar 2021 15:40:11 +0800
Subject: [PATCH] [NPU] add NPU add topk (#31596)

* add topk op
* add cmake
* update topk npu op
* refactor func
* fix test not go npu TopKD bug
* NPUPlace(4) to NPUPlace(0)
* update comment

Co-authored-by: oyjxer <1728722986@qq.com>
---
 paddle/fluid/operators/top_k_op_npu.cc        | 89 +++++++++++++++++
 .../tests/unittests/npu/test_top_k_op_npu.py  | 95 +++++++++++++++++++
 2 files changed, 184 insertions(+)
 create mode 100644 paddle/fluid/operators/top_k_op_npu.cc
 create mode 100644 python/paddle/fluid/tests/unittests/npu/test_top_k_op_npu.py

diff --git a/paddle/fluid/operators/top_k_op_npu.cc b/paddle/fluid/operators/top_k_op_npu.cc
new file mode 100644
index 00000000000..a4690133410
--- /dev/null
+++ b/paddle/fluid/operators/top_k_op_npu.cc
@@ -0,0 +1,89 @@
+/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+    http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include <memory>
+#include <string>
+
+#include "paddle/fluid/operators/top_k_op.h"
+#include "paddle/fluid/operators/npu_op_runner.h"
+
+namespace paddle {
+namespace operators {
+
+void gen_assist_seq(framework::Tensor* assit_tensor,
+                    int64_t dim, const framework::ExecutionContext& ctx) {
+  const int64_t dimx2 = dim;
+  std::vector<paddle::platform::float16> assit;
+  assit.resize(2 * dimx2);
+  for (int64_t i = 0; i < dimx2; i++) {
+    // first half: i in range [0, dim), the index itself cast to float16
+    assit[i] = static_cast<paddle::platform::float16>(i);
+
+    // second half: i in range [dim, 2 * dim), the gap lost when i is
+    // rounded through float16, so the exact index can be reconstructed
+    int64_t idx = static_cast<int64_t>(
+        static_cast<paddle::platform::float16>(i));
+    int64_t gap = i - idx;
+    assit[i + dim] = static_cast<paddle::platform::float16>(gap);
+  }
+  framework::TensorFromVector(assit, ctx.device_context(), assit_tensor);
+}
+
+
+template <typename DeviceContext, typename T>
+class TopkNPUKernel : public framework::OpKernel<T> {
+ public:
+  void Compute(const framework::ExecutionContext& ctx) const override {
+    // read input
+    auto* input = ctx.Input<framework::LoDTensor>("X");
+    auto* output = ctx.Output<framework::LoDTensor>("Out");
+    auto* indices = ctx.Output<framework::LoDTensor>("Indices");
+
+    size_t k = static_cast<int>(ctx.Attr<int>("k"));
+
+    output->mutable_data<T>(ctx.GetPlace());
+    indices->mutable_data<int>(ctx.GetPlace());
+
+    // prepare assist tensor
+    auto dim = input->dims().size();
+    framework::Tensor assist_seq_tensor;
+    assist_seq_tensor.Resize({2 * dim});
+    assist_seq_tensor.mutable_data<T>(ctx.GetPlace());
+    gen_assist_seq(&assist_seq_tensor, dim, ctx);
+
+    framework::NPUAttributeMap attr_input = {{"sorted", "true"},
+                                             {"k", static_cast<int>(k)},
+                                             {"dim", -1},
+                                             {"largest", true}};
+
+    // run the Ascend TopKD op
+    auto runner = NpuOpRunner("TopKD",
+                              {*input, assist_seq_tensor},
+                              {*output, *indices},
+                              attr_input);
+
+    auto stream =
+        ctx.template device_context<paddle::platform::NPUDeviceContext>()
+            .stream();
+
+    runner.Run(stream);
+  }
+};
+
+}  // namespace operators
+}  // namespace paddle
+
+namespace ops = paddle::operators;
+
+// The Ascend TopKD op only supports float16 input
+REGISTER_OP_NPU_KERNEL(
+    top_k,
+    ops::TopkNPUKernel<paddle::platform::NPUDeviceContext,
+                       paddle::platform::float16>);
diff --git a/python/paddle/fluid/tests/unittests/npu/test_top_k_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_top_k_op_npu.py
new file mode 100644
index 00000000000..a59d2e618c6
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/npu/test_top_k_op_npu.py
@@ -0,0 +1,95 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import numpy as np
+import unittest
+import sys
+sys.path.append("..")
+from op_test import OpTest
+import paddle
+import paddle.fluid as fluid
+from paddle.fluid import core
+
+paddle.enable_static()
+SEED = 2021
+
+
+@unittest.skipIf(not paddle.is_compiled_with_npu(),
+                 "core is not compiled with NPU")
+class TestTopk(OpTest):
+    def setUp(self):
+        self.set_npu()
+        self.place = paddle.NPUPlace(0)
+        self.op_type = "top_k"
+        self.init_dtype()
+
+        x = np.array([[0.78104149, 0.88745828, 0.32362268],
+                      [0.82196718, 0.48763277, 0.42826136],
+                      [0.96527182, 0.34851612, 0.12959783]]).astype(self.dtype)
+
+        self.inputs = {'X': x}
+        np_out = np.array([[0.88745828], [0.82196718],
+                           [0.96527182]]).astype(self.dtype)
+        np_indices = np.array([[1], [0], [0]])
+
+        self.attrs = {'k': 1, "axis": -1}
+        self.outputs = {'Out': np_out, 'Indices': np_indices}
+
+    def set_npu(self):
+        self.__class__.use_npu = True
+        self.__class__.no_need_check_grad = True
+
+    def init_dtype(self):
+        self.dtype = np.float16
+
+    def test_check_output(self):
+        self.check_output_with_place(self.place, check_dygraph=False)
+
+
+@unittest.skipIf(not paddle.is_compiled_with_npu(),
+                 "core is not compiled with NPU")
+class TestTopkV2(OpTest):
+    def setUp(self):
+        self.set_npu()
+        self.place = paddle.NPUPlace(0)
+        self.op_type = "top_k"
+        self.init_dtype()
+
+        x = np.array([[0.78104149, 0.88745828, 0.32362268],
+                      [0.82196718, 0.48763277, 0.42826136],
+                      [0.96527182, 0.34851612, 0.12959783]]).astype(self.dtype)
+
+        self.inputs = {'X': x}
+        np_out = np.array([[0.88745828, 0.78104149],
+                           [0.82196718, 0.48763277],
+                           [0.96527182, 0.34851612]]).astype(self.dtype)
+        np_indices = np.array([[1, 0], [0, 1], [0, 1]])
+
+        self.attrs = {'k': 2, "axis": -1}
+        self.outputs = {'Out': np_out, 'Indices': np_indices}
+
+    def set_npu(self):
+        self.__class__.use_npu = True
+        self.__class__.no_need_check_grad = True
+
+    def init_dtype(self):
+        self.dtype = np.float16
+
+    def test_check_output(self):
+        self.check_output_with_place(self.place, check_dygraph=False)
+
+
+if __name__ == '__main__':
+    unittest.main()
--
GitLab
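
A note on the assist sequence: TopKD consumes an auxiliary tensor of length 2 * dim alongside the input. Its first half holds the indices 0..dim-1 cast to float16, and its second half holds the gap each index loses in that cast, so exact integer indices can be recovered from float16 arithmetic on the device. Below is a minimal NumPy sketch of the same construction (illustration only; the function name gen_assist_seq_np and the small dim value are stand-ins, not part of the patch):

    import numpy as np

    def gen_assist_seq_np(dim):
        # NumPy re-implementation of gen_assist_seq from top_k_op_npu.cc,
        # for illustration only.
        # First half: indices 0..dim-1 cast to float16 (exact up to 2048).
        # Second half: the rounding gap lost in that cast, so that
        # float16 index + gap reconstructs the exact integer index.
        assist = np.zeros(2 * dim, dtype=np.float16)
        for i in range(dim):
            assist[i] = np.float16(i)
            gap = i - int(np.float16(i))
            assist[i + dim] = np.float16(gap)
        return assist

    print(gen_assist_seq_np(4))
    # -> [0. 1. 2. 3. 0. 0. 0. 0.]

For small indices the gap terms are all zero; they only become nonzero once an index exceeds float16's exact-integer range (2048), which is exactly the case the second half of the tensor exists to correct.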
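
The unit tests drive the kernel through OpTest. For a quick manual check outside the test harness, a static-graph run along the following lines should reach the same registered top_k NPU kernel. This is a sketch under two assumptions not stated in the patch: a Paddle build with NPU support (paddle.is_compiled_with_npu() returns True), and the fluid-era fluid.layers.top_k API, which sets the 'k' attribute the kernel reads:

    import numpy as np
    import paddle
    import paddle.fluid as fluid

    paddle.enable_static()

    main_prog = fluid.Program()
    startup_prog = fluid.Program()
    with fluid.program_guard(main_prog, startup_prog):
        # float16 input, matching the dtype the TopKD kernel is registered for
        x = fluid.data(name='x', shape=[3, 3], dtype='float16')
        out, indices = fluid.layers.top_k(x, k=2)

    place = paddle.NPUPlace(0)  # same device index the tests use
    exe = fluid.Executor(place)
    exe.run(startup_prog)

    x_np = np.random.rand(3, 3).astype('float16')
    out_np, idx_np = exe.run(main_prog,
                             feed={'x': x_np},
                             fetch_list=[out, indices])
    print(out_np)   # top-2 values per row, sorted descending
    print(idx_np)   # their column indices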