From ac3d821bc0b2186b598db2b73434250a9e4d0124 Mon Sep 17 00:00:00 2001
From: Leo Chen
Date: Thu, 4 Mar 2021 15:01:43 +0800
Subject: [PATCH] [NPU] add npu kernel for equal op (#31393)

* add npu kernel for equal op

* refine code

* add more ut

* update year
---
 .../operators/controlflow/compare_op_npu.cc   | 53 +++++++++++
 .../unittests/npu/test_compare_op_npu.py      | 90 +++++++++++++++++++
 2 files changed, 143 insertions(+)
 create mode 100644 paddle/fluid/operators/controlflow/compare_op_npu.cc
 create mode 100644 python/paddle/fluid/tests/unittests/npu/test_compare_op_npu.py

diff --git a/paddle/fluid/operators/controlflow/compare_op_npu.cc b/paddle/fluid/operators/controlflow/compare_op_npu.cc
new file mode 100644
index 00000000000..58401302bc3
--- /dev/null
+++ b/paddle/fluid/operators/controlflow/compare_op_npu.cc
@@ -0,0 +1,53 @@
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <memory>
#include <string>
#include <vector>

#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/fluid/operators/controlflow/compare_op.h"
#include "paddle/fluid/operators/elementwise/elementwise_op_function.h"
#include "paddle/fluid/operators/npu_op_runner.h"

namespace paddle {
namespace operators {

template <typename DeviceContext, typename T>
class EqualNPUKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* x = ctx.Input<framework::Tensor>("X");
    auto* y = ctx.Input<framework::Tensor>("Y");
    auto* out = ctx.Output<framework::Tensor>("Out");
    // The comparison result is boolean regardless of the input dtype.
    out->mutable_data<bool>(ctx.GetPlace());

    // Delegate the elementwise comparison to the Ascend "Equal" operator.
    auto runner = NpuOpRunner("Equal", {*x, *y}, {*out}, {});
    auto stream =
        ctx.template device_context<paddle::platform::NPUDeviceContext>()
            .stream();
    runner.Run(stream);
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
namespace plat = paddle::platform;

REGISTER_OP_NPU_KERNEL(
    equal, ops::EqualNPUKernel<plat::NPUDeviceContext, float>,
    ops::EqualNPUKernel<plat::NPUDeviceContext, plat::float16>,
    ops::EqualNPUKernel<plat::NPUDeviceContext, int>);
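
For reference, a minimal usage sketch of the op this kernel serves, using the
static-graph API that the tests below rely on. This is a hedged illustration,
not part of the applied diff: it assumes a Paddle build with NPU support
(paddle.is_compiled_with_npu()) and falls back to CPU otherwise, and it uses
paddle.equal as the public entry point that lowers to the "equal" op.

import numpy as np
import paddle

paddle.enable_static()

# Build a tiny program around the "equal" op and run it on NPUPlace(0)
# when available (the same place the unit tests below use).
main = paddle.static.Program()
startup = paddle.static.Program()
with paddle.static.program_guard(main, startup):
    x = paddle.static.data(name='x', shape=[11, 17], dtype='float32')
    y = paddle.static.data(name='y', shape=[11, 17], dtype='float32')
    out = paddle.equal(x, y)  # lowers to the "equal" op registered above

place = (paddle.NPUPlace(0)
         if paddle.is_compiled_with_npu() else paddle.CPUPlace())
exe = paddle.static.Executor(place)
exe.run(startup)

x_np = np.random.uniform(1, 2, [11, 17]).astype('float32')
res, = exe.run(main, feed={'x': x_np, 'y': x_np.copy()}, fetch_list=[out])
print(res.all())  # True: every element of x equals its own copy
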
diff --git a/python/paddle/fluid/tests/unittests/npu/test_compare_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_compare_op_npu.py
new file mode 100644
index 00000000000..c82897d54b8
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/npu/test_compare_op_npu.py
@@ -0,0 +1,90 @@
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import numpy as np
import unittest
import sys
sys.path.append("..")
from op_test import OpTest
import paddle
import paddle.fluid as fluid

paddle.enable_static()
SEED = 2021


@unittest.skipIf(not paddle.is_compiled_with_npu(),
                 "core is not compiled with NPU")
class TestEqual(OpTest):
    def setUp(self):
        self.set_npu()
        self.op_type = "equal"
        self.place = paddle.NPUPlace(0)

        self.init_dtype()
        np.random.seed(SEED)
        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        y = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = x == y  # all elements are not equal

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(x),
            'Y': OpTest.np_dtype_to_fluid_dtype(y)
        }
        self.outputs = {'Out': out}

    def set_npu(self):
        self.__class__.use_npu = True

    def init_dtype(self):
        self.dtype = np.float32

    def test_check_output(self):
        self.check_output_with_place(self.place, check_dygraph=False)


class TestEqual2(TestEqual):
    def setUp(self):
        self.set_npu()
        self.op_type = "equal"
        self.place = paddle.NPUPlace(0)

        self.init_dtype()
        np.random.seed(SEED)
        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        y = x.copy()
        y[0][1] = 1
        out = x == y  # all elements are equal, except position [0][1]

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(x),
            'Y': OpTest.np_dtype_to_fluid_dtype(y)
        }
        self.outputs = {'Out': out}


class TestEqual2FP16(TestEqual2):
    def init_dtype(self):
        self.dtype = np.float16


class TestEqual2Int(TestEqual2):
    def init_dtype(self):
        self.dtype = np.int32


if __name__ == '__main__':
    unittest.main()
--
GitLab
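
As a usage note, the same kernel should also be reachable from the imperative
API once dygraph dispatch to NPU kernels is available. The tests above pass
check_dygraph=False, so the following is a hedged sketch rather than a path
this patch itself verifies; it falls back to CPU on builds without NPU
support.

import numpy as np
import paddle

# Pick the NPU place when the build supports it, mirroring the tests above.
place = (paddle.NPUPlace(0)
         if paddle.is_compiled_with_npu() else paddle.CPUPlace())
paddle.disable_static(place)

x = paddle.to_tensor(np.array([[1.0, 2.0]], dtype='float32'))
y = paddle.to_tensor(np.array([[1.0, 0.0]], dtype='float32'))
print(paddle.equal(x, y).numpy())  # [[ True False]]
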