From 4641e8fc5d8dba9b4b5ed6092d264d3a2024e4e0 Mon Sep 17 00:00:00 2001
From: JingZhuangzhuang <75348594+JZZ-NOTE@users.noreply.github.com>
Date: Thu, 19 Aug 2021 19:26:49 +0800
Subject: [PATCH] [NPU] Support npu kernel for sin op (#34844)

* add npu sin op

* [NPU] Support npu kernel for sin op

* modify support npu kernel for sin op

* modify support npu kernel for sin op

* modify npu sin op

* modify npu sin op

* add sin op npu
---
 paddle/fluid/operators/activation_op_npu.cc  | 27 +++
 .../tests/unittests/npu/test_sin_op_npu.py   | 80 +++++++++++++++++++
 2 files changed, 107 insertions(+)
 create mode 100644 python/paddle/fluid/tests/unittests/npu/test_sin_op_npu.py

diff --git a/paddle/fluid/operators/activation_op_npu.cc b/paddle/fluid/operators/activation_op_npu.cc
index 5f2925784e..eb21850710 100644
--- a/paddle/fluid/operators/activation_op_npu.cc
+++ b/paddle/fluid/operators/activation_op_npu.cc
@@ -811,6 +811,27 @@ class ExpGradNPUKernel : public framework::OpKernel<T> {
   }
 };
 
+template <typename DeviceContext, typename T>
+class SinNPUKernel : public framework::OpKernel<T> {
+ public:
+  void Compute(const framework::ExecutionContext& ctx) const override {
+    auto* x = ctx.Input<Tensor>("X");
+
+    auto* out = ctx.Output<Tensor>("Out");
+
+    auto place = ctx.GetPlace();
+
+    out->mutable_data<T>(place);
+
+    auto stream =
+        ctx.template device_context<paddle::platform::NPUDeviceContext>()
+            .stream();
+
+    const auto& runner = NpuOpRunner("Sin", {*x}, {*out}, {});
+    runner.Run(stream);
+  }
+};
+
 }  // namespace operators
 }  // namespace paddle
 
@@ -975,3 +996,9 @@
 REGISTER_OP_NPU_KERNEL(
     exp_grad, ops::ExpGradNPUKernel<paddle::platform::NPUDeviceContext, float>,
     ops::ExpGradNPUKernel<paddle::platform::NPUDeviceContext, double>);
+
+REGISTER_OP_NPU_KERNEL(
+    sin, ops::SinNPUKernel<paddle::platform::NPUDeviceContext, float>,
+    ops::SinNPUKernel<paddle::platform::NPUDeviceContext, double>,
+    ops::SinNPUKernel<paddle::platform::NPUDeviceContext,
+                      paddle::platform::float16>);
diff --git a/python/paddle/fluid/tests/unittests/npu/test_sin_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_sin_op_npu.py
new file mode 100644
index 0000000000..437f5c35e9
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/npu/test_sin_op_npu.py
@@ -0,0 +1,80 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+import unittest
+
+import numpy as np
+from scipy.special import expit, erf
+
+from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+import paddle.fluid as fluid
+import paddle.fluid.core as core
+from paddle.fluid import compiler, Program, program_guard
+
+paddle.enable_static()
+
+
+def test_class(op_type, typename):
+    class TestSin(OpTest):
+        def setUp(self):
+            self.op_type = "sin"
+            self.__class__.use_npu = True
+            self.place = paddle.NPUPlace(0)
+            self.__class__.no_need_check_grad = True
+            np.random.seed(1024)
+            x = np.random.uniform(-1, 1, [10, 12]).astype(typename)
+            out = np.sin(x)
+
+            self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
+            self.outputs = {'Out': out}
+
+        def test_check_output(self):
+            self.check_output_with_place(self.place)
+
+        def test_check_grad(self):
+            pass
+
+        def test_out_name(self):
+            with fluid.program_guard(fluid.Program()):
+                np_x = np.array([0.1])
+                data = fluid.layers.data(name="X", shape=[1])
+                out = eval("paddle.%s(data, name='Y')" % self.op_type)
+                place = fluid.NPUPlace(0)
+                exe = fluid.Executor(place)
+                result, = exe.run(feed={"X": np_x}, fetch_list=[out])
+                expected = eval("np.%s(np_x)" % self.op_type)
+                self.assertEqual(result, expected)
+
+        def test_dygraph(self):
+            with fluid.dygraph.guard(paddle.NPUPlace(0)):
+                np_x = np.array([0.1])
+                x = fluid.dygraph.to_variable(np_x)
+                z = eval("paddle.%s(x).numpy()" % self.op_type)
+                z_expected = eval("np.%s(np_x)" % self.op_type)
+                self.assertEqual(z, z_expected)
+
+    cls_name = "{0}_{1}_1".format(op_type, typename)
+    TestSin.__name__ = cls_name
+    globals()[cls_name] = TestSin
+
+
+for _typename in {'float16', 'float32', 'float64'}:
+    test_class("sin", _typename)
+
+if __name__ == "__main__":
+    unittest.main()
--
GitLab
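
For reference, a minimal sketch (not part of the patch) of how the new kernel can be exercised from dynamic graph mode, mirroring the test_dygraph case above. It assumes a PaddlePaddle build compiled with Ascend NPU support and an NPU visible as device 0; the input values are illustrative only.

import numpy as np
import paddle
import paddle.fluid as fluid

# Run paddle.sin under dygraph on the NPU; with the registration added in this
# patch, the op dispatches to SinNPUKernel, which launches the "Sin" NPU runner.
with fluid.dygraph.guard(paddle.NPUPlace(0)):  # assumption: NPU device 0 exists
    x = fluid.dygraph.to_variable(np.array([0.0, 0.5, 1.5], dtype=np.float32))
    y = paddle.sin(x)
    # Sanity check against NumPy computed on the host.
    np.testing.assert_allclose(y.numpy(), np.sin(x.numpy()), rtol=1e-5)
    print(y.numpy())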