diff --git a/paddle/fluid/operators/activation_op_npu.cc b/paddle/fluid/operators/activation_op_npu.cc
index eb218507103dd6671801465021e318f3577c49fa..20c56d6a279334c87245857fdcceb375c02aed7b 100644
--- a/paddle/fluid/operators/activation_op_npu.cc
+++ b/paddle/fluid/operators/activation_op_npu.cc
@@ -459,6 +459,89 @@ class SigmoidGradNPUKernel : public framework::OpKernel<T> {
   }
 };
 
+// Swish = x * sigmoid(beta * x)
+template <typename T>
+class SwishNPUKernel : public framework::OpKernel<T> {
+ public:
+  void Compute(const framework::ExecutionContext& ctx) const override {
+    auto* x = ctx.Input<Tensor>("X");
+    auto* out = ctx.Output<Tensor>("Out");
+    float beta = ctx.Attr<float>("beta");
+
+    out->mutable_data<T>(ctx.GetPlace());
+    auto stream =
+        ctx.template device_context<paddle::platform::NPUDeviceContext>()
+            .stream();
+
+    const auto& muls_runner =
+        NpuOpRunner("Muls", {*x}, {*out}, {{"value", beta}});
+    muls_runner.Run(stream);
+
+    const auto& sigmoid_runner = NpuOpRunner("Sigmoid", {*out}, {*out}, {});
+    sigmoid_runner.Run(stream);
+
+    const auto& mul_runner = NpuOpRunner("Mul", {*x, *out}, {*out});
+    mul_runner.Run(stream);
+  }
+};
+
+// SwishGrad: dx = dout * (sigmoid(beta*x) + beta*out*(1 - sigmoid(beta*x)))
+template <typename T>
+class SwishGradNPUKernel : public framework::OpKernel<T> {
+ public:
+  void Compute(const framework::ExecutionContext& ctx) const override {
+    auto* x = ctx.Input<Tensor>("X");
+    auto* dout = ctx.Input<Tensor>(framework::GradVarName("Out"));
+    auto* dx = ctx.Output<Tensor>(framework::GradVarName("X"));
+    float beta = ctx.Attr<float>("beta");
+
+    dx->mutable_data<T>(ctx.GetPlace());
+    auto stream =
+        ctx.template device_context<paddle::platform::NPUDeviceContext>()
+            .stream();
+
+    Tensor beta_x, sigmoid_out, swish_out;
+    beta_x.mutable_data<T>(x->dims(), ctx.GetPlace());
+    sigmoid_out.mutable_data<T>(x->dims(), ctx.GetPlace());
+    swish_out.mutable_data<T>(x->dims(), ctx.GetPlace());
+
+    const auto& muls_runner =
+        NpuOpRunner("Muls", {*x}, {beta_x}, {{"value", beta}});
+    muls_runner.Run(stream);
+
+    const auto& sigmoid_runner =
+        NpuOpRunner("Sigmoid", {beta_x}, {sigmoid_out}, {});
+    sigmoid_runner.Run(stream);
+
+    const auto& mul_runner =
+        NpuOpRunner("Mul", {sigmoid_out, *x}, {swish_out}, {});
+    mul_runner.Run(stream);
+
+    // Scale by beta so that swish_out holds beta * out; the chain below is
+    // otherwise only correct for beta == 1.
+    const auto& muls_runner2 =
+        NpuOpRunner("Muls", {swish_out}, {swish_out}, {{"value", beta}});
+    muls_runner2.Run(stream);
+
+    // dx = sigmoid_out * beta * out
+    const auto& mul_runner1 =
+        NpuOpRunner("Mul", {sigmoid_out, swish_out}, {*dx}, {});
+    mul_runner1.Run(stream);
+
+    // dx = beta * out * (1 - sigmoid_out)
+    const auto& sub_runner = NpuOpRunner("Sub", {swish_out, *dx}, {*dx}, {});
+    sub_runner.Run(stream);
+
+    // dx = sigmoid_out + beta * out * (1 - sigmoid_out)
+    const auto& add_runner = NpuOpRunner("Add", {sigmoid_out, *dx}, {*dx}, {});
+    add_runner.Run(stream);
+
+    // dx = dout * dx
+    const auto& mul_runner2 = NpuOpRunner("Mul", {*dout, *dx}, {*dx}, {});
+    mul_runner2.Run(stream);
+  }
+};
+
 // HardSwish = min(max(0, x+offset), threshold) * x / scale
 template <typename T>
 class HardSwishNPUKernel : public framework::OpKernel<T> {
@@ -936,6 +1019,12 @@ REGISTER_OP_NPU_KERNEL(
     ops::SigmoidGradNPUKernel<paddle::platform::NPUDeviceContext,
                               paddle::platform::float16>);
 
+REGISTER_OP_NPU_KERNEL(swish, ops::SwishNPUKernel<float>,
+                       ops::SwishNPUKernel<paddle::platform::float16>);
+
+REGISTER_OP_NPU_KERNEL(swish_grad, ops::SwishGradNPUKernel<float>,
+                       ops::SwishGradNPUKernel<paddle::platform::float16>);
+
 REGISTER_OP_NPU_KERNEL(hard_swish, ops::HardSwishNPUKernel<float>,
                        ops::HardSwishNPUKernel<paddle::platform::float16>);
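Note (not part of the patch): the runner sequence in SwishGradNPUKernel above (Muls,
Sigmoid, Mul, Muls, Mul, Sub, Add, Mul) should reproduce the analytic derivative
d(swish)/dx = sigmoid(beta*x) + beta*out*(1 - sigmoid(beta*x)). A minimal NumPy
sketch of that check, assuming scipy is available; swish_grad_npu_ops is a
hypothetical helper, not part of the Paddle API:

    import numpy as np
    from scipy.special import expit  # numerically stable sigmoid

    def swish_grad_npu_ops(x, dout, beta):
        """Mirror the NPU runner sequence step by step."""
        beta_x = beta * x                  # Muls
        sigmoid_out = expit(beta_x)        # Sigmoid
        swish_out = sigmoid_out * x        # Mul  -> out
        swish_out = beta * swish_out       # Muls -> beta * out
        dx = sigmoid_out * swish_out       # Mul
        dx = swish_out - dx                # Sub  -> beta*out*(1 - sigmoid_out)
        dx = sigmoid_out + dx              # Add
        return dout * dx                   # Mul

    x = np.random.uniform(-1.0, 1.0, (10, 12)).astype(np.float32)
    dout = np.ones_like(x) / x.size  # gradient of mean(Out)
    beta = 1.5
    out = x * expit(beta * x)
    analytic = dout * (expit(beta * x) + beta * out * (1 - expit(beta * x)))
    np.testing.assert_allclose(
        swish_grad_npu_ops(x, dout, beta), analytic, rtol=1e-5, atol=1e-8)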
diff --git a/python/paddle/fluid/tests/unittests/npu/test_swish_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_swish_op_npu.py
new file mode 100644
index 0000000000000000000000000000000000000000..c7c488625be9e4c90394dd9cce78229fb1cd93e3
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/npu/test_swish_op_npu.py
@@ -0,0 +1,77 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import numpy as np
+import unittest
+import sys
+sys.path.append("..")
+from paddle.fluid.tests.unittests.op_test import OpTest
+import paddle
+import paddle.fluid as fluid
+from test_activation_op import ref_swish, expit
+
+paddle.enable_static()
+SEED = 1024
+
+
+class TestSwishOp(OpTest):
+    def setUp(self):
+        self.op_type = "swish"
+        self.set_npu()
+        self.init_dtype()
+        np.random.seed(2048)
+        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
+        out = ref_swish(x)
+        self.inputs = {'X': x}
+        self.attrs = {'beta': 1.0}
+        self.outputs = {'Out': out}
+
+    def test_check_output(self):
+        self.check_output_with_place(self.place)
+
+    def test_check_grad(self):
+        beta = self.attrs['beta']
+        out = self.outputs['Out']
+        x = self.inputs['X']
+        # Analytic gradient: d(swish)/dx = beta*out + sigmoid(beta*x)*(1 - beta*out).
+        dx = beta * out + expit(beta * x) * (1 - beta * out)
+        # check_grad compares against the gradient of mean(Out), hence the 1/size factor.
+        dx = dx / x.size
+
+        self.check_grad_with_place(
+            self.place, ['X'],
+            'Out',
+            max_relative_error=0.01,
+            user_defined_grads=[dx])
+
+    def set_npu(self):
+        self.__class__.use_npu = True
+        self.place = paddle.NPUPlace(0)
+
+    def init_dtype(self):
+        self.dtype = np.float32
+
+
+class TestSwishOpFp16(TestSwishOp):
+    def test_check_output(self):
+        self.check_output_with_place(self.place, atol=1e-3)
+
+    def init_dtype(self):
+        self.dtype = np.float16
+
+
+if __name__ == '__main__':
+    unittest.main()
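Note (not part of the patch): once these kernels are registered, the op can also be
exercised end to end through the regular Python API. A minimal dygraph sketch,
assuming a PaddlePaddle build with NPU support and an attached NPU device; note
that F.swish exposes no beta argument and uses the default beta = 1:

    import paddle
    import paddle.nn.functional as F

    paddle.set_device("npu:0")  # requires an NPU build of Paddle

    x = paddle.uniform([10, 12], min=-1.0, max=1.0)
    x.stop_gradient = False
    y = F.swish(x)       # forward: dispatches to SwishNPUKernel
    y.mean().backward()  # backward: dispatches to SwishGradNPUKernel
    print(x.grad.shape)  # [10, 12]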