From bdac9ff6650d30f8b4fe0334e39c0a506757ea67 Mon Sep 17 00:00:00 2001
From: jakpiase <62569058+jakpiase@users.noreply.github.com>
Date: Mon, 18 Oct 2021 12:38:24 +0200
Subject: [PATCH] Added softplus FP32 FWD OneDNN kernel (#36382)

* added softplus

* refactored softplus op

* deleted unnecessary file

* added missing file

* added formatting

* disabled tests if GPU is used

* added reviewer suggestion

* unified softplus kernel
---
 .../operators/mkldnn/activation_mkldnn_op.cc  | 13 +++
 .../operators/mkldnn/softplus_mkldnn_op.h     | 94 +++++++++++++++++++
 .../mkldnn/test_softplus_mkldnn_op.py         | 78 +++++++++++++++
 3 files changed, 185 insertions(+)
 create mode 100644 paddle/fluid/operators/mkldnn/softplus_mkldnn_op.h
 create mode 100644 python/paddle/fluid/tests/unittests/mkldnn/test_softplus_mkldnn_op.py

diff --git a/paddle/fluid/operators/mkldnn/activation_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/activation_mkldnn_op.cc
index 603a70458b0..29106dc3049 100644
--- a/paddle/fluid/operators/mkldnn/activation_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/activation_mkldnn_op.cc
@@ -13,6 +13,7 @@ limitations under the License. */
 
 #include "paddle/fluid/operators/activation_op.h"
+#include "paddle/fluid/operators/mkldnn/softplus_mkldnn_op.h"
 #include "paddle/fluid/platform/mkldnn_reuse.h"
 
 namespace paddle {
@@ -169,6 +170,13 @@ struct GeluMKLDNNGradFunctor : public BaseActivationFunctor<T> {
   }
 };
 
+template <typename T>
+struct SoftplusMKLDNNFunctor : public BaseActivationFunctor<T> {
+  void operator()(const framework::ExecutionContext &ctx) const {
+    custom_softplus_eltwise_forward<T>(ctx);
+  }
+};
+
 template <typename T>
 using ReluMKLDNNFunctor =
     MKLDNNActivationFunc<T, mkldnn::algorithm::eltwise_relu>;
@@ -272,3 +280,8 @@ REGISTER_ACTIVATION_MKLDNN_BF16_KERNEL(gelu, GeluMKLDNNFunctor,
                                        GeluMKLDNNGradFunctor);
 REGISTER_ACTIVATION_MKLDNN_BF16_KERNEL(sigmoid, SigmoidMKLDNNFunctor,
                                        SigmoidMKLDNNGradFunctor);
+
+namespace ops = paddle::operators;
+REGISTER_OP_KERNEL(
+    softplus, MKLDNN, paddle::platform::CPUPlace,
+    ops::MKLDNNActivationKernel<ops::SoftplusMKLDNNFunctor<float>>);
diff --git a/paddle/fluid/operators/mkldnn/softplus_mkldnn_op.h b/paddle/fluid/operators/mkldnn/softplus_mkldnn_op.h
new file mode 100644
index 00000000000..fdb2c534e03
--- /dev/null
+++ b/paddle/fluid/operators/mkldnn/softplus_mkldnn_op.h
@@ -0,0 +1,94 @@
+/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/fluid/platform/mkldnn_reuse.h"
+
+namespace paddle {
+namespace operators {
+
+using paddle::framework::Tensor;
+
+template <typename T>
+class SoftplusMKLDNNHandler
+    : public platform::MKLDNNHandlerNoCachingT<T, dnnl::binary> {
+ public:
+  SoftplusMKLDNNHandler(const Tensor* x, const float beta,
+                        const mkldnn::engine engine, platform::Place cpu_place)
+      : platform::MKLDNNHandlerNoCachingT<T, dnnl::binary>(engine, cpu_place) {
+    auto x_tz = framework::vectorize(x->dims());
+    auto x_md =
+        dnnl::memory::desc(x_tz, platform::MKLDNNGetDataType<T>(), x->format());
+
+    auto beta_tz = std::vector<int64_t>(x_tz.size(), 1);
+    auto beta_md = dnnl::memory::desc(beta_tz, platform::MKLDNNGetDataType<T>(),
+                                      x->format());
+
+    dnnl::post_ops post_ops;
+    post_ops.append_eltwise(1.0f, dnnl::algorithm::eltwise_soft_relu, 0.0f,
+                            0.0f);
+    if (beta != 1.0f) {
+      post_ops.append_eltwise(1.0f, dnnl::algorithm::eltwise_linear,
+                              1.0f / beta, 0.0f);
+    }
+
+    dnnl::primitive_attr attrs;
+    attrs.set_post_ops(post_ops);
+
+    this->AcquireForwardPrimitiveDescriptor(attrs, dnnl::algorithm::binary_mul,
+                                            x_md, beta_md, x_md);
+  }
+
+  std::shared_ptr<dnnl::memory> AcquireBetaMemory(const float* beta) {
+    return this->AcquireMemoryFromPrimitive(
+        this->fwd_pd_->src1_desc(), platform::to_void_cast<float>(beta));
+  }
+};
+
+template <typename T>
+void custom_softplus_eltwise_forward(const framework::ExecutionContext& ctx) {
+  const auto& dev_ctx =
+      ctx.template device_context<platform::MKLDNNDeviceContext>();
+  const auto& mkldnn_engine = dev_ctx.GetEngine();
+
+  const auto* x = ctx.Input<Tensor>("X");
+  auto* out = ctx.Output<Tensor>("Out");
+
+  bool is_inplaced = x->IsSharedBufferWith(*out);
+
+  const float beta = ctx.Attr<float>("beta");
+
+  SoftplusMKLDNNHandler<T> handler(x, beta, mkldnn_engine, ctx.GetPlace());
+
+  auto src_memory_p = handler.AcquireSrcMemory(x);
+
+  auto beta_memory_p = handler.AcquireBetaMemory(&beta);
+  auto dst_memory_p =
+      is_inplaced ? src_memory_p : handler.AcquireDstMemory(out);
+  auto binary_p = handler.AcquireForwardPrimitive();
+
+  auto& astream = paddle::platform::MKLDNNDeviceContext::tls().get_stream();
+
+  const std::unordered_map<int, dnnl::memory> args = {
+      {DNNL_ARG_SRC_0, *src_memory_p},
+      {DNNL_ARG_SRC_1, *beta_memory_p},
+      {DNNL_ARG_DST, *dst_memory_p}};
+
+  binary_p->execute(astream, args);
+  astream.wait();
+
+  out->set_layout(framework::DataLayout::kMKLDNN);
+  out->set_format(platform::GetMKLDNNFormat(*dst_memory_p));
+}
+}  // namespace operators
+}  // namespace paddle
diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_softplus_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_softplus_mkldnn_op.py
new file mode 100644
index 00000000000..92699cdbd27
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/mkldnn/test_softplus_mkldnn_op.py
@@ -0,0 +1,78 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import unittest
+import numpy as np
+from paddle.fluid.tests.unittests.op_test import OpTest, OpTestTool
+import paddle
+import paddle.fluid as fluid
+import paddle.fluid.core as core
+from paddle.fluid.framework import _current_expected_place
+
+
+def ref_softplus(x, beta, threshold):
+    x_beta = beta * x
+    out = np.select([x_beta <= threshold, x_beta > threshold],
+                    [np.log(1 + np.exp(x_beta)) / beta, x])
+    return out
+
+
+@OpTestTool.skip_if(not (isinstance(_current_expected_place(), core.CPUPlace)),
+                    "GPU is not supported")
+class TestSoftplusOneDNNOp(OpTest):
+    def setUp(self):
+        self.op_type = "softplus"
+        self.beta = 1
+        self.threshold = 20
+        self.config()
+        self.attrs = {'use_mkldnn': True, 'beta': self.beta}
+        self.inputs = {'X': np.random.random(self.x_shape).astype(np.float32)}
+        self.outputs = {
+            'Out': ref_softplus(self.inputs['X'], self.beta, self.threshold)
+        }
+
+    def config(self):
+        self.x_shape = (10, 10)
+
+    def test_check_output(self):
+        self.check_output()
+
+
+class TestSoftplus4DOneDNNOp(TestSoftplusOneDNNOp):
+    def config(self):
+        self.x_shape = (10, 5, 4, 2)
+
+
+class TestSoftplus6DOneDNNOp(TestSoftplusOneDNNOp):
+    def config(self):
+        self.x_shape = (3, 2, 2, 5, 4, 2)
+
+
+class TestSoftplus6DExtendedFunctorOneDNNOp(TestSoftplusOneDNNOp):
+    def config(self):
+        self.x_shape = (3, 5, 2, 5, 4, 2)
+        self.beta = 2.5
+
+
+class TestSoftplus3DExtendedFunctorOneDNNOp(TestSoftplusOneDNNOp):
+    def config(self):
+        self.x_shape = (20, 4, 2)
+        self.beta = 0.4
+
+
+if __name__ == "__main__":
+    paddle.enable_static()
+    unittest.main()
-- 
GitLab
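Note (not part of the patch): the kernel above builds softplus(x, beta) = log(1 + exp(beta * x)) / beta from a oneDNN binary_mul of x with a one-element beta tensor, an eltwise_soft_relu post-op, and, when beta != 1, an eltwise_linear post-op that rescales by 1/beta. The standalone numpy sketch below illustrates that decomposition against the below-threshold branch of ref_softplus from the test; the function name softplus_via_postops and the tolerances are illustrative, not part of the Paddle code.

import numpy as np

def softplus_via_postops(x, beta):
    # Mirrors the primitive chain configured in SoftplusMKLDNNHandler:
    scaled = x * beta                      # dnnl::algorithm::binary_mul with the 1-element beta tensor
    soft_relu = np.log1p(np.exp(scaled))   # dnnl::algorithm::eltwise_soft_relu post-op
    # eltwise_linear post-op with alpha = 1/beta, only appended when beta != 1
    return soft_relu / beta if beta != 1.0 else soft_relu

x = np.random.uniform(-3, 3, (10, 10)).astype(np.float32)
for beta in (1.0, 0.4, 2.5):
    # ref_softplus below the threshold: log(1 + exp(beta * x)) / beta
    expected = np.log(1 + np.exp(beta * x)) / beta
    assert np.allclose(softplus_via_postops(x, beta), expected, atol=1e-5)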