From 429b5b5b1fb5f14c06e4c1f3cac7fd3f21bf707c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?S=C5=82awomir=20Siwek?=
Date: Wed, 7 Sep 2022 04:02:12 +0200
Subject: [PATCH] [PHI] Migrate scale kernel (#45537)

* scale kernel

* endline

* add inplace

* fix merge conflicts

* Merge conflicts

---
 .../fluid/operators/mkldnn/scale_mkldnn_op.cc | 69 -------------------
 paddle/phi/kernels/onednn/scale_kernel.cc     | 62 +++++++++++++++++
 2 files changed, 62 insertions(+), 69 deletions(-)
 delete mode 100644 paddle/fluid/operators/mkldnn/scale_mkldnn_op.cc
 create mode 100644 paddle/phi/kernels/onednn/scale_kernel.cc

diff --git a/paddle/fluid/operators/mkldnn/scale_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/scale_mkldnn_op.cc
deleted file mode 100644
index 343ff47c488..00000000000
--- a/paddle/fluid/operators/mkldnn/scale_mkldnn_op.cc
+++ /dev/null
@@ -1,69 +0,0 @@
-/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#include "paddle/fluid/platform/mkldnn_reuse.h"
-
-namespace paddle {
-namespace operators {
-
-using paddle::framework::Tensor;
-
-template <typename T>
-class ScaleMKLDNNKernel : public framework::OpKernel<T> {
- public:
-  void Compute(const framework::ExecutionContext& ctx) const override {
-    this->RunKernel(ctx);
-  }
-
-  void RunKernel(const framework::ExecutionContext& ctx) const {
-    const auto& dev_ctx =
-        ctx.template device_context<platform::MKLDNNDeviceContext>();
-    const auto& mkldnn_engine = dev_ctx.GetEngine();
-
-    auto* x = ctx.Input<Tensor>("X");
-    auto* out = ctx.Output<Tensor>("Out");
-
-    bool is_inplaced = x->IsSharedBufferWith(*out);
-
-    platform::ActivationMKLDNNHandler<T> handler(
-        dnnl::algorithm::eltwise_linear, ctx, mkldnn_engine, ctx.GetPlace(), x);
-
-    auto src_memory_p = handler.AcquireSrcMemory(x);
-    std::shared_ptr<dnnl::memory> dst_memory_p = nullptr;
-    if (is_inplaced) {
-      dst_memory_p = src_memory_p;
-      out->mutable_data<T>(ctx.GetPlace());
-    } else {
-      dst_memory_p = handler.AcquireDstMemory(out);
-    }
-    auto activation_p = handler.AcquireForwardPrimitive();
-
-    auto& astream = paddle::platform::MKLDNNDeviceContext::tls().get_stream();
-    activation_p->execute(
-        astream,
-        {{DNNL_ARG_FROM, *src_memory_p}, {DNNL_ARG_TO, *dst_memory_p}});
-    astream.wait();
-
-    out->set_mem_desc(dst_memory_p->get_desc());
-  }
-};
-}  // namespace operators
-}  // namespace paddle
-
-namespace ops = paddle::operators;
-REGISTER_OP_KERNEL(scale,
-                   MKLDNN,
-                   paddle::platform::CPUPlace,
-                   ops::ScaleMKLDNNKernel<float>,
-                   ops::ScaleMKLDNNKernel<paddle::platform::bfloat16>);
diff --git a/paddle/phi/kernels/onednn/scale_kernel.cc b/paddle/phi/kernels/onednn/scale_kernel.cc
new file mode 100644
index 00000000000..9ff767cff8c
--- /dev/null
+++ b/paddle/phi/kernels/onednn/scale_kernel.cc
@@ -0,0 +1,62 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/phi/kernels/scale_kernel.h"
+
+#include "paddle/phi/backends/onednn/onednn_reuse.h"
+#include "paddle/phi/core/kernel_registry.h"
+
+namespace phi {
+
+template <typename T, typename Context>
+void ScaleKernel(const Context& dev_ctx,
+                 const DenseTensor& x,
+                 const Scalar& scale,
+                 float bias,
+                 bool bias_after_scale,
+                 DenseTensor* out) {
+  float alpha = scale.to<float>();
+  float beta = bias_after_scale ? bias : bias * alpha;
+
+  funcs::ActivationOneDNNHandler<T> handler(dnnl::algorithm::eltwise_linear,
+                                            alpha,
+                                            beta,
+                                            dev_ctx.GetEngine(),
+                                            dev_ctx.GetPlace(),
+                                            &x);
+
+  auto src_memory_p = handler.AcquireSrcMemory(&x);
+  auto activation_p = handler.AcquireForwardPrimitive();
+
+  bool is_inplaced = x.IsSharedBufferWith(*out);
+  std::shared_ptr<dnnl::memory> dst_memory_p = nullptr;
+  if (is_inplaced) {
+    dst_memory_p = src_memory_p;
+    dev_ctx.template Alloc<T>(out);
+  } else {
+    dst_memory_p = handler.AcquireDstMemory(out);
+  }
+
+  auto& astream = OneDNNContext::tls().get_stream();
+  activation_p->execute(
+      astream, {{DNNL_ARG_FROM, *src_memory_p}, {DNNL_ARG_TO, *dst_memory_p}});
+  astream.wait();
+
+  out->set_mem_desc(dst_memory_p->get_desc());
+}
+
+}  // namespace phi
+
+PD_REGISTER_KERNEL(
+    scale, OneDNN, ALL_LAYOUT, phi::ScaleKernel, float, phi::dtype::bfloat16) {}
-- 
GitLab
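
For reference, the migrated kernel maps scale onto oneDNN's eltwise_linear primitive, which computes alpha * x + beta element-wise: alpha is the scale factor, and when bias_after_scale is false the bias is folded into beta as bias * alpha, so a single linear pass still yields scale * (x + bias). The sketch below is only a minimal illustration of that scalar math; scale_ref is a hypothetical standalone helper, not part of Paddle or oneDNN.

// Illustration only (not part of the patch): the per-element math that
// dnnl::algorithm::eltwise_linear applies, using the same alpha/beta
// folding as phi::ScaleKernel above. scale_ref is a hypothetical name.
#include <cstdio>

static float scale_ref(float x, float scale, float bias, bool bias_after_scale) {
  float alpha = scale;
  // Folding the pre-scale bias into beta keeps a single linear pass:
  // scale * (x + bias) == alpha * x + bias * alpha.
  float beta = bias_after_scale ? bias : bias * alpha;
  return alpha * x + beta;
}

int main() {
  std::printf("%g\n", scale_ref(2.f, 3.f, 1.f, true));   // 3*2 + 1 = 7
  std::printf("%g\n", scale_ref(2.f, 3.f, 1.f, false));  // 3*(2 + 1) = 9
  return 0;
}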