diff --git a/paddle/fluid/operators/scale_op_xpu.cc b/paddle/fluid/operators/scale_op_xpu.cc
index 4960f720ee39aaa130544befc9b0a6449d5381d9..75f3cc9c5aa30090943c7b024bd0ceaf0ab2412f 100644
--- a/paddle/fluid/operators/scale_op_xpu.cc
+++ b/paddle/fluid/operators/scale_op_xpu.cc
@@ -16,7 +16,7 @@ limitations under the License. */
 
 #include "paddle/fluid/operators/scale_op.h"
 #include <string>
-#include "paddle/fluid/platform/device/xpu/xpu_header.h"
+#include "paddle/pten/kernels/scale_kernel.h"
 
 namespace paddle {
 namespace operators {
@@ -41,21 +41,12 @@ class ScaleXPUKernel : public framework::OpKernel<T> {
     auto* out =
         framework::GetMutableLoDTensorOrSelectedRowsValueFromVar(out_var);
     out->mutable_data<T>(in->place());
-    PADDLE_ENFORCE_EQ(
-        in->dims(), out->dims(),
-        platform::errors::InvalidArgument("In and out should have the same dim,"
-                                          " expected %s, but got %s.",
-                                          in->dims().to_str().c_str(),
-                                          out->dims().to_str().c_str()));
     auto& dev_ctx = ctx.template device_context<DeviceContext>();
-    int r = xpu::scale(dev_ctx.x_context(),
-                       reinterpret_cast<const XPUType*>(in->data<T>()),
-                       reinterpret_cast<XPUType*>(out->data<T>()), in->numel(),
-                       bias_after_scale, scale, bias);
-    PADDLE_ENFORCE_EQ(
-        r, XPU_SUCCESS,
-        platform::errors::External("XPU scale kernel return wrong value[%d %s]",
-                                   r, XPUAPIErrorMsg[r]));
+    // call pten kernel
+    pten::ScaleKernel<T>(
+        static_cast<const typename paddle::framework::ConvertToPtenContext<
+            DeviceContext>::TYPE&>(dev_ctx),
+        *in, scale, bias, bias_after_scale, out);
   }
 };
 
diff --git a/paddle/pten/kernels/xpu/scale_kernel.cc b/paddle/pten/kernels/xpu/scale_kernel.cc
new file mode 100644
index 0000000000000000000000000000000000000000..116cd63f876207b39bc9b523b9f9e70876cc1b98
--- /dev/null
+++ b/paddle/pten/kernels/xpu/scale_kernel.cc
@@ -0,0 +1,65 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/pten/kernels/scale_kernel.h"
+
+#include "paddle/fluid/platform/device/xpu/xpu_header.h"
+#include "paddle/pten/backends/xpu/xpu_context.h"
+#include "paddle/pten/common/data_type.h"
+#include "paddle/pten/common/float16.h"
+#include "paddle/pten/core/convert_utils.h"
+#include "paddle/pten/core/kernel_registry.h"
+
+namespace pten {
+
+template <typename T, typename Context>
+void ScaleKernel(const Context& dev_ctx,
+                 const DenseTensor& x,
+                 const Scalar& scale,
+                 float bias,
+                 bool bias_after_scale,
+                 DenseTensor* out) {
+  out->mutable_data<T>(dev_ctx.GetPlace());
+
+  PADDLE_ENFORCE_EQ(x.dims(),
+                    out->dims(),
+                    paddle::platform::errors::InvalidArgument(
+                        "In and out should have the same dim,"
+                        " expected %s, but got %s.",
+                        x.dims().to_str().c_str(),
+                        out->dims().to_str().c_str()));
+  using XPUType = typename XPUTypeTrait<T>::Type;
+  int r = xpu::scale(dev_ctx.x_context(),
+                     reinterpret_cast<const XPUType*>(x.data<T>()),
+                     reinterpret_cast<XPUType*>(out->data<T>()),
+                     x.numel(),
+                     bias_after_scale,
+                     scale.to<float>(),
+                     bias);
+  PADDLE_ENFORCE_EQ(
+      r,
+      XPU_SUCCESS,
+      paddle::platform::errors::External(
+          "XPU scale kernel return wrong value[%d %s]", r, XPUAPIErrorMsg[r]));
+}
+
+}  // namespace pten
+
+PT_REGISTER_KERNEL(scale,
+                   XPU,
+                   ALL_LAYOUT,
+                   pten::ScaleKernel,
+                   float,
+                   pten::dtype::float16,
+                   int64_t) {}