From 9310e56a28e9fbb8c1e8c1c951f751323dc8d826 Mon Sep 17 00:00:00 2001
From: zhangyikun02 <48021248+zhangyk0314@users.noreply.github.com>
Date: Thu, 1 Sep 2022 10:02:46 +0800
Subject: [PATCH] move range kernel to phi, test=kunlun (#45602)

---
 cmake/external/xpu.cmake                |  4 +-
 paddle/fluid/operators/range_op_xpu.cc  | 70 -------------------------
 paddle/phi/kernels/xpu/arange_kernel.cc | 66 ++++++++++++++++++++++++
 3 files changed, 68 insertions(+), 72 deletions(-)
 delete mode 100644 paddle/fluid/operators/range_op_xpu.cc
 create mode 100644 paddle/phi/kernels/xpu/arange_kernel.cc

diff --git a/cmake/external/xpu.cmake b/cmake/external/xpu.cmake
index 71c18bb0fe..9e7ecdfdac 100644
--- a/cmake/external/xpu.cmake
+++ b/cmake/external/xpu.cmake
@@ -10,7 +10,7 @@ set(XPU_RT_LIB_NAME "libxpurt.so")
 if(NOT DEFINED XPU_BASE_URL)
   set(XPU_BASE_URL_WITHOUT_DATE
       "https://baidu-kunlun-product.cdn.bcebos.com/KL-SDK/klsdk-dev")
-  set(XPU_BASE_URL "${XPU_BASE_URL_WITHOUT_DATE}/20220812")
+  set(XPU_BASE_URL "${XPU_BASE_URL_WITHOUT_DATE}/20220820")
 else()
   set(XPU_BASE_URL "${XPU_BASE_URL}")
 endif()
@@ -19,7 +19,7 @@ endif()
 if(NOT DEFINED XPU_XDNN_BASE_URL)
   set(XPU_XDNN_BASE_URL_WITHOUT_DATE
       "https://klx-sdk-release-public.su.bcebos.com/xdnn/dev")
-  set(XPU_XDNN_BASE_URL "${XPU_XDNN_BASE_URL_WITHOUT_DATE}/20220812")
+  set(XPU_XDNN_BASE_URL "${XPU_XDNN_BASE_URL_WITHOUT_DATE}/20220820")
 else()
   set(XPU_XDNN_BASE_URL "${XPU_XDNN_BASE_URL}")
 endif()
diff --git a/paddle/fluid/operators/range_op_xpu.cc b/paddle/fluid/operators/range_op_xpu.cc
deleted file mode 100644
index cf968747b8..0000000000
--- a/paddle/fluid/operators/range_op_xpu.cc
+++ /dev/null
@@ -1,70 +0,0 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#ifdef PADDLE_WITH_XPU
-#include "paddle/fluid/framework/op_registry.h"
-#include "paddle/fluid/operators/range_op.h"
-
-namespace paddle {
-namespace operators {
-
-template <typename T>
-class XPURangeKernel : public framework::OpKernel<T> {
- public:
-  void Compute(const framework::ExecutionContext& context) const override {
-    auto* start_t = context.Input<framework::Tensor>("Start");
-    auto* end_t = context.Input<framework::Tensor>("End");
-    auto* step_t = context.Input<framework::Tensor>("Step");
-    auto* out = context.Output<framework::Tensor>("Out");
-
-    framework::Tensor n;
-    framework::TensorCopySync(*start_t, platform::CPUPlace(), &n);
-    T start = n.data<T>()[0];
-    framework::TensorCopySync(*end_t, platform::CPUPlace(), &n);
-    T end = n.data<T>()[0];
-    framework::TensorCopySync(*step_t, platform::CPUPlace(), &n);
-    T step = n.data<T>()[0];
-
-    int64_t size = 0;
-    GetSize(start, end, step, &size);
-    out->Resize(phi::make_ddim({size}));
-
-    T* out_data = out->mutable_data<T>(context.GetPlace());
-
-    framework::Tensor out_cpu;
-    T* out_cpu_data_ptr =
-        out_cpu.mutable_data<T>(platform::CPUPlace(), out->numel() * sizeof(T));
-    T value = start;
-    for (int64_t i = 0; i < size; ++i) {
-      out_cpu_data_ptr[i] = value;
-      value += step;
-    }
-    memory::Copy(context.GetPlace(),
-                 static_cast<void*>(out_data),
-                 platform::CPUPlace(),
-                 static_cast<void*>(out_cpu_data_ptr),
-                 out->numel() * sizeof(T));
-  }
-};
-
-}  // namespace operators
-}  // namespace paddle
-
-namespace ops = paddle::operators;
-REGISTER_OP_XPU_KERNEL(range,
-                       ops::XPURangeKernel<float>,
-                       ops::XPURangeKernel<double>,
-                       ops::XPURangeKernel<int>,
-                       ops::XPURangeKernel<int64_t>);
-#endif  // PADDLE_WITH_XPU
diff --git a/paddle/phi/kernels/xpu/arange_kernel.cc b/paddle/phi/kernels/xpu/arange_kernel.cc
new file mode 100644
index 0000000000..58a133c1c9
--- /dev/null
+++ b/paddle/phi/kernels/xpu/arange_kernel.cc
@@ -0,0 +1,66 @@
+/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/phi/kernels/arange_kernel.h"
+
+#include "paddle/fluid/memory/memcpy.h"
+#include "paddle/phi/core/kernel_registry.h"
+#include "paddle/phi/kernels/funcs/range_function.h"
+
+namespace phi {
+
+template <typename T, typename Context>
+void ArangeKernel(const Context& dev_ctx,
+                  const DenseTensor& start,
+                  const DenseTensor& end,
+                  const DenseTensor& step,
+                  DenseTensor* out) {
+  auto place = dev_ctx.GetPlace();
+  auto cpu_place = phi::CPUPlace();
+
+  DenseTensor n_cpu;
+  n_cpu.Resize({start.numel()});
+  T* n_cpu_data = dev_ctx.template HostAlloc<T>(&n_cpu);
+
+  paddle::memory::Copy(
+      cpu_place, n_cpu_data, place, start.data<T>(), sizeof(T) * start.numel());
+  T start_value = n_cpu_data[0];
+  paddle::memory::Copy(
+      cpu_place, n_cpu_data, place, end.data<T>(), sizeof(T) * end.numel());
+  T end_value = n_cpu_data[0];
+  paddle::memory::Copy(
+      cpu_place, n_cpu_data, place, step.data<T>(), sizeof(T) * step.numel());
+  T step_value = n_cpu_data[0];
+
+  int64_t size = 0;
+  phi::funcs::GetSize(start_value, end_value, step_value, &size);
+  out->Resize(phi::make_ddim({size}));
+  T* out_data = dev_ctx.template Alloc<T>(out);
+
+  DenseTensor out_cpu;
+  out_cpu.Resize({out->numel()});
+  T* out_cpu_data = dev_ctx.template HostAlloc<T>(&out_cpu);
+  T value = start_value;
+  for (int64_t i = 0; i < size; ++i) {
+    out_cpu_data[i] = value;
+    value += step_value;
+  }
+  paddle::memory::Copy(
+      place, out_data, cpu_place, out_cpu_data, out->numel() * sizeof(T));
+}
+
+}  // namespace phi
+
+PD_REGISTER_KERNEL(
+    arange, XPU, ALL_LAYOUT, phi::ArangeKernel, float, double, int, int64_t) {}
-- 
GitLab