From bf61a0d9d544b11355fc77e6a8bb1a7d03d4e23f Mon Sep 17 00:00:00 2001
From: zhangyikun02 <48021248+zhangyk0314@users.noreply.github.com>
Date: Wed, 29 Mar 2023 10:54:10 +0800
Subject: [PATCH] move clip_by_norm kernel to phi for xpu (#52183)

---
 paddle/fluid/operators/clip_by_norm_op_xpu.cc | 80 -------------------
 paddle/phi/kernels/xpu/clip_by_norm_kernel.cc | 55 ++++++++++++++
 2 files changed, 55 insertions(+), 80 deletions(-)
 delete mode 100644 paddle/fluid/operators/clip_by_norm_op_xpu.cc
 create mode 100644 paddle/phi/kernels/xpu/clip_by_norm_kernel.cc

diff --git a/paddle/fluid/operators/clip_by_norm_op_xpu.cc b/paddle/fluid/operators/clip_by_norm_op_xpu.cc
deleted file mode 100644
index ac146629f02..00000000000
--- a/paddle/fluid/operators/clip_by_norm_op_xpu.cc
+++ /dev/null
@@ -1,80 +0,0 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#ifdef PADDLE_WITH_XPU
-#include <vector>
-
-#include "paddle/fluid/operators/clip_by_norm_op.h"
-
-namespace paddle {
-namespace operators {
-
-template <typename DeviceContext, typename T>
-class XPUClipByNormKernel : public framework::OpKernel<T> {
- public:
-  void Compute(const framework::ExecutionContext& context) const override {
-    auto max_norm = context.Attr<float>("max_norm");
-    auto in_var = context.InputVar("X");
-
-    phi::DenseTensor* output = nullptr;
-    const phi::DenseTensor* input = nullptr;
-    if (in_var->IsType<phi::DenseTensor>()) {
-      input = context.Input<phi::DenseTensor>("X");
-
-      output = context.Output<phi::DenseTensor>("Out");
-      output->mutable_data<T>(context.GetPlace());
-    } else {
-      PADDLE_THROW(platform::errors::InvalidArgument(
-          "Invalid input variable type, only support LodTensor"
-          "type, but got type is %s.",
-          framework::ToTypeName(in_var->Type())));
-    }
-
-    PADDLE_ENFORCE_NOT_NULL(input,
-                            platform::errors::InvalidArgument(
-                                "Input(X) of ClipByNormOp should not be null. "
-                                "Please check if it is created correctly."));
-    auto& dev_ctx = context.template device_context<DeviceContext>();
-    const auto& x_dims = input->dims();
-    std::vector<int> xshape(x_dims.size());
-    std::vector<int> rdims(x_dims.size());
-    for (int i = 0; i < x_dims.size(); i++) {
-      xshape[i] = x_dims[i];
-      rdims[i] = i;
-    }
-    int r = xpu::clip_by_norm<T>(dev_ctx.x_context(),
-                                 input->data<T>(),
-                                 output->data<T>(),
-                                 max_norm,
-                                 xshape,
-                                 rdims);
-    PADDLE_ENFORCE_EQ(
-        r,
-        XPU_SUCCESS,
-        platform::errors::External("XPU API(clip_by_norm) return "
-                                   "wrong value[%d], please check whether "
-                                   "Baidu Kunlun Card is properly installed.",
-                                   r));
-  }
-};
-
-}  // namespace operators
-}  // namespace paddle
-
-namespace ops = paddle::operators;
-REGISTER_OP_XPU_KERNEL(
-    clip_by_norm,
-    ops::XPUClipByNormKernel<paddle::platform::XPUDeviceContext, float>);
-
-#endif  // PADDLE_WITH_XPU
diff --git a/paddle/phi/kernels/xpu/clip_by_norm_kernel.cc b/paddle/phi/kernels/xpu/clip_by_norm_kernel.cc
new file mode 100644
index 00000000000..c3105bc2318
--- /dev/null
+++ b/paddle/phi/kernels/xpu/clip_by_norm_kernel.cc
@@ -0,0 +1,55 @@
+// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/phi/kernels/clip_by_norm_kernel.h"
+
+#include "paddle/phi/backends/xpu/enforce_xpu.h"
+#include "paddle/phi/backends/xpu/xpu_context.h"
+#include "paddle/phi/core/kernel_registry.h"
+
+namespace phi {
+
+template <typename T, typename Context>
+void ClipByNormKernel(const Context& dev_ctx,
+                      const DenseTensor& in,
+                      float max_norm,
+                      DenseTensor* output) {
+  auto input = &in;
+  dev_ctx.template Alloc<T>(output);
+
+  PADDLE_ENFORCE_NOT_NULL(input,
+                          phi::errors::InvalidArgument(
+                              "Input(X) of ClipByNormOp should not be null. "
+                              "Please check if it is created correctly."));
+
+  const auto& x_dims = input->dims();
+  std::vector<int> xshape(x_dims.size());
+  std::vector<int> rdims(x_dims.size());
+  for (int i = 0; i < x_dims.size(); i++) {
+    xshape[i] = x_dims[i];
+    rdims[i] = i;
+  }
+  int r = xpu::clip_by_norm<T>(dev_ctx.x_context(),
+                               input->data<T>(),
+                               output->data<T>(),
+                               max_norm,
+                               xshape,
+                               rdims);
+  PADDLE_ENFORCE_XDNN_SUCCESS(r, "clip_by_norm");
+}
+
+}  // namespace phi
+
+PD_REGISTER_KERNEL(
+    clip_by_norm, XPU, ALL_LAYOUT, phi::ClipByNormKernel, float) {}
--
GitLab