From 848ae7dc34c84c09ac6df93e5cfd5c2031156cea Mon Sep 17 00:00:00 2001
From: hong <43953930+phlrain@users.noreply.github.com>
Date: Fri, 28 Jan 2022 11:58:52 +0800
Subject: [PATCH] Move digamma to pten (#39240)

* move digamma to pten; test=develop

* fix mutable_data bugs; test=develop

* remove useless code; test=develop

* remove kernel compute; test=develop

* fix bug; test=develop
---
 paddle/fluid/operators/digamma_op.cc          | 16 ++--
 paddle/fluid/operators/digamma_op.cu          | 26 ------
 paddle/fluid/operators/digamma_op.h           | 81 ------------------
 .../pten/kernels/cpu/digamma_grad_kernel.cc   | 23 ++++++
 paddle/pten/kernels/cpu/digamma_kernel.cc     | 23 ++++++
 paddle/pten/kernels/digamma_grad_kernel.h     | 27 +++++++
 paddle/pten/kernels/digamma_kernel.h          | 24 ++++++
 .../pten/kernels/gpu/digamma_grad_kernel.cu   | 22 +++++
 paddle/pten/kernels/gpu/digamma_kernel.cu     | 23 ++++++
 .../kernels/impl/digamma_grad_kernel_impl.h   | 55 +++++++++++++
 .../pten/kernels/impl/digamma_kernel_impl.h   | 49 +++++++++++
 11 files changed, 253 insertions(+), 116 deletions(-)
 delete mode 100644 paddle/fluid/operators/digamma_op.cu
 create mode 100644 paddle/pten/kernels/cpu/digamma_grad_kernel.cc
 create mode 100644 paddle/pten/kernels/cpu/digamma_kernel.cc
 create mode 100644 paddle/pten/kernels/digamma_grad_kernel.h
 create mode 100644 paddle/pten/kernels/digamma_kernel.h
 create mode 100644 paddle/pten/kernels/gpu/digamma_grad_kernel.cu
 create mode 100644 paddle/pten/kernels/gpu/digamma_kernel.cu
 create mode 100644 paddle/pten/kernels/impl/digamma_grad_kernel_impl.h
 create mode 100644 paddle/pten/kernels/impl/digamma_kernel_impl.h

diff --git a/paddle/fluid/operators/digamma_op.cc b/paddle/fluid/operators/digamma_op.cc
index b1a58817e06..eb0471fec12 100644
--- a/paddle/fluid/operators/digamma_op.cc
+++ b/paddle/fluid/operators/digamma_op.cc
@@ -64,6 +64,13 @@ class DigammaGradOp : public framework::OperatorWithKernel {
     ctx->SetOutputDim(framework::GradVarName("X"), dout_dims);
     ctx->ShareLoD(framework::GradVarName("Out"), framework::GradVarName("X"));
   }
+
+  framework::KernelSignature GetExpectedPtenKernelArgs(
+      const framework::ExecutionContext &ctx) const override {
+    return framework::KernelSignature("digamma_grad",
+                                      {framework::GradVarName("Out"), "X"}, {},
+                                      {framework::GradVarName("X")});
+  }
 };
 
 template <typename T>
@@ -89,12 +96,3 @@ REGISTER_OPERATOR(digamma, ops::DigammaOp, ops::DigammaOpMaker,
                   ops::DigammaGradOpMaker<paddle::framework::OpDesc>,
                   ops::DigammaGradOpMaker<paddle::imperative::OpBase>);
 REGISTER_OPERATOR(digamma_grad, ops::DigammaGradOp);
-
-REGISTER_OP_CPU_KERNEL(
-    digamma, ops::DigammaKernel<paddle::platform::CPUDeviceContext, float>,
-    ops::DigammaKernel<paddle::platform::CPUDeviceContext, double>);
-
-REGISTER_OP_CPU_KERNEL(
-    digamma_grad,
-    ops::DigammaGradKernel<paddle::platform::CPUDeviceContext, float>,
-    ops::DigammaGradKernel<paddle::platform::CPUDeviceContext, double>);
diff --git a/paddle/fluid/operators/digamma_op.cu b/paddle/fluid/operators/digamma_op.cu
deleted file mode 100644
index 5f2f59ba520..00000000000
--- a/paddle/fluid/operators/digamma_op.cu
+++ /dev/null
@@ -1,26 +0,0 @@
-/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#include "paddle/fluid/operators/digamma_op.h"
-
-namespace ops = paddle::operators;
-
-REGISTER_OP_CUDA_KERNEL(
-    digamma, ops::DigammaKernel<paddle::platform::CUDADeviceContext, float>,
-    ops::DigammaKernel<paddle::platform::CUDADeviceContext, double>);
-
-REGISTER_OP_CUDA_KERNEL(
-    digamma_grad,
-    ops::DigammaGradKernel<paddle::platform::CUDADeviceContext, float>,
-    ops::DigammaGradKernel<paddle::platform::CUDADeviceContext, double>);
diff --git a/paddle/fluid/operators/digamma_op.h b/paddle/fluid/operators/digamma_op.h
index f82628f0204..85f9094e6a0 100644
--- a/paddle/fluid/operators/digamma_op.h
+++ b/paddle/fluid/operators/digamma_op.h
@@ -14,86 +14,5 @@ limitations under the License. */
 
 #pragma once
 
-#include <unsupported/Eigen/SpecialFunctions>
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/operator.h"
-#include "paddle/fluid/platform/for_range.h"
-
-namespace paddle {
-namespace operators {
-
-template <typename T>
-struct DigammaFunctor {
-  DigammaFunctor(const T* input, T* output, int64_t numel)
-      : input_(input), output_(output), numel_(numel) {}
-
-  HOSTDEVICE void operator()(int64_t idx) const {
-    output_[idx] = Eigen::numext::digamma(input_[idx]);
-  }
-
- private:
-  const T* input_;
-  T* output_;
-  int64_t numel_;
-};
-
-template <typename T>
-struct DigammaGradFunctor {
-  DigammaGradFunctor(const T* dout, const T* x, T* output, int64_t numel)
-      : dout_(dout), x_(x), output_(output), numel_(numel) {}
-
-  HOSTDEVICE void operator()(int64_t idx) const {
-    output_[idx] = dout_[idx] * Eigen::numext::polygamma(T(1), x_[idx]);
-  }
-
- private:
-  const T* dout_;
-  const T* x_;
-  T* output_;
-  int64_t numel_;
-};
-
-using Tensor = framework::Tensor;
-
-template <typename DeviceContext, typename T>
-class DigammaKernel : public framework::OpKernel<T> {
- public:
-  void Compute(const framework::ExecutionContext& context) const override {
-    const Tensor* x = context.Input<Tensor>("X");
-    Tensor* out = context.Output<Tensor>("Out");
-
-    auto numel = x->numel();
-    auto* x_data = x->data<T>();
-    auto* out_data = out->mutable_data<T>(context.GetPlace(),
-                                          size_t(x->numel() * sizeof(T)));
-
-    auto& dev_ctx = context.template device_context<DeviceContext>();
-    platform::ForRange<DeviceContext> for_range(dev_ctx, numel);
-    DigammaFunctor<T> functor(x_data, out_data, numel);
-    for_range(functor);
-  }
-};
-
-template <typename DeviceContext, typename T>
-class DigammaGradKernel : public framework::OpKernel<T> {
- public:
-  void Compute(const framework::ExecutionContext& context) const override {
-    const Tensor* d_out = context.Input<Tensor>(framework::GradVarName("Out"));
-    const Tensor* x = context.Input<Tensor>("X");
-    auto* d_x = context.Output<Tensor>(framework::GradVarName("X"));
-
-    auto numel = d_out->numel();
-    auto* dout_data = d_out->data<T>();
-    auto* x_data = x->data<T>();
-    auto* dx_data = d_x->mutable_data<T>(
-        context.GetPlace(), static_cast<size_t>(numel * sizeof(T)));
-
-    auto& dev_ctx = context.template device_context<DeviceContext>();
-    platform::ForRange<DeviceContext> for_range(dev_ctx, numel);
-    DigammaGradFunctor<T> functor(dout_data, x_data, dx_data, numel);
-    for_range(functor);
-  }
-};
-
-}  // namespace operators
-}  // namespace paddle
diff --git a/paddle/pten/kernels/cpu/digamma_grad_kernel.cc b/paddle/pten/kernels/cpu/digamma_grad_kernel.cc
new file mode 100644
index 00000000000..47be4302e84
--- /dev/null
+++ b/paddle/pten/kernels/cpu/digamma_grad_kernel.cc
@@ -0,0 +1,23 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/pten/kernels/digamma_grad_kernel.h"
+#include "paddle/pten/backends/cpu/cpu_context.h"
+#include "paddle/pten/common/scalar.h"
+#include "paddle/pten/core/dense_tensor.h"
+#include "paddle/pten/core/kernel_registry.h"
+#include "paddle/pten/kernels/impl/digamma_grad_kernel_impl.h"
+
+PT_REGISTER_KERNEL(
+    digamma_grad, CPU, ALL_LAYOUT, pten::DigammaGradKernel, float, double) {}
diff --git a/paddle/pten/kernels/cpu/digamma_kernel.cc b/paddle/pten/kernels/cpu/digamma_kernel.cc
new file mode 100644
index 00000000000..6766c3b2999
--- /dev/null
+++ b/paddle/pten/kernels/cpu/digamma_kernel.cc
@@ -0,0 +1,23 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/pten/kernels/digamma_kernel.h"
+#include "paddle/pten/backends/cpu/cpu_context.h"
+#include "paddle/pten/common/scalar.h"
+#include "paddle/pten/core/dense_tensor.h"
+#include "paddle/pten/core/kernel_registry.h"
+#include "paddle/pten/kernels/impl/digamma_kernel_impl.h"
+
+PT_REGISTER_KERNEL(
+    digamma, CPU, ALL_LAYOUT, pten::DigammaKernel, float, double) {}
diff --git a/paddle/pten/kernels/digamma_grad_kernel.h b/paddle/pten/kernels/digamma_grad_kernel.h
new file mode 100644
index 00000000000..ef3d084e0a0
--- /dev/null
+++ b/paddle/pten/kernels/digamma_grad_kernel.h
@@ -0,0 +1,27 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include "paddle/pten/core/dense_tensor.h"
+
+namespace pten {
+
+template <typename T, typename Context>
+void DigammaGradKernel(const Context& ctx,
+                       const DenseTensor& out_grad,
+                       const DenseTensor& x,
+                       DenseTensor* x_grad);
+
+}  // namespace pten
diff --git a/paddle/pten/kernels/digamma_kernel.h b/paddle/pten/kernels/digamma_kernel.h
new file mode 100644
index 00000000000..af7a2893127
--- /dev/null
+++ b/paddle/pten/kernels/digamma_kernel.h
@@ -0,0 +1,24 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include "paddle/pten/core/dense_tensor.h"
+
+namespace pten {
+
+template <typename T, typename Context>
+void DigammaKernel(const Context& ctx, const DenseTensor& x, DenseTensor* out);
+
+}  // namespace pten
diff --git a/paddle/pten/kernels/gpu/digamma_grad_kernel.cu b/paddle/pten/kernels/gpu/digamma_grad_kernel.cu
new file mode 100644
index 00000000000..b87ea5a5cc0
--- /dev/null
+++ b/paddle/pten/kernels/gpu/digamma_grad_kernel.cu
@@ -0,0 +1,22 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/pten/backends/gpu/gpu_context.h"
+#include "paddle/pten/core/dense_tensor.h"
+#include "paddle/pten/core/kernel_registry.h"
+#include "paddle/pten/kernels/digamma_grad_kernel.h"
+#include "paddle/pten/kernels/impl/digamma_grad_kernel_impl.h"
+
+PT_REGISTER_KERNEL(
+    digamma_grad, GPU, ALL_LAYOUT, pten::DigammaGradKernel, float, double) {}
diff --git a/paddle/pten/kernels/gpu/digamma_kernel.cu b/paddle/pten/kernels/gpu/digamma_kernel.cu
new file mode 100644
index 00000000000..8b847c1a476
--- /dev/null
+++ b/paddle/pten/kernels/gpu/digamma_kernel.cu
@@ -0,0 +1,23 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/pten/backends/gpu/gpu_context.h"
+#include "paddle/pten/common/scalar.h"
+#include "paddle/pten/core/dense_tensor.h"
+#include "paddle/pten/core/kernel_registry.h"
+#include "paddle/pten/kernels/digamma_kernel.h"
+#include "paddle/pten/kernels/impl/digamma_kernel_impl.h"
+
+PT_REGISTER_KERNEL(
+    digamma, GPU, ALL_LAYOUT, pten::DigammaKernel, float, double) {}
diff --git a/paddle/pten/kernels/impl/digamma_grad_kernel_impl.h b/paddle/pten/kernels/impl/digamma_grad_kernel_impl.h
new file mode 100644
index 00000000000..44f94608b9e
--- /dev/null
+++ b/paddle/pten/kernels/impl/digamma_grad_kernel_impl.h
@@ -0,0 +1,55 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include <unsupported/Eigen/SpecialFunctions>
+#include "paddle/fluid/platform/for_range.h"
+#include "paddle/pten/core/dense_tensor.h"
+
+namespace pten {
+
+template <typename T>
+struct DigammaGradFunctor {
+  DigammaGradFunctor(const T* dout, const T* x, T* output, int64_t numel)
+      : dout_(dout), x_(x), output_(output), numel_(numel) {}
+
+  HOSTDEVICE void operator()(int64_t idx) const {
+    output_[idx] = dout_[idx] * Eigen::numext::polygamma(T(1), x_[idx]);
+  }
+
+ private:
+  const T* dout_;
+  const T* x_;
+  T* output_;
+  int64_t numel_;
+};
+
+template <typename T, typename Context>
+void DigammaGradKernel(const Context& ctx,
+                       const DenseTensor& out_grad,
+                       const DenseTensor& x,
+                       DenseTensor* x_grad) {
+  x_grad->mutable_data<T>(ctx.GetPlace());
+
+  auto* dout_data = out_grad.data<T>();
+  auto* x_data = x.data<T>();
+  auto* dx_data = x_grad->data<T>();
+  auto numel = out_grad.numel();
+  paddle::platform::ForRange<Context> for_range(ctx, numel);
+  DigammaGradFunctor<T> functor(dout_data, x_data, dx_data, numel);
+  for_range(functor);
+}
+
+}  // namespace pten
diff --git a/paddle/pten/kernels/impl/digamma_kernel_impl.h b/paddle/pten/kernels/impl/digamma_kernel_impl.h
new file mode 100644
index 00000000000..a47750ff15a
--- /dev/null
+++ b/paddle/pten/kernels/impl/digamma_kernel_impl.h
@@ -0,0 +1,49 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include <unsupported/Eigen/SpecialFunctions>
+#include "paddle/fluid/platform/for_range.h"
+#include "paddle/pten/core/dense_tensor.h"
+
+namespace pten {
+
+template <typename T>
+struct DigammaFunctor {
+  DigammaFunctor(const T* input, T* output, int64_t numel)
+      : input_(input), output_(output), numel_(numel) {}
+
+  HOSTDEVICE void operator()(int64_t idx) const {
+    output_[idx] = Eigen::numext::digamma(input_[idx]);
+  }
+
+ private:
+  const T* input_;
+  T* output_;
+  int64_t numel_;
+};
+
+template <typename T, typename Context>
+void DigammaKernel(const Context& ctx, const DenseTensor& x, DenseTensor* out) {
+  out->mutable_data<T>(ctx.GetPlace());
+  auto* x_data = x.data<T>();
+  auto* out_data = out->data<T>();
+  auto numel = x.numel();
+  paddle::platform::ForRange<Context> for_range(ctx, numel);
+  DigammaFunctor<T> functor(x_data, out_data, numel);
+  for_range(functor);
+}
+
+}  // namespace pten
-- 
GitLab
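
A minimal standalone sketch of what the moved kernels compute, using the same
Eigen routines the patch calls: Eigen::numext::digamma for the forward kernel
(DigammaFunctor) and Eigen::numext::polygamma for the gradient
(DigammaGradFunctor). This is illustration only, not part of the commit; it
assumes Eigen's unsupported SpecialFunctions module is on the include path,
just as the impl headers above require.

#include <unsupported/Eigen/SpecialFunctions>  // Eigen::numext::digamma/polygamma
#include <cstdio>

int main() {
  const double x[3] = {0.5, 1.0, 2.0};
  const double dout[3] = {1.0, 1.0, 1.0};  // upstream gradient, as fed to DigammaGradFunctor
  for (int i = 0; i < 3; ++i) {
    // Forward pass (DigammaFunctor): out = digamma(x) = d/dx log(Gamma(x)).
    const double out = Eigen::numext::digamma(x[i]);
    // Backward pass (DigammaGradFunctor): dx = dout * polygamma(1, x).
    const double dx = dout[i] * Eigen::numext::polygamma(1.0, x[i]);
    std::printf("x=%g  digamma(x)=%g  dx=%g\n", x[i], out, dx);
  }
  return 0;
}

Built with, e.g., g++ -I/path/to/eigen sketch.cc (the include path is a
placeholder), the output can be sanity-checked against known values such as
digamma(1) = -0.57721... (the negative Euler-Mascheroni constant) and
polygamma(1, 1) = pi^2/6.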