diff --git a/paddle/fluid/operators/digamma_op.cc b/paddle/fluid/operators/digamma_op.cc
index b1a58817e060434d0e309da3476edb5e96b5dfa3..eb0471fec12066482c19bef480057601199e956f 100644
--- a/paddle/fluid/operators/digamma_op.cc
+++ b/paddle/fluid/operators/digamma_op.cc
@@ -64,6 +64,13 @@ class DigammaGradOp : public framework::OperatorWithKernel {
     ctx->SetOutputDim(framework::GradVarName("X"), dout_dims);
     ctx->ShareLoD(framework::GradVarName("Out"), framework::GradVarName("X"));
   }
+
+  framework::KernelSignature GetExpectedPtenKernelArgs(
+      const framework::ExecutionContext &ctx) const override {
+    return framework::KernelSignature("digamma_grad",
+                                      {framework::GradVarName("Out"), "X"}, {},
+                                      {framework::GradVarName("X")});
+  }
 };
 
 template <typename T>
@@ -89,12 +96,3 @@ REGISTER_OPERATOR(digamma, ops::DigammaOp, ops::DigammaOpMaker,
                   ops::DigammaGradOpMaker<paddle::framework::OpDesc>,
                   ops::DigammaGradOpMaker<paddle::imperative::OpBase>);
 REGISTER_OPERATOR(digamma_grad, ops::DigammaGradOp);
-
-REGISTER_OP_CPU_KERNEL(
-    digamma, ops::DigammaKernel<paddle::platform::CPUDeviceContext, float>,
-    ops::DigammaKernel<paddle::platform::CPUDeviceContext, double>);
-
-REGISTER_OP_CPU_KERNEL(
-    digamma_grad,
-    ops::DigammaGradKernel<paddle::platform::CPUDeviceContext, float>,
-    ops::DigammaGradKernel<paddle::platform::CPUDeviceContext, double>);
diff --git a/paddle/fluid/operators/digamma_op.cu b/paddle/fluid/operators/digamma_op.cu
deleted file mode 100644
index 5f2f59ba520d0fb1e2c083c211bceba0e4a25715..0000000000000000000000000000000000000000
--- a/paddle/fluid/operators/digamma_op.cu
+++ /dev/null
@@ -1,26 +0,0 @@
-/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#include "paddle/fluid/operators/digamma_op.h"
-
-namespace ops = paddle::operators;
-
-REGISTER_OP_CUDA_KERNEL(
-    digamma, ops::DigammaKernel<paddle::platform::CUDADeviceContext, float>,
-    ops::DigammaKernel<paddle::platform::CUDADeviceContext, double>);
-
-REGISTER_OP_CUDA_KERNEL(
-    digamma_grad,
-    ops::DigammaGradKernel<paddle::platform::CUDADeviceContext, float>,
-    ops::DigammaGradKernel<paddle::platform::CUDADeviceContext, double>);
diff --git a/paddle/fluid/operators/digamma_op.h b/paddle/fluid/operators/digamma_op.h
index f82628f020480f5eca22079b13e586e1ebf13643..85f9094e6a0bca051f39157ba9ec271cbb830448 100644
--- a/paddle/fluid/operators/digamma_op.h
+++ b/paddle/fluid/operators/digamma_op.h
@@ -14,86 +14,5 @@ limitations under the License. */
 
 #pragma once
 
-#include <unsupported/Eigen/SpecialFunctions>
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/operator.h"
-#include "paddle/fluid/platform/for_range.h"
-
-namespace paddle {
-namespace operators {
-
-template <typename T>
-struct DigammaFunctor {
-  DigammaFunctor(const T* input, T* output, int64_t numel)
-      : input_(input), output_(output), numel_(numel) {}
-
-  HOSTDEVICE void operator()(int64_t idx) const {
-    output_[idx] = Eigen::numext::digamma(input_[idx]);
-  }
-
- private:
-  const T* input_;
-  T* output_;
-  int64_t numel_;
-};
-
-template <typename T>
-struct DigammaGradFunctor {
-  DigammaGradFunctor(const T* dout, const T* x, T* output, int64_t numel)
-      : dout_(dout), x_(x), output_(output), numel_(numel) {}
-
-  HOSTDEVICE void operator()(int64_t idx) const {
-    output_[idx] = dout_[idx] * Eigen::numext::polygamma(T(1), x_[idx]);
-  }
-
- private:
-  const T* dout_;
-  const T* x_;
-  T* output_;
-  int64_t numel_;
-};
-
-using Tensor = framework::Tensor;
-
-template <typename DeviceContext, typename T>
-class DigammaKernel : public framework::OpKernel<T> {
- public:
-  void Compute(const framework::ExecutionContext& context) const override {
-    const Tensor* x = context.Input<Tensor>("X");
-    Tensor* out = context.Output<Tensor>("Out");
-
-    auto numel = x->numel();
-    auto* x_data = x->data<T>();
-    auto* out_data = out->mutable_data<T>(context.GetPlace(),
-                                          size_t(x->numel() * sizeof(T)));
-
-    auto& dev_ctx = context.template device_context<DeviceContext>();
-    platform::ForRange<DeviceContext> for_range(dev_ctx, numel);
-    DigammaFunctor<T> functor(x_data, out_data, numel);
-    for_range(functor);
-  }
-};
-
-template <typename DeviceContext, typename T>
-class DigammaGradKernel : public framework::OpKernel<T> {
- public:
-  void Compute(const framework::ExecutionContext& context) const override {
-    const Tensor* d_out = context.Input<Tensor>(framework::GradVarName("Out"));
-    const Tensor* x = context.Input<Tensor>("X");
-    auto* d_x = context.Output<Tensor>(framework::GradVarName("X"));
-
-    auto numel = d_out->numel();
-    auto* dout_data = d_out->data<T>();
-    auto* x_data = x->data<T>();
-    auto* dx_data = d_x->mutable_data<T>(
-        context.GetPlace(), static_cast<size_t>(numel * sizeof(T)));
-
-    auto& dev_ctx = context.template device_context<DeviceContext>();
-    platform::ForRange<DeviceContext> for_range(dev_ctx, numel);
-    DigammaGradFunctor<T> functor(dout_data, x_data, dx_data, numel);
-    for_range(functor);
-  }
-};
-
-}  // namespace operators
-}  // namespace paddle
diff --git a/paddle/pten/kernels/cpu/digamma_grad_kernel.cc b/paddle/pten/kernels/cpu/digamma_grad_kernel.cc
new file mode 100644
index 0000000000000000000000000000000000000000..47be4302e847590381dcee6240e80802152a1243
--- /dev/null
+++ b/paddle/pten/kernels/cpu/digamma_grad_kernel.cc
@@ -0,0 +1,23 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +#include "paddle/pten/kernels/digamma_grad_kernel.h" +#include "paddle/pten/backends/cpu/cpu_context.h" +#include "paddle/pten/common/scalar.h" +#include "paddle/pten/core/dense_tensor.h" +#include "paddle/pten/core/kernel_registry.h" +#include "paddle/pten/kernels/impl/digamma_grad_kernel_impl.h" + +PT_REGISTER_KERNEL( + digamma_grad, CPU, ALL_LAYOUT, pten::DigammaGradKernel, float, double) {} diff --git a/paddle/pten/kernels/cpu/digamma_kernel.cc b/paddle/pten/kernels/cpu/digamma_kernel.cc new file mode 100644 index 0000000000000000000000000000000000000000..6766c3b2999d9d4c5ab04134176f939b301250d7 --- /dev/null +++ b/paddle/pten/kernels/cpu/digamma_kernel.cc @@ -0,0 +1,23 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/pten/kernels/digamma_kernel.h" +#include "paddle/pten/backends/cpu/cpu_context.h" +#include "paddle/pten/common/scalar.h" +#include "paddle/pten/core/dense_tensor.h" +#include "paddle/pten/core/kernel_registry.h" +#include "paddle/pten/kernels/impl/digamma_kernel_impl.h" + +PT_REGISTER_KERNEL( + digamma, CPU, ALL_LAYOUT, pten::DigammaKernel, float, double) {} diff --git a/paddle/pten/kernels/digamma_grad_kernel.h b/paddle/pten/kernels/digamma_grad_kernel.h new file mode 100644 index 0000000000000000000000000000000000000000..ef3d084e0a0a673fa5c2f471713b70d41d74aa1d --- /dev/null +++ b/paddle/pten/kernels/digamma_grad_kernel.h @@ -0,0 +1,27 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "paddle/pten/core/dense_tensor.h" + +namespace pten { + +template +void DigammaGradKernel(const Context& ctx, + const DenseTensor& out_grad, + const DenseTensor& x, + DenseTensor* x_grad); + +} // namepsace pten diff --git a/paddle/pten/kernels/digamma_kernel.h b/paddle/pten/kernels/digamma_kernel.h new file mode 100644 index 0000000000000000000000000000000000000000..af7a2893127b16155bb6f37279e57deb053a2753 --- /dev/null +++ b/paddle/pten/kernels/digamma_kernel.h @@ -0,0 +1,24 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include "paddle/pten/core/dense_tensor.h"
+
+namespace pten {
+
+template <typename T, typename Context>
+void DigammaKernel(const Context& ctx, const DenseTensor& x, DenseTensor* out);
+
+}  // namespace pten
diff --git a/paddle/pten/kernels/gpu/digamma_grad_kernel.cu b/paddle/pten/kernels/gpu/digamma_grad_kernel.cu
new file mode 100644
index 0000000000000000000000000000000000000000..b87ea5a5cc0ec31f91f15806e05a15bf689a740f
--- /dev/null
+++ b/paddle/pten/kernels/gpu/digamma_grad_kernel.cu
@@ -0,0 +1,22 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/pten/backends/gpu/gpu_context.h"
+#include "paddle/pten/core/dense_tensor.h"
+#include "paddle/pten/core/kernel_registry.h"
+#include "paddle/pten/kernels/digamma_grad_kernel.h"
+#include "paddle/pten/kernels/impl/digamma_grad_kernel_impl.h"
+
+PT_REGISTER_KERNEL(
+    digamma_grad, GPU, ALL_LAYOUT, pten::DigammaGradKernel, float, double) {}
diff --git a/paddle/pten/kernels/gpu/digamma_kernel.cu b/paddle/pten/kernels/gpu/digamma_kernel.cu
new file mode 100644
index 0000000000000000000000000000000000000000..8b847c1a47631a073c40f76baa3be5f2f4299060
--- /dev/null
+++ b/paddle/pten/kernels/gpu/digamma_kernel.cu
@@ -0,0 +1,23 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +#include "paddle/pten/backends/gpu/gpu_context.h" +#include "paddle/pten/common/scalar.h" +#include "paddle/pten/core/dense_tensor.h" +#include "paddle/pten/core/kernel_registry.h" +#include "paddle/pten/kernels/digamma_kernel.h" +#include "paddle/pten/kernels/impl/digamma_kernel_impl.h" + +PT_REGISTER_KERNEL( + digamma, GPU, ALL_LAYOUT, pten::DigammaKernel, float, double) {} diff --git a/paddle/pten/kernels/impl/digamma_grad_kernel_impl.h b/paddle/pten/kernels/impl/digamma_grad_kernel_impl.h new file mode 100644 index 0000000000000000000000000000000000000000..44f94608b9e44ebcf469425c5c6d790146940682 --- /dev/null +++ b/paddle/pten/kernels/impl/digamma_grad_kernel_impl.h @@ -0,0 +1,55 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include "paddle/fluid/platform/for_range.h" +#include "paddle/pten/core/dense_tensor.h" + +namespace pten { + +template +struct DigammaGradFunctor { + DigammaGradFunctor(const T* dout, const T* x, T* output, int64_t numel) + : dout_(dout), x_(x), output_(output), numel_(numel) {} + + HOSTDEVICE void operator()(int64_t idx) const { + output_[idx] = dout_[idx] * Eigen::numext::polygamma(T(1), x_[idx]); + } + + private: + const T* dout_; + const T* x_; + T* output_; + int64_t numel_; +}; + +template +void DigammaGradKernel(const Context& ctx, + const DenseTensor& out_grad, + const DenseTensor& x, + DenseTensor* x_grad) { + x_grad->mutable_data(ctx.GetPlace()); + + auto* dout_data = out_grad.data(); + auto* x_data = x.data(); + auto* dx_data = x_grad->data(); + auto numel = out_grad.numel(); + platform::ForRange for_range(ctx, numel); + DigammaGradFunctor functor(dout_data, x_data, dx_data, numel); + for_range(functor); +} + +} // namespace pten diff --git a/paddle/pten/kernels/impl/digamma_kernel_impl.h b/paddle/pten/kernels/impl/digamma_kernel_impl.h new file mode 100644 index 0000000000000000000000000000000000000000..a47750ff15a23048a4d843466331fde71cefed60 --- /dev/null +++ b/paddle/pten/kernels/impl/digamma_kernel_impl.h @@ -0,0 +1,49 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+#pragma once
+
+#include <unsupported/Eigen/SpecialFunctions>
+#include "paddle/fluid/platform/for_range.h"
+#include "paddle/pten/core/dense_tensor.h"
+
+namespace pten {
+
+template <typename T>
+struct DigammaFunctor {
+  DigammaFunctor(const T* input, T* output, int64_t numel)
+      : input_(input), output_(output), numel_(numel) {}
+
+  HOSTDEVICE void operator()(int64_t idx) const {
+    output_[idx] = Eigen::numext::digamma(input_[idx]);
+  }
+
+ private:
+  const T* input_;
+  T* output_;
+  int64_t numel_;
+};
+
+template <typename T, typename Context>
+void DigammaKernel(const Context& ctx, const DenseTensor& x, DenseTensor* out) {
+  out->mutable_data<T>(ctx.GetPlace());
+  auto* x_data = x.data<T>();
+  auto* out_data = out->data<T>();
+  auto numel = x.numel();
+  platform::ForRange<Context> for_range(ctx, numel);
+  DigammaFunctor<T> functor(x_data, out_data, numel);
+  for_range(functor);
+}
+
+}  // namespace pten
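For reference (not part of the diff above): the functors moved into the pten impl headers compute digamma(x) element-wise in the forward pass and dout * polygamma(1, x), i.e. the upstream gradient times the trigamma of the input, in the backward pass, fanned out over the tensor by platform::ForRange on CPU or GPU. A minimal standalone host-side sketch of that math, assuming only that Eigen's unsupported SpecialFunctions module (the same header the impl files include) is available, could look like this:

// Standalone sketch, not part of the change above: checks the element-wise
// math implemented by DigammaFunctor and DigammaGradFunctor for one value.
#include <cstdio>
#include <unsupported/Eigen/SpecialFunctions>

int main() {
  const double x = 3.5;     // hypothetical input element
  const double dout = 1.0;  // hypothetical upstream gradient

  // Forward: out = digamma(x), as in DigammaFunctor.
  const double out = Eigen::numext::digamma(x);

  // Backward: dx = dout * polygamma(1, x), as in DigammaGradFunctor.
  const double dx = dout * Eigen::numext::polygamma(1.0, x);

  std::printf("digamma(%g) = %g, grad = %g\n", x, out, dx);
  return 0;
}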