From 197da15ae4a5a127d1ce1208e2bed4bab05f836a Mon Sep 17 00:00:00 2001 From: xiongkun Date: Tue, 1 Mar 2022 19:00:30 +0800 Subject: [PATCH] [phi] transfer the selu_op and pass the CI (#39819) * transfer the selu_op and pass the CI * add sig files * fix code * fix by code review * remove TODO * change the include position * change the head position --- paddle/fluid/operators/selu_op.cc | 8 -- paddle/fluid/operators/selu_op.cu | 22 ---- paddle/fluid/operators/selu_op.h | 123 ------------------ paddle/phi/kernels/cpu/selu_grad_kernel.cc | 21 +++ paddle/phi/kernels/cpu/selu_kernel.cc | 21 +++ paddle/phi/kernels/gpu/selu_grad_kernel.cu | 22 ++++ paddle/phi/kernels/gpu/selu_kernel.cu | 21 +++ .../phi/kernels/impl/selu_grad_kernel_impl.h | 35 +++++ paddle/phi/kernels/impl/selu_kernel_impl.h | 88 +++++++++++++ paddle/phi/kernels/selu_grad_kernel.h | 29 +++++ paddle/phi/kernels/selu_kernel.h | 28 ++++ paddle/phi/ops/compat/selu_sig.cc | 28 ++++ 12 files changed, 293 insertions(+), 153 deletions(-) delete mode 100644 paddle/fluid/operators/selu_op.cu delete mode 100644 paddle/fluid/operators/selu_op.h create mode 100644 paddle/phi/kernels/cpu/selu_grad_kernel.cc create mode 100644 paddle/phi/kernels/cpu/selu_kernel.cc create mode 100644 paddle/phi/kernels/gpu/selu_grad_kernel.cu create mode 100644 paddle/phi/kernels/gpu/selu_kernel.cu create mode 100644 paddle/phi/kernels/impl/selu_grad_kernel_impl.h create mode 100644 paddle/phi/kernels/impl/selu_kernel_impl.h create mode 100644 paddle/phi/kernels/selu_grad_kernel.h create mode 100644 paddle/phi/kernels/selu_kernel.h create mode 100644 paddle/phi/ops/compat/selu_sig.cc diff --git a/paddle/fluid/operators/selu_op.cc b/paddle/fluid/operators/selu_op.cc index 0adf61d7ce3..88ef1f3ea4a 100644 --- a/paddle/fluid/operators/selu_op.cc +++ b/paddle/fluid/operators/selu_op.cc @@ -12,8 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/selu_op.h" - #include #include #include @@ -127,9 +125,3 @@ REGISTER_OPERATOR(selu, ops::SeluOp, ops::SeluOpMaker, ops::SeluOpInferVarType, ops::SeluGradMaker, ops::SeluGradMaker); REGISTER_OPERATOR(selu_grad, ops::SeluGradOp); -REGISTER_OP_CPU_KERNEL( - selu, ops::SeluKernel, - ops::SeluKernel); -REGISTER_OP_CPU_KERNEL( - selu_grad, ops::SeluGradKernel, - ops::SeluGradKernel); diff --git a/paddle/fluid/operators/selu_op.cu b/paddle/fluid/operators/selu_op.cu deleted file mode 100644 index fb3245ab760..00000000000 --- a/paddle/fluid/operators/selu_op.cu +++ /dev/null @@ -1,22 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ -#include "paddle/fluid/operators/selu_op.h" - -namespace ops = paddle::operators; -REGISTER_OP_CUDA_KERNEL( - selu, ops::SeluKernel, - ops::SeluKernel); -REGISTER_OP_CUDA_KERNEL( - selu_grad, ops::SeluGradKernel, - ops::SeluGradKernel); diff --git a/paddle/fluid/operators/selu_op.h b/paddle/fluid/operators/selu_op.h deleted file mode 100644 index b2fc834c42f..00000000000 --- a/paddle/fluid/operators/selu_op.h +++ /dev/null @@ -1,123 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once -#include -#include "paddle/fluid/framework/op_registry.h" -#include "paddle/fluid/operators/math.h" -#include "paddle/fluid/platform/for_range.h" - -namespace paddle { -namespace operators { - -template -struct SeluFunctor { - SeluFunctor(const T* x_data_ptr, float alpha, float scale, T* y_data_ptr) - : x_data_ptr_(x_data_ptr), - alpha_(alpha), - scale_(scale), - y_data_ptr_(y_data_ptr) {} - - HOSTDEVICE void operator()(size_t idx) const { - T x_ele = x_data_ptr_[idx]; - if (x_ele <= 0) { - x_ele = alpha_ * real_exp(x_ele) - alpha_; - } - y_data_ptr_[idx] = scale_ * x_ele; - } - const T* x_data_ptr_; - const float alpha_; - const float scale_; - T* y_data_ptr_; -}; - -template -struct SeluGradFunctor { - SeluGradFunctor(const T* y_data_ptr, const T* dy_data_ptr, float alpha, - float scale, T* dx_data_ptr) - : y_data_ptr_(y_data_ptr), - dy_data_ptr_(dy_data_ptr), - alpha_(alpha), - scale_(scale), - la_(alpha * scale), - dx_data_ptr_(dx_data_ptr) {} - - HOSTDEVICE void operator()(size_t idx) const { - T y_ele = y_data_ptr_[idx]; - T dy_ele = dy_data_ptr_[idx]; - - float tmp = scale_; - if (y_ele <= 0) { - tmp = y_ele + la_; - } - dx_data_ptr_[idx] = dy_ele * tmp; - } - const T* y_data_ptr_; - const T* dy_data_ptr_; - const float alpha_; - const float scale_; - const float la_; - T* dx_data_ptr_; -}; - -template -class SeluKernel : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext& context) const override { - using Tensor = framework::Tensor; - - auto* x = context.Input("X"); - auto* out = context.Output("Out"); - - 
float alpha = context.Attr("alpha"); - float scale = context.Attr("scale"); - - auto out_ptr = out->mutable_data(context.GetPlace()); - - SeluFunctor functor(x->data(), alpha, scale, out_ptr); - - auto& dev_ctx = context.template device_context(); - size_t limit = static_cast(x->numel()); - platform::ForRange for_range(dev_ctx, limit); - for_range(functor); - } -}; - -template -class SeluGradKernel : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext& context) const override { - using Tensor = framework::Tensor; - - auto* out = context.Input("Out"); - auto* dout = context.Input(framework::GradVarName("Out")); - auto* dx = context.Output(framework::GradVarName("X")); - - float alpha = context.Attr("alpha"); - float scale = context.Attr("scale"); - - auto dx_ptr = dx->mutable_data(context.GetPlace()); - - SeluGradFunctor functor(out->data(), dout->data(), alpha, scale, - dx_ptr); - - auto& dev_ctx = context.template device_context(); - size_t limit = static_cast(out->numel()); - platform::ForRange for_range(dev_ctx, limit); - for_range(functor); - } -}; - -} // namespace operators -} // namespace paddle diff --git a/paddle/phi/kernels/cpu/selu_grad_kernel.cc b/paddle/phi/kernels/cpu/selu_grad_kernel.cc new file mode 100644 index 00000000000..32101b19132 --- /dev/null +++ b/paddle/phi/kernels/cpu/selu_grad_kernel.cc @@ -0,0 +1,21 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/phi/kernels/selu_grad_kernel.h" +#include "paddle/phi/backends/cpu/cpu_context.h" +#include "paddle/phi/core/kernel_registry.h" +#include "paddle/phi/kernels/impl/selu_grad_kernel_impl.h" + +PD_REGISTER_KERNEL( + selu_grad, CPU, ALL_LAYOUT, phi::SeluGradKernel, float, double) {} diff --git a/paddle/phi/kernels/cpu/selu_kernel.cc b/paddle/phi/kernels/cpu/selu_kernel.cc new file mode 100644 index 00000000000..bc5a0616a72 --- /dev/null +++ b/paddle/phi/kernels/cpu/selu_kernel.cc @@ -0,0 +1,21 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/phi/kernels/selu_kernel.h" + +#include "paddle/phi/backends/cpu/cpu_context.h" +#include "paddle/phi/core/kernel_registry.h" +#include "paddle/phi/kernels/impl/selu_kernel_impl.h" + +PD_REGISTER_KERNEL(selu, CPU, ALL_LAYOUT, phi::SeluKernel, float, double) {} diff --git a/paddle/phi/kernels/gpu/selu_grad_kernel.cu b/paddle/phi/kernels/gpu/selu_grad_kernel.cu new file mode 100644 index 00000000000..0ed299413c1 --- /dev/null +++ b/paddle/phi/kernels/gpu/selu_grad_kernel.cu @@ -0,0 +1,22 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/phi/kernels/selu_grad_kernel.h" + +#include "paddle/phi/backends/gpu/gpu_context.h" +#include "paddle/phi/core/kernel_registry.h" +#include "paddle/phi/kernels/impl/selu_grad_kernel_impl.h" + +PD_REGISTER_KERNEL( + selu_grad, GPU, ALL_LAYOUT, phi::SeluGradKernel, float, double) {} diff --git a/paddle/phi/kernels/gpu/selu_kernel.cu b/paddle/phi/kernels/gpu/selu_kernel.cu new file mode 100644 index 00000000000..99303d8c18a --- /dev/null +++ b/paddle/phi/kernels/gpu/selu_kernel.cu @@ -0,0 +1,21 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "paddle/phi/kernels/selu_kernel.h" + +#include "paddle/phi/backends/gpu/gpu_context.h" +#include "paddle/phi/core/kernel_registry.h" +#include "paddle/phi/kernels/impl/selu_kernel_impl.h" + +PD_REGISTER_KERNEL(selu, GPU, ALL_LAYOUT, phi::SeluKernel, float, double) {} diff --git a/paddle/phi/kernels/impl/selu_grad_kernel_impl.h b/paddle/phi/kernels/impl/selu_grad_kernel_impl.h new file mode 100644 index 00000000000..d09c87b0a4e --- /dev/null +++ b/paddle/phi/kernels/impl/selu_grad_kernel_impl.h @@ -0,0 +1,35 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include "paddle/phi/kernels/impl/selu_kernel_impl.h" + +#include "paddle/phi/core/dense_tensor.h" + +namespace phi { +template +void SeluGradKernel(const Context& dev_ctx, + const DenseTensor& out, + const DenseTensor& dout, + float scale, + float alpha, + DenseTensor* dx) { + auto dx_ptr = dev_ctx.template Alloc(dx); + SeluGradFunctor functor( + out.data(), dout.data(), alpha, scale, dx_ptr); + size_t limit = static_cast(out.numel()); + paddle::platform::ForRange for_range(dev_ctx, limit); + for_range(functor); +} +} // namespace phi diff --git a/paddle/phi/kernels/impl/selu_kernel_impl.h b/paddle/phi/kernels/impl/selu_kernel_impl.h new file mode 100644 index 00000000000..888bac42bfd --- /dev/null +++ b/paddle/phi/kernels/impl/selu_kernel_impl.h @@ -0,0 +1,88 @@ +// Copyright (c) 2022 PaddlePaddle Authors. 
All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include +#include "paddle/fluid/operators/math.h" +#include "paddle/fluid/platform/for_range.h" +#include "paddle/phi/core/dense_tensor.h" + +namespace phi { + +template +struct SeluFunctor { + SeluFunctor(const T* x_data_ptr, float alpha, float scale, T* y_data_ptr) + : x_data_ptr_(x_data_ptr), + alpha_(alpha), + scale_(scale), + y_data_ptr_(y_data_ptr) {} + + HOSTDEVICE void operator()(size_t idx) const { + T x_ele = x_data_ptr_[idx]; + if (x_ele <= 0) { + x_ele = alpha_ * paddle::operators::real_exp(x_ele) - alpha_; + } + y_data_ptr_[idx] = scale_ * x_ele; + } + const T* x_data_ptr_; + const float alpha_; + const float scale_; + T* y_data_ptr_; +}; + +template +struct SeluGradFunctor { + SeluGradFunctor(const T* y_data_ptr, + const T* dy_data_ptr, + float alpha, + float scale, + T* dx_data_ptr) + : y_data_ptr_(y_data_ptr), + dy_data_ptr_(dy_data_ptr), + alpha_(alpha), + scale_(scale), + la_(alpha * scale), + dx_data_ptr_(dx_data_ptr) {} + + HOSTDEVICE void operator()(size_t idx) const { + T y_ele = y_data_ptr_[idx]; + T dy_ele = dy_data_ptr_[idx]; + + float tmp = scale_; + if (y_ele <= 0) { + tmp = y_ele + la_; + } + dx_data_ptr_[idx] = dy_ele * tmp; + } + const T* y_data_ptr_; + const T* dy_data_ptr_; + const float alpha_; + const float scale_; + const float la_; + T* dx_data_ptr_; +}; + +template +void SeluKernel(const Context& dev_ctx, + const DenseTensor& x, + 
float scale, + float alpha, + DenseTensor* out) { + auto out_ptr = dev_ctx.template Alloc(out); + SeluFunctor functor(x.data(), alpha, scale, out_ptr); + size_t limit = static_cast(x.numel()); + paddle::platform::ForRange for_range(dev_ctx, limit); + for_range(functor); +} +} // namespace phi diff --git a/paddle/phi/kernels/selu_grad_kernel.h b/paddle/phi/kernels/selu_grad_kernel.h new file mode 100644 index 00000000000..42cde6deabe --- /dev/null +++ b/paddle/phi/kernels/selu_grad_kernel.h @@ -0,0 +1,29 @@ + +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "paddle/phi/core/dense_tensor.h" + +namespace phi { + +template +void SeluGradKernel(const Context& dev_ctx, + const DenseTensor& out, + const DenseTensor& d_out, + float scale, + float alpha, + DenseTensor* d_x); +} // namespace phi diff --git a/paddle/phi/kernels/selu_kernel.h b/paddle/phi/kernels/selu_kernel.h new file mode 100644 index 00000000000..cd5d27e98cc --- /dev/null +++ b/paddle/phi/kernels/selu_kernel.h @@ -0,0 +1,28 @@ + +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "paddle/phi/core/dense_tensor.h" + +namespace phi { + +template +void SeluKernel(const Context& dev_ctx, + const DenseTensor& x, + float scale, + float alpha, + DenseTensor* out); +} // namespace phi diff --git a/paddle/phi/ops/compat/selu_sig.cc b/paddle/phi/ops/compat/selu_sig.cc new file mode 100644 index 00000000000..23f5cc34515 --- /dev/null +++ b/paddle/phi/ops/compat/selu_sig.cc @@ -0,0 +1,28 @@ + +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/phi/core/compat/op_utils.h" + +namespace phi { + +KernelSignature SeluGradOpArgumentMapping( + const ArgumentMappingContext& ctx) { + return KernelSignature("selu_grad", + {"Out", GradVarName("Out")}, + {"scale", "alpha"}, + {GradVarName("X")}); +} +} // namespace phi +PD_REGISTER_ARG_MAPPING_FN(selu_grad, phi::SeluGradOpArgumentMapping); -- GitLab