/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/activation_op.h"
#include "paddle/fluid/operators/amp/fp16_type_traits.h"
#include "paddle/fluid/operators/elementwise/elementwise_op_impl.cu.h"
#include "paddle/fluid/platform/bfloat16.h"
#include "paddle/fluid/platform/device/gpu/gpu_device_function.h"
#include "paddle/phi/kernels/funcs/activation_functor.h"

namespace paddle {
namespace operators {

template <typename T>
struct CudaSoftReluFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;
  MPType one = static_cast<MPType>(1.0f);
  float threshold;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"threshold", &threshold}};
  }

  // soft_relu(x) = log(1 + exp(max(min(x, threshold), -threshold)))
  // threshold should not be negative
  __device__ __forceinline__ T operator()(const T arg_x) const {
    MPType x = static_cast<MPType>(arg_x);
    MPType t = static_cast<MPType>(threshold);
    MPType temp_min = x < t ? x : t;
    MPType temp_max = temp_min > -t ? temp_min : -t;
    return static_cast<T>(log(one + exp(temp_max)));
  }
};

template <typename T>
struct CudaSoftReluGradFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;
  MPType one = static_cast<MPType>(1.0f);
  float threshold;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"threshold", &threshold}};
  }

  // dx = (out > -threshold && out < threshold) ? dout * (1 - exp(-out)) : 0
  // threshold should not be negative
  __device__ __forceinline__ T operator()(const T arg_dout,
                                          const T arg_out) const {
    MPType dout = static_cast<MPType>(arg_dout);
    MPType out = static_cast<MPType>(arg_out);
    MPType t = static_cast<MPType>(threshold);
    return (out > -t && out < t) ? static_cast<T>(dout * (one - exp(-out)))
                                 : static_cast<T>(0.0f);
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() {
    return ActBwdOpFwdDeps::kDepOut;
  }
};

template <typename T>
struct CudaRelu6Functor : public BaseActivationFunctor<T> {
  T zero = static_cast<T>(0.0f);
  float threshold;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"threshold", &threshold}};
  }

  // relu6(x) = min(max(0, x), 6)
  __device__ __forceinline__ T operator()(const T x) const {
    T t = static_cast<T>(threshold);
    return x <= zero ? zero : (x < t ? x : t);
  }
};

template <typename T>
struct CudaRelu6GradFunctor : public BaseActivationFunctor<T> {
  T zero = static_cast<T>(0.0f);
  float threshold;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"threshold", &threshold}};
  }

  // dx = (out > 0 && out < t) ? dout : 0
  __device__ __forceinline__ T operator()(const T dout, const T out) const {
    T t = static_cast<T>(threshold);
    return (out > zero && out < t) ? dout : zero;
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() {
    return ActBwdOpFwdDeps::kDepOut;
  }
};
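// Illustrative numbers only (not part of the op): with threshold = 6.0f the
// relu6 pair above behaves as
//   relu6(-1.0f) = 0.0f,  relu6(3.5f) = 3.5f,  relu6(8.0f) = 6.0f,
// and the gradient passes dout through only on the open interval (0, 6),
// which is why the backward functor declares kDepOut (it reads Out, not X).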
template <typename T>
struct CudaSoftsignFunctor : public BaseActivationFunctor<T> {
  T one = static_cast<T>(1.0f);

  // softsign(x) = x / (1 + abs(x))
  __device__ __forceinline__ T operator()(const T x) const {
    return x / (one + abs(x));
  }
};

template <typename T>
struct CudaSoftsignGradFunctor : public BaseActivationFunctor<T> {
  T one = static_cast<T>(1.0f);

  // dx = dout / (1 + abs(x))^2
  __device__ __forceinline__ T operator()(const T dout, const T x) const {
    T temp = one + abs(x);
    return dout / (temp * temp);
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return ActBwdOpFwdDeps::kDepX; }
};

template <typename DeviceContext, typename Functor>
class ActivationCudaKernel
    : public framework::OpKernel<typename Functor::ELEMENT_TYPE> {
 public:
  using T = typename Functor::ELEMENT_TYPE;
  void Compute(const framework::ExecutionContext& ctx) const override {
    const framework::Tensor* x = nullptr;
    framework::Tensor* out = nullptr;
    ExtractActivationTensor(ctx, &x, &out);
    out->mutable_data<T>(ctx.GetPlace());
    auto& dev_ctx = ctx.template device_context<DeviceContext>();
    std::vector<const framework::Tensor*> ins = {x};
    std::vector<framework::Tensor*> outs = {out};
    auto functor = Functor();
    auto attrs = functor.GetAttrs();
    for (auto& attr : attrs) {
      *attr.second = ctx.Attr<float>(attr.first);
    }
    paddle::operators::LaunchSameDimsElementwiseCudaKernel<T>(dev_ctx, ins,
                                                              &outs, functor);
  }
};

template <typename DeviceContext, typename Functor>
class ActivationGradCudaKernel
    : public framework::OpKernel<typename Functor::ELEMENT_TYPE> {
 public:
  using T = typename Functor::ELEMENT_TYPE;
  void Compute(const framework::ExecutionContext& ctx) const override {
    const framework::Tensor *x, *out, *d_out;
    framework::Tensor* d_x = nullptr;
    x = out = d_out = nullptr;
    ExtractActivationGradTensor<Functor::FwdDeps()>(ctx, &x, &out, &d_out,
                                                    &d_x);
    d_x->mutable_data<T>(ctx.GetPlace());
    auto& dev_ctx = ctx.template device_context<DeviceContext>();
    auto functor = Functor();
    auto attrs = functor.GetAttrs();
    for (auto& attr : attrs) {
      *attr.second = ctx.Attr<float>(attr.first);
    }

    std::vector<const framework::Tensor*> ins = {d_out};
    std::vector<framework::Tensor*> outs = {d_x};

    if (static_cast<int>(Functor::FwdDeps()) ==
        static_cast<int>(ActBwdOpFwdDeps::kDepOut)) {
      // Only need forward output Out
      ins.push_back(out);
      paddle::operators::LaunchSameDimsElementwiseCudaKernel<T>(
          dev_ctx, ins, &outs, functor);
    } else if (static_cast<int>(Functor::FwdDeps()) ==
               static_cast<int>(ActBwdOpFwdDeps::kDepX)) {
      // Only need forward input X
      ins.push_back(x);
      paddle::operators::LaunchSameDimsElementwiseCudaKernel<T>(
          dev_ctx, ins, &outs, functor);
    } else {
      paddle::operators::LaunchSameDimsElementwiseCudaKernel<T>(
          dev_ctx, ins, &outs, functor);
    }
  }
};
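// Contract sketch (comment only): a Functor supplies ELEMENT_TYPE, a device
// operator(), GetAttrs(), and optionally FwdDeps(). The two kernel wrappers
// above copy op attributes by name (e.g. "threshold") through the pointer
// pairs returned by GetAttrs() before launch, so functors never touch the
// ExecutionContext directly; FwdDeps() tells the grad wrapper whether to
// feed it the forward input X or the forward output Out.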
USE_PHI_FUNCTOR(CudaCos)
USE_PHI_FUNCTOR(CudaTan)
USE_PHI_FUNCTOR(CudaAcos)
USE_PHI_FUNCTOR(CudaSin)
USE_PHI_FUNCTOR(CudaAsin)
USE_PHI_FUNCTOR(CudaAtan)
USE_PHI_FUNCTOR(CudaSinh)
USE_PHI_FUNCTOR(CudaCosh)
USE_PHI_FUNCTOR(CudaAsinh)
USE_PHI_FUNCTOR(CudaAcosh)
USE_PHI_FUNCTOR(CudaAtanh)
USE_PHI_FUNCTOR(CudaTanh)
USE_PHI_FUNCTOR(CudaBRelu)
USE_PHI_FUNCTOR(CudaLeakyRelu)
USE_PHI_FUNCTOR(CudaThresholdedRelu)
USE_PHI_FUNCTOR(CudaHardShrink)
USE_PHI_FUNCTOR(CudaSoftShrink)
USE_PHI_FUNCTOR(CudaTanhShrink)
USE_PHI_FUNCTOR(CudaSilu)
USE_PHI_FUNCTOR(CudaELU)
USE_PHI_FUNCTOR(CudaSigmoid)
USE_PHI_FUNCTOR(CudaLogSigmoid)
USE_PHI_FUNCTOR(CudaHardSigmoid)
USE_PHI_FUNCTOR(CudaLog)
USE_PHI_FUNCTOR(CudaLog2)
USE_PHI_FUNCTOR(CudaLog10)
USE_PHI_FUNCTOR(CudaLog1p)
USE_PHI_FUNCTOR(CudaSwish)
USE_PHI_FUNCTOR(CudaHardSwish)

template <typename T>
using CudaRoundFunctor = phi::funcs::CudaRoundFunctor<T>;

template <typename T>
using CudaFloorFunctor = phi::funcs::CudaFloorFunctor<T>;

template <typename T>
using CudaCeilFunctor = phi::funcs::CudaCeilFunctor<T>;

template <typename T>
using CudaZeroGradFunctor = phi::funcs::CudaZeroGradFunctor<T>;

USE_PHI_FUNCTOR(CudaExp)
USE_PHI_FUNCTOR(CudaExpm1)
USE_PHI_FUNCTOR(CudaMish)
USE_PHI_FUNCTOR(CudaSTanh)
USE_PHI_FUNCTOR(CudaReciprocal)
USE_PHI_FUNCTOR(CudaSquare)
USE_PHI_FUNCTOR(CudaSqrt)
USE_PHI_FUNCTOR(CudaRsqrt)
USE_PHI_FUNCTOR(CudaSoftplus)

template <typename T>
using CudaELUGradNegativeAlphaFunctor =
    phi::funcs::CudaELUGradNegativeAlphaFunctor<T>;

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
namespace plat = paddle::platform;

#define REGISTER_ACTIVATION_CUDA_KERNEL(act_type, op_name, functor,      \
                                        grad_functor)                    \
  REGISTER_OP_CUDA_KERNEL(                                               \
      act_type,                                                          \
      ops::ActivationCudaKernel<plat::CUDADeviceContext,                 \
                                ops::functor<float>>,                    \
      ops::ActivationCudaKernel<plat::CUDADeviceContext,                 \
                                ops::functor<double>>,                   \
      ops::ActivationCudaKernel<plat::CUDADeviceContext,                 \
                                ops::functor<plat::float16>>,            \
      ops::ActivationCudaKernel<plat::CUDADeviceContext,                 \
                                ops::functor<plat::bfloat16>>);          \
  REGISTER_OP_CUDA_KERNEL(                                               \
      act_type##_grad,                                                   \
      ops::ActivationGradCudaKernel<plat::CUDADeviceContext,             \
                                    ops::grad_functor<float>>,           \
      ops::ActivationGradCudaKernel<plat::CUDADeviceContext,             \
                                    ops::grad_functor<double>>,          \
      ops::ActivationGradCudaKernel<plat::CUDADeviceContext,             \
                                    ops::grad_functor<plat::float16>>,   \
      ops::ActivationGradCudaKernel<plat::CUDADeviceContext,             \
                                    ops::grad_functor<plat::bfloat16>>);

#define REGISTER_ACTIVATION_CUDA_KERNEL_INT(act_type, op_name, functor,  \
                                            grad_functor)                \
  REGISTER_OP_CUDA_KERNEL(                                               \
      act_type,                                                          \
      ops::ActivationCudaKernel<plat::CUDADeviceContext,                 \
                                ops::functor<float>>,                    \
      ops::ActivationCudaKernel<plat::CUDADeviceContext,                 \
                                ops::functor<double>>,                   \
      ops::ActivationCudaKernel<plat::CUDADeviceContext,                 \
                                ops::functor<int>>,                      \
      ops::ActivationCudaKernel<plat::CUDADeviceContext,                 \
                                ops::functor<int64_t>>,                  \
      ops::ActivationCudaKernel<plat::CUDADeviceContext,                 \
                                ops::functor<plat::float16>>,            \
      ops::ActivationCudaKernel<plat::CUDADeviceContext,                 \
                                ops::functor<plat::bfloat16>>);          \
  REGISTER_OP_CUDA_KERNEL(                                               \
      act_type##_grad,                                                   \
      ops::ActivationGradCudaKernel<plat::CUDADeviceContext,             \
                                    ops::grad_functor<float>>,           \
      ops::ActivationGradCudaKernel<plat::CUDADeviceContext,             \
                                    ops::grad_functor<double>>,          \
      ops::ActivationGradCudaKernel<plat::CUDADeviceContext,             \
                                    ops::grad_functor<int>>,             \
      ops::ActivationGradCudaKernel<plat::CUDADeviceContext,             \
                                    ops::grad_functor<int64_t>>,         \
      ops::ActivationGradCudaKernel<plat::CUDADeviceContext,             \
                                    ops::grad_functor<plat::float16>>,   \
      ops::ActivationGradCudaKernel<plat::CUDADeviceContext,             \
                                    ops::grad_functor<plat::bfloat16>>);

REGISTER_OP_CUDA_KERNEL(
    relu6,
    ops::ActivationCudaKernel<plat::CUDADeviceContext,
                              ops::CudaRelu6Functor<float>>,
    ops::ActivationCudaKernel<plat::CUDADeviceContext,
                              ops::CudaRelu6Functor<double>>,
    ops::ActivationCudaKernel<plat::CUDADeviceContext,
                              ops::CudaRelu6Functor<int>>,
    ops::ActivationCudaKernel<plat::CUDADeviceContext,
                              ops::CudaRelu6Functor<int64_t>>,
    ops::ActivationCudaKernel<plat::CUDADeviceContext,
                              ops::CudaRelu6Functor<plat::float16>>,
    ops::ActivationCudaKernel<plat::CUDADeviceContext,
                              ops::CudaRelu6Functor<plat::bfloat16>>);
REGISTER_OP_CUDA_KERNEL(
    relu6_grad,
    ops::ActivationGradCudaKernel<plat::CUDADeviceContext,
                                  ops::CudaRelu6GradFunctor<float>>,
    ops::ActivationGradCudaKernel<plat::CUDADeviceContext,
                                  ops::CudaRelu6GradFunctor<double>>,
    ops::ActivationGradCudaKernel<plat::CUDADeviceContext,
                                  ops::CudaRelu6GradFunctor<int>>,
    ops::ActivationGradCudaKernel<plat::CUDADeviceContext,
                                  ops::CudaRelu6GradFunctor<int64_t>>,
    ops::ActivationGradCudaKernel<plat::CUDADeviceContext,
                                  ops::CudaRelu6GradFunctor<plat::float16>>,
    ops::ActivationGradCudaKernel<plat::CUDADeviceContext,
                                  ops::CudaRelu6GradFunctor<plat::bfloat16>>);

#define FOR_EACH_ACTIVATION_CUDA_OP(__macro)                                  \
  __macro(soft_relu, SoftRelu, CudaSoftReluFunctor, CudaSoftReluGradFunctor); \
  __macro(softsign, Softsign, CudaSoftsignFunctor, CudaSoftsignGradFunctor);

FOR_EACH_ACTIVATION_CUDA_OP(REGISTER_ACTIVATION_CUDA_KERNEL)
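// Illustrative expansion (comment only): FOR_EACH_ACTIVATION_CUDA_OP applies
// the given macro to each row, so the soft_relu row above is equivalent to
//   REGISTER_ACTIVATION_CUDA_KERNEL(soft_relu, SoftRelu, CudaSoftReluFunctor,
//                                   CudaSoftReluGradFunctor);
// which registers float/double/float16/bfloat16 kernels for both soft_relu
// and soft_relu_grad.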
#ifdef PADDLE_WITH_XPU_KP
REGISTER_OP_KERNEL(
    brelu, KP, plat::XPUPlace,
    ops::ActivationCudaKernel<plat::XPUDeviceContext,
                              phi::funcs::CudaBReluFunctor<float>>);
REGISTER_OP_KERNEL(
    brelu_grad, KP, plat::XPUPlace,
    ops::ActivationGradCudaKernel<plat::XPUDeviceContext,
                                  phi::funcs::CudaBReluGradFunctor<float>>);

REGISTER_OP_KERNEL(
    ceil, KP, plat::XPUPlace,
    ops::ActivationCudaKernel<plat::XPUDeviceContext,
                              ops::CudaCeilFunctor<float>>);
REGISTER_OP_KERNEL(
    ceil_grad, KP, plat::XPUPlace,
    ops::ActivationGradCudaKernel<plat::XPUDeviceContext,
                                  ops::CudaZeroGradFunctor<float>>);

REGISTER_OP_KERNEL(
    celu, KP, plat::XPUPlace,
    ops::ActivationCudaKernel<plat::XPUDeviceContext,
                              phi::funcs::CudaCELUFunctor<float>>);
REGISTER_OP_KERNEL(
    celu_grad, KP, plat::XPUPlace,
    ops::ActivationGradCudaKernel<plat::XPUDeviceContext,
                                  phi::funcs::CudaCELUGradFunctor<float>>);

REGISTER_OP_KERNEL(
    elu, KP, plat::XPUPlace,
    ops::ActivationCudaKernel<plat::XPUDeviceContext,
                              phi::funcs::CudaELUFunctor<float>>);
REGISTER_OP_KERNEL(
    elu_grad, KP, plat::XPUPlace,
    ops::ActivationGradCudaKernel<plat::XPUDeviceContext,
                                  phi::funcs::CudaELUGradFunctor<float>>);

REGISTER_OP_KERNEL(
    exp, KP, plat::XPUPlace,
    ops::ActivationCudaKernel<plat::XPUDeviceContext,
                              phi::funcs::CudaExpFunctor<float>>);
REGISTER_OP_KERNEL(
    exp_grad, KP, plat::XPUPlace,
    ops::ActivationGradCudaKernel<plat::XPUDeviceContext,
                                  phi::funcs::CudaExpGradFunctor<float>>);

REGISTER_OP_KERNEL(
    floor, KP, plat::XPUPlace,
    ops::ActivationCudaKernel<plat::XPUDeviceContext,
                              ops::CudaFloorFunctor<float>>);
REGISTER_OP_KERNEL(
    floor_grad, KP, plat::XPUPlace,
    ops::ActivationGradCudaKernel<plat::XPUDeviceContext,
                                  ops::CudaZeroGradFunctor<float>>);

REGISTER_OP_KERNEL(
    hard_shrink, KP, plat::XPUPlace,
    ops::ActivationCudaKernel<plat::XPUDeviceContext,
                              phi::funcs::CudaHardShrinkFunctor<float>>);
REGISTER_OP_KERNEL(
    hard_shrink_grad, KP, plat::XPUPlace,
    ops::ActivationGradCudaKernel<
        plat::XPUDeviceContext,
        phi::funcs::CudaHardShrinkGradFunctor<float>>);

REGISTER_OP_KERNEL(
    hard_sigmoid, KP, plat::XPUPlace,
    ops::ActivationCudaKernel<plat::XPUDeviceContext,
                              phi::funcs::CudaHardSigmoidFunctor<float>>);
REGISTER_OP_KERNEL(
    hard_sigmoid_grad, KP, plat::XPUPlace,
    ops::ActivationGradCudaKernel<
        plat::XPUDeviceContext,
        phi::funcs::CudaHardSigmoidGradFunctor<float>>);

REGISTER_OP_KERNEL(
    hard_swish, KP, plat::XPUPlace,
    ops::ActivationCudaKernel<plat::XPUDeviceContext,
                              phi::funcs::CudaHardSwishFunctor<float>>);
REGISTER_OP_KERNEL(
    hard_swish_grad, KP, plat::XPUPlace,
    ops::ActivationGradCudaKernel<
        plat::XPUDeviceContext,
        phi::funcs::CudaHardSwishGradFunctor<float>>);

REGISTER_OP_KERNEL(
    leaky_relu, KP, plat::XPUPlace,
    ops::ActivationCudaKernel<plat::XPUDeviceContext,
                              phi::funcs::CudaLeakyReluFunctor<float>>);
REGISTER_OP_KERNEL(
    leaky_relu_grad, KP, plat::XPUPlace,
    ops::ActivationGradCudaKernel<
        plat::XPUDeviceContext,
        phi::funcs::CudaLeakyReluGradFunctor<float>>);

REGISTER_OP_KERNEL(
    log, KP, plat::XPUPlace,
    ops::ActivationCudaKernel<plat::XPUDeviceContext,
                              phi::funcs::CudaLogFunctor<float>>);
REGISTER_OP_KERNEL(
    log_grad, KP, plat::XPUPlace,
    ops::ActivationGradCudaKernel<plat::XPUDeviceContext,
                                  phi::funcs::CudaLogGradFunctor<float>>);

REGISTER_OP_KERNEL(
    log1p, KP, plat::XPUPlace,
    ops::ActivationCudaKernel<plat::XPUDeviceContext,
                              phi::funcs::CudaLog1pFunctor<float>>);
REGISTER_OP_KERNEL(
    log1p_grad, KP, plat::XPUPlace,
    ops::ActivationGradCudaKernel<plat::XPUDeviceContext,
                                  phi::funcs::CudaLog1pGradFunctor<float>>);

REGISTER_OP_KERNEL(
    logsigmoid, KP, plat::XPUPlace,
    ops::ActivationCudaKernel<plat::XPUDeviceContext,
                              phi::funcs::CudaLogSigmoidFunctor<float>>);
REGISTER_OP_KERNEL(
    logsigmoid_grad, KP, plat::XPUPlace,
    ops::ActivationGradCudaKernel<
        plat::XPUDeviceContext,
        phi::funcs::CudaLogSigmoidGradFunctor<float>>);

REGISTER_OP_KERNEL(
    reciprocal, KP, plat::XPUPlace,
    ops::ActivationCudaKernel<plat::XPUDeviceContext,
                              phi::funcs::CudaReciprocalFunctor<float>>);
REGISTER_OP_KERNEL(
    reciprocal_grad, KP, plat::XPUPlace,
    ops::ActivationGradCudaKernel<
        plat::XPUDeviceContext,
        phi::funcs::CudaReciprocalGradFunctor<float>>);

REGISTER_OP_KERNEL(
    relu, KP, plat::XPUPlace,
    ops::ActivationCudaKernel<plat::XPUDeviceContext,
                              phi::funcs::CudaReluFunctor<float>>);
REGISTER_OP_KERNEL(
    relu_grad, KP, plat::XPUPlace,
    ops::ActivationGradCudaKernel<plat::XPUDeviceContext,
                                  phi::funcs::CudaReluGradFunctor<float>>);

REGISTER_OP_KERNEL(
    relu6, KP, plat::XPUPlace,
    ops::ActivationCudaKernel<plat::XPUDeviceContext,
                              ops::CudaRelu6Functor<float>>);
REGISTER_OP_KERNEL(
    relu6_grad, KP, plat::XPUPlace,
    ops::ActivationGradCudaKernel<plat::XPUDeviceContext,
                                  ops::CudaRelu6GradFunctor<float>>);

REGISTER_OP_KERNEL(
    sigmoid, KP, plat::XPUPlace,
    ops::ActivationCudaKernel<plat::XPUDeviceContext,
                              phi::funcs::CudaSigmoidFunctor<float>>);
REGISTER_OP_KERNEL(
    sigmoid_grad, KP, plat::XPUPlace,
    ops::ActivationGradCudaKernel<plat::XPUDeviceContext,
                                  phi::funcs::CudaSigmoidGradFunctor<float>>);

REGISTER_OP_KERNEL(
    silu, KP, plat::XPUPlace,
    ops::ActivationCudaKernel<plat::XPUDeviceContext,
                              phi::funcs::CudaSiluFunctor<float>>);
REGISTER_OP_KERNEL(
    silu_grad, KP, plat::XPUPlace,
    ops::ActivationGradCudaKernel<plat::XPUDeviceContext,
                                  phi::funcs::CudaSiluGradFunctor<float>>);

REGISTER_OP_KERNEL(
    soft_relu, KP, plat::XPUPlace,
    ops::ActivationCudaKernel<plat::XPUDeviceContext,
                              ops::CudaSoftReluFunctor<float>>);
REGISTER_OP_KERNEL(
    soft_relu_grad, KP, plat::XPUPlace,
    ops::ActivationGradCudaKernel<plat::XPUDeviceContext,
                                  ops::CudaSoftReluGradFunctor<float>>);

REGISTER_OP_KERNEL(
    softplus, KP, plat::XPUPlace,
    ops::ActivationCudaKernel<plat::XPUDeviceContext,
                              phi::funcs::CudaSoftplusFunctor<float>>);
REGISTER_OP_KERNEL(
    softplus_grad, KP, plat::XPUPlace,
    ops::ActivationGradCudaKernel<plat::XPUDeviceContext,
                                  phi::funcs::CudaSoftplusGradFunctor<float>>);

REGISTER_OP_KERNEL(
    softshrink, KP, plat::XPUPlace,
    ops::ActivationCudaKernel<plat::XPUDeviceContext,
                              phi::funcs::CudaSoftShrinkFunctor<float>>);
REGISTER_OP_KERNEL(
    softshrink_grad, KP, plat::XPUPlace,
    ops::ActivationGradCudaKernel<
        plat::XPUDeviceContext,
        phi::funcs::CudaSoftShrinkGradFunctor<float>>);

REGISTER_OP_KERNEL(
    softsign, KP, plat::XPUPlace,
    ops::ActivationCudaKernel<plat::XPUDeviceContext,
                              ops::CudaSoftsignFunctor<float>>);
REGISTER_OP_KERNEL(
    softsign_grad, KP, plat::XPUPlace,
    ops::ActivationGradCudaKernel<plat::XPUDeviceContext,
                                  ops::CudaSoftsignGradFunctor<float>>);

REGISTER_OP_KERNEL(
    sqrt, KP, plat::XPUPlace,
    ops::ActivationCudaKernel<plat::XPUDeviceContext,
                              phi::funcs::CudaSqrtFunctor<float>>);
REGISTER_OP_KERNEL(
    sqrt_grad, KP, plat::XPUPlace,
    ops::ActivationGradCudaKernel<plat::XPUDeviceContext,
                                  phi::funcs::CudaSqrtGradFunctor<float>>);

REGISTER_OP_KERNEL(
    square, KP, plat::XPUPlace,
    ops::ActivationCudaKernel<plat::XPUDeviceContext,
                              phi::funcs::CudaSquareFunctor<float>>);
REGISTER_OP_KERNEL(
    square_grad, KP, plat::XPUPlace,
    ops::ActivationGradCudaKernel<plat::XPUDeviceContext,
                                  phi::funcs::CudaSquareGradFunctor<float>>);

REGISTER_OP_KERNEL(
    swish, KP, plat::XPUPlace,
    ops::ActivationCudaKernel<plat::XPUDeviceContext,
                              phi::funcs::CudaSwishFunctor<float>>);
REGISTER_OP_KERNEL(
    swish_grad, KP, plat::XPUPlace,
    ops::ActivationGradCudaKernel<plat::XPUDeviceContext,
                                  phi::funcs::CudaSwishGradFunctor<float>>);

REGISTER_OP_KERNEL(
    thresholded_relu, KP, plat::XPUPlace,
    ops::ActivationCudaKernel<plat::XPUDeviceContext,
                              phi::funcs::CudaThresholdedReluFunctor<float>>);
REGISTER_OP_KERNEL(
    thresholded_relu_grad, KP, plat::XPUPlace,
    ops::ActivationGradCudaKernel<
        plat::XPUDeviceContext,
        phi::funcs::CudaThresholdedReluGradFunctor<float>>);
#endif  // PADDLE_WITH_XPU_KP
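// Note: the PADDLE_WITH_XPU_KP block above reuses the same kernel wrappers on
// plat::XPUPlace through the Kernel Primitives (KP) path, registering only
// float instantiations there.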