/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/activation_op.h"
#include "paddle/fluid/operators/amp/fp16_type_traits.h"
#include "paddle/fluid/operators/elementwise/elementwise_op_impl.cu.h"
#include "paddle/fluid/operators/math/math_cuda_utils.h"
#include "paddle/fluid/platform/bfloat16.h"
#include "paddle/fluid/platform/cuda_device_function.h"

namespace paddle {
namespace operators {

template <typename T>
struct CudaReluFunctor : public BaseActivationFunctor<T> {
  T zero = static_cast<T>(0.0f);

  // relu(x) = max(x, 0)
  __device__ __forceinline__ T operator()(const T& x) const {
    return x > zero ? x : zero;
  }
};

template <typename T>
struct CudaReluGradFunctor : public BaseActivationFunctor<T> {
  T zero = static_cast<T>(0.0f);

  // dx = dout * (out > 0)
  __device__ __forceinline__ T operator()(const T& dout, const T& out) const {
    return out > zero ? dout : zero;
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; }
};

template <typename T>
struct CudaLeakyReluFunctor : public BaseActivationFunctor<T> {
  T zero = static_cast<T>(0.0f);
  float alpha;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"alpha", &alpha}};
  }

  // leakyrelu(x) = x > 0 ? x : alpha * x
  __device__ __forceinline__ T operator()(const T& x) const {
    return x > zero ? x : static_cast<T>(alpha) * x;
  }
};

template <typename T>
struct CudaLeakyReluGradFunctor : public BaseActivationFunctor<T> {
  T zero = static_cast<T>(0.0f);
  float alpha;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"alpha", &alpha}};
  }

  // dx = dout * (x > 0 ? 1 : alpha)
  __device__ __forceinline__ T operator()(const T& dout, const T& x) const {
    return x > zero ? dout : static_cast<T>(alpha) * dout;
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};
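// Usage sketch (illustrative only): this mirrors what ActivationCudaKernel
// further below does; the tensor/context names here are hypothetical.
//
//   std::vector<const framework::Tensor*> ins = {x};
//   std::vector<framework::Tensor*> outs = {out};
//   LaunchSameDimsElementwiseCudaKernel<ElementwiseType::kUnary, T, T>(
//       dev_ctx, ins, &outs, CudaReluFunctor<T>());
//
// Grad functors run through the same launcher as binary kernels, with
// {dout, x} or {dout, out} as inputs depending on FwdDeps().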
template <typename T>
struct CudaSigmoidFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;
  MPType one = static_cast<MPType>(1.0f);

  // sigmoid(x) = 1 / (1 + exp(-x))
  __device__ __forceinline__ T operator()(const T& arg_x) const {
    MPType x = static_cast<MPType>(arg_x);
    return static_cast<T>(one / (one + exp(-x)));
  }
};

template <typename T>
struct CudaSigmoidGradFunctor : public BaseActivationFunctor<T> {
  T one = static_cast<T>(1.0f);

  // dx = dout * out * (1 - out)
  __device__ __forceinline__ T operator()(const T& dout, const T& out) const {
    return dout * out * (one - out);
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; }
};

template <typename T>
struct CudaSiluFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;
  MPType one = static_cast<MPType>(1.0f);

  // silu(x) = x / (1 + exp(-x))
  __device__ __forceinline__ T operator()(const T& arg_x) const {
    MPType x = static_cast<MPType>(arg_x);
    return static_cast<T>(x / (one + exp(-x)));
  }
};

template <typename T>
struct CudaSiluGradFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;
  MPType one = static_cast<MPType>(1.0f);

  // dx = dout * (1 + exp(-x) + x * exp(-x)) / (1 + exp(-x))^2
  //    = dout * sigmoid(x) * (1 + x * (1 - sigmoid(x)))
  __device__ __forceinline__ T operator()(const T& arg_dout,
                                          const T& arg_x) const {
    MPType dout = static_cast<MPType>(arg_dout);
    MPType x = static_cast<MPType>(arg_x);
    MPType temp = one / (one + exp(-x));
    return static_cast<T>(dout * (temp * (one + x * (one - temp))));
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

template <typename T>
struct CudaLogSigmoidFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;
  MPType zero = static_cast<MPType>(0.0f);

  // logsigmoid(x) = log(1 / (1 + exp(-x)))
  // For numerical stability,
  // logsigmoid(x) =
  //   - (max(-x, 0) + log(exp(-max(-x, 0)) + exp(-x - max(-x, 0))))
  __device__ __forceinline__ T operator()(const T& arg_x) const {
    MPType x = static_cast<MPType>(arg_x);
    MPType temp = x > zero ? zero : -x;
    return static_cast<T>(-temp - log(exp(-temp) + exp(-x - temp)));
  }
};

template <typename T>
struct CudaLogSigmoidGradFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;
  MPType zero = static_cast<MPType>(0.0f);

  // dx = dout * exp(-x) / (1 + exp(-x))
  // For numerical stability:
  // dx = dout * exp(-x - max(-x, 0)) /
  //      (exp(-max(-x, 0)) + exp(-x - max(-x, 0)))
  __device__ __forceinline__ T operator()(const T& arg_dout,
                                          const T& arg_x) const {
    MPType dout = static_cast<MPType>(arg_dout);
    MPType x = static_cast<MPType>(arg_x);
    MPType temp1 = x > zero ? zero : -x;
    MPType temp2 = exp(-x - temp1);
    return static_cast<T>(dout * (temp2 / (exp(-temp1) + temp2)));
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};
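// Worked check of the stable logsigmoid form above: with m = max(-x, 0),
//   -(m + log(exp(-m) + exp(-x - m)))
//     = -(m + log(exp(-m) * (1 + exp(-x))))
//     = -log(1 + exp(-x)) = logsigmoid(x).
// The rewrite matters for large negative x, e.g. x = -80 in float: the naive
// form needs exp(80), which overflows, while here m = 80 and both exponents
// are <= 0, giving logsigmoid(-80) = -80 to within rounding.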
template <typename T>
struct CudaAtanFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;

  // atan(x) = atan(x)
  __device__ __forceinline__ T operator()(const T& arg_x) const {
    MPType x = static_cast<MPType>(arg_x);
    return static_cast<T>(atan(x));
  }
};

template <typename T>
struct CudaAtanGradFunctor : public BaseActivationFunctor<T> {
  T one = static_cast<T>(1.0f);

  // dx = dout / (1 + x^2)
  __device__ __forceinline__ T operator()(const T& dout, const T& x) const {
    return dout / (one + x * x);
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

template <typename T>
struct CudaSoftShrinkFunctor : public BaseActivationFunctor<T> {
  float lambda;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"lambda", &lambda}};
  }

  // softshrink(x) = x - lambda, if x > lambda;
  //                 x + lambda, if x < -lambda;
  //                 0, otherwise.
  __device__ __forceinline__ T operator()(const T& x) const {
    T l = static_cast<T>(lambda);
    T temp1 = static_cast<T>(x > l);
    T temp2 = static_cast<T>(x < -l);
    return temp1 * (x - l) + temp2 * (x + l);
  }
};

template <typename T>
struct CudaSoftShrinkGradFunctor : public BaseActivationFunctor<T> {
  T zero = static_cast<T>(0.0f);
  float lambda;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"lambda", &lambda}};
  }

  // dx = dout, if x > lambda or x < -lambda, else 0
  __device__ __forceinline__ T operator()(const T& dout, const T& x) const {
    T l = static_cast<T>(lambda);
    return (x >= -l && x <= l) ? zero : dout;
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

template <typename T>
struct CudaCeilFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;

  // ceil(x) = ceil(x)
  __device__ __forceinline__ T operator()(const T& arg_x) const {
    MPType x = static_cast<MPType>(arg_x);
    return static_cast<T>(ceil(x));
  }
};

template <typename T>
struct CudaFloorFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;

  // floor(x) = floor(x)
  __device__ __forceinline__ T operator()(const T& arg_x) const {
    MPType x = static_cast<MPType>(arg_x);
    return static_cast<T>(floor(x));
  }
};

template <typename T>
struct CudaRoundFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;

  // round(x) = round(x)
  __device__ __forceinline__ T operator()(const T& arg_x) const {
    MPType x = static_cast<MPType>(arg_x);
    return static_cast<T>(round(x));
  }
};

// GradFunctor for ceil, floor and round
template <typename T>
struct CudaZeroGradFunctor : public BaseActivationFunctor<T> {
  __device__ __forceinline__ T operator()(const T& x) const {
    return static_cast<T>(0.0f);
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kNoDeps; }
};
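// Reading aid (not new behavior): the FwdDeps() tag reported by each grad
// functor decides which forward tensor the generic ActivationGradCudaKernel
// further below appends to its input list:
//   kDepX   -> {dout, x}    e.g. CudaSoftShrinkGradFunctor
//   kDepOut -> {dout, out}  e.g. CudaReluGradFunctor
//   kNoDeps -> {dout}       e.g. CudaZeroGradFunctor (ceil/floor/round)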
template <typename T>
struct CudaCosFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;

  // cos(x) = cos(x)
  __device__ __forceinline__ T operator()(const T& arg_x) const {
    MPType x = static_cast<MPType>(arg_x);
    return static_cast<T>(cos(x));
  }
};

template <typename T>
struct CudaCosGradFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;

  // dx = dout * (-sin(x))
  __device__ __forceinline__ T operator()(const T& arg_dout,
                                          const T& arg_x) const {
    MPType dout = static_cast<MPType>(arg_dout);
    MPType x = static_cast<MPType>(arg_x);
    return static_cast<T>(-dout * sin(x));
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

template <typename T>
struct CudaSinFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;

  // sin(x) = sin(x)
  __device__ __forceinline__ T operator()(const T& arg_x) const {
    MPType x = static_cast<MPType>(arg_x);
    return static_cast<T>(sin(x));
  }
};

template <typename T>
struct CudaSinGradFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;

  // dx = dout * cos(x)
  __device__ __forceinline__ T operator()(const T& arg_dout,
                                          const T& arg_x) const {
    MPType dout = static_cast<MPType>(arg_dout);
    MPType x = static_cast<MPType>(arg_x);
    return static_cast<T>(dout * cos(x));
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

template <typename T>
struct CudaTanFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;

  // tan(x) = tan(x)
  __device__ __forceinline__ T operator()(const T& arg_x) const {
    MPType x = static_cast<MPType>(arg_x);
    return static_cast<T>(tan(x));
  }
};

template <typename T>
struct CudaTanGradFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;

  // dx = dout / cos(x)^2
  __device__ __forceinline__ T operator()(const T& arg_dout,
                                          const T& arg_x) const {
    MPType dout = static_cast<MPType>(arg_dout);
    MPType x = static_cast<MPType>(arg_x);
    return static_cast<T>(dout / (cos(x) * cos(x)));
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

template <typename T>
struct CudaAsinFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;

  // asin(x) = asin(x)
  __device__ __forceinline__ T operator()(const T& arg_x) const {
    MPType x = static_cast<MPType>(arg_x);
    return static_cast<T>(asin(x));
  }
};

template <typename T>
struct CudaAsinGradFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;
  MPType one = static_cast<MPType>(1.0f);

  // dx = dout / sqrt(1 - x^2)
  __device__ __forceinline__ T operator()(const T& arg_dout,
                                          const T& arg_x) const {
    MPType dout = static_cast<MPType>(arg_dout);
    MPType x = static_cast<MPType>(arg_x);
    return static_cast<T>(dout / sqrt(one - x * x));
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

template <typename T>
struct CudaAcosFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;

  // acos(x) = acos(x)
  __device__ __forceinline__ T operator()(const T& arg_x) const {
    MPType x = static_cast<MPType>(arg_x);
    return static_cast<T>(acos(x));
  }
};

template <typename T>
struct CudaAcosGradFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;
  MPType one = static_cast<MPType>(1.0f);

  // dx = -dout / sqrt(1 - x^2)
  __device__ __forceinline__ T operator()(const T& arg_dout,
                                          const T& arg_x) const {
    MPType dout = static_cast<MPType>(arg_dout);
    MPType x = static_cast<MPType>(arg_x);
    return static_cast<T>(-dout / sqrt(one - x * x));
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

template <typename T>
struct CudaCoshFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;

  // cosh(x) = cosh(x)
  __device__ __forceinline__ T operator()(const T& arg_x) const {
    MPType x = static_cast<MPType>(arg_x);
    return static_cast<T>(cosh(x));
  }
};

template <typename T>
struct CudaCoshGradFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;

  // dx = dout * sinh(x)
  __device__ __forceinline__ T operator()(const T& arg_dout,
                                          const T& arg_x) const {
    MPType dout = static_cast<MPType>(arg_dout);
    MPType x = static_cast<MPType>(arg_x);
    return static_cast<T>(dout * sinh(x));
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

template <typename T>
struct CudaSinhFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;

  // sinh(x) = sinh(x)
  __device__ __forceinline__ T operator()(const T& arg_x) const {
    MPType x = static_cast<MPType>(arg_x);
    return static_cast<T>(sinh(x));
  }
};
template <typename T>
struct CudaSinhGradFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;

  // dx = dout * cosh(x)
  __device__ __forceinline__ T operator()(const T& arg_dout,
                                          const T& arg_x) const {
    MPType dout = static_cast<MPType>(arg_dout);
    MPType x = static_cast<MPType>(arg_x);
    return static_cast<T>(dout * cosh(x));
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

template <typename T>
struct CudaTanhFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;

  // tanh(x) = tanh(x)
  __device__ __forceinline__ T operator()(const T& arg_x) const {
    MPType x = static_cast<MPType>(arg_x);
    return static_cast<T>(tanh(x));
  }
};

template <typename T>
struct CudaTanhGradFunctor : public BaseActivationFunctor<T> {
  T one = static_cast<T>(1.0f);

  // dx = dout * (1 - out^2)
  __device__ __forceinline__ T operator()(const T& dout, const T& out) const {
    return dout * (one - out * out);
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; }
};

template <typename T>
struct CudaReciprocalFunctor : public BaseActivationFunctor<T> {
  T one = static_cast<T>(1.0f);

  // reciprocal(x) = 1 / x
  __device__ __forceinline__ T operator()(const T& x) const { return one / x; }
};

template <typename T>
struct CudaReciprocalGradFunctor : public BaseActivationFunctor<T> {
  // dx = -dout * out^2
  __device__ __forceinline__ T operator()(const T& dout, const T& out) const {
    return -dout * out * out;
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; }
};

template <typename T>
struct CudaExpFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;

  // exp(x) = exp(x)
  __device__ __forceinline__ T operator()(const T& arg_x) const {
    MPType x = static_cast<MPType>(arg_x);
    return static_cast<T>(exp(x));
  }
};

template <typename T>
struct CudaExpGradFunctor : public BaseActivationFunctor<T> {
  // dx = dout * out
  __device__ __forceinline__ T operator()(const T& dout, const T& out) const {
    return dout * out;
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; }
};

template <typename T>
struct CudaExpm1Functor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;

  // expm1(x) = expm1(x)
  __device__ __forceinline__ T operator()(const T& arg_x) const {
    MPType x = static_cast<MPType>(arg_x);
    return static_cast<T>(expm1(x));
  }
};

template <typename T>
struct CudaExpm1GradFunctor : public BaseActivationFunctor<T> {
  // dx = dout * exp(x) = dout * (out + 1)
  __device__ __forceinline__ T operator()(const T& dout, const T& out) const {
    return dout * out + dout;
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; }
};

template <typename T>
struct CudaLogFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;

  // log(x) = log(x)
  __device__ __forceinline__ T operator()(const T& arg_x) const {
    MPType x = static_cast<MPType>(arg_x);
    return static_cast<T>(log(x));
  }
};

template <typename T>
struct CudaLogGradFunctor : public BaseActivationFunctor<T> {
  // dx = dout / x
  __device__ __forceinline__ T operator()(const T& dout, const T& x) const {
    return dout / x;
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

template <typename T>
struct CudaSquareFunctor : public BaseActivationFunctor<T> {
  // square(x) = x * x
  __device__ __forceinline__ T operator()(const T& x) const { return x * x; }
};

template <typename T>
struct CudaSquareGradFunctor : public BaseActivationFunctor<T> {
  T two = static_cast<T>(2.0f);

  // dx = dout * 2 * x
  __device__ __forceinline__ T operator()(const T& dout, const T& x) const {
    return dout * two * x;
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};
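// Note on the MPType pattern used by the functors above: for T =
// platform::float16, details::MPTypeTrait<T>::Type is float, so each functor
// upcasts its operands once, evaluates exp/log/tanh/... in float, and casts
// the result back to T at the end, e.g.
//   MPType x = static_cast<MPType>(arg_x);  // fp16 -> fp32
//   return static_cast<T>(tanh(x));         // fp32 math, fp16 result
// This avoids accumulating fp16 rounding error inside the transcendentals.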
template <typename T>
struct CudaSqrtFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;

  // sqrt(x) = sqrt(x)
  __device__ __forceinline__ T operator()(const T& arg_x) const {
    MPType x = static_cast<MPType>(arg_x);
    return static_cast<T>(sqrt(x));
  }
};

template <typename T>
struct CudaSqrtGradFunctor : public BaseActivationFunctor<T> {
  T one_half = static_cast<T>(0.5f);

  // dx = dout * 0.5 / out
  __device__ __forceinline__ T operator()(const T& dout, const T& out) const {
    return one_half * dout / out;
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; }
};

template <typename T>
struct CudaRsqrtFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;

  // rsqrt(x) = rsqrt(x)
  __device__ __forceinline__ T operator()(const T& arg_x) const {
    MPType x = static_cast<MPType>(arg_x);
    return static_cast<T>(rsqrt(x));
  }
};

template <typename T>
struct CudaRsqrtGradFunctor : public BaseActivationFunctor<T> {
  T minus_one_half = static_cast<T>(-0.5f);

  // dx = -0.5 * dout * out^3
  __device__ __forceinline__ T operator()(const T& dout, const T& out) const {
    return minus_one_half * dout * out * out * out;
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; }
};
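// Derivation sketch for the two grads above: with out = sqrt(x),
//   d out / dx = 1 / (2 * sqrt(x)) = 0.5 / out,
// and with out = rsqrt(x) = x^(-1/2),
//   d out / dx = -0.5 * x^(-3/2) = -0.5 * out^3,
// so both backward functors can be written purely in terms of Out (kDepOut).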
template <typename T>
struct CudaLog1pFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;
  MPType one = static_cast<MPType>(1.0f);

  // log1p(x) = log(1 + x)
  __device__ __forceinline__ T operator()(const T& arg_x) const {
    MPType x = static_cast<MPType>(arg_x);
    return static_cast<T>(log(one + x));
  }
};

template <typename T>
struct CudaLog1pGradFunctor : public BaseActivationFunctor<T> {
  T one = static_cast<T>(1.0f);

  // dx = dout / (1 + x)
  __device__ __forceinline__ T operator()(const T& dout, const T& x) const {
    return dout / (one + x);
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

template <typename T>
struct CudaLog2Functor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;

  // log2(x) = log2(x)
  __device__ __forceinline__ T operator()(const T& arg_x) const {
    MPType x = static_cast<MPType>(arg_x);
    return static_cast<T>(log2(x));
  }
};

template <typename T>
struct CudaLog2GradFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;
  T log_two = static_cast<T>(log(static_cast<MPType>(2.0f)));

  // dx = dout / (x * log(2))
  __device__ __forceinline__ T operator()(const T& dout, const T& x) const {
    return dout / (x * log_two);
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

template <typename T>
struct CudaLog10Functor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;

  // log10(x) = log10(x)
  __device__ __forceinline__ T operator()(const T& arg_x) const {
    MPType x = static_cast<MPType>(arg_x);
    return static_cast<T>(log10(x));
  }
};

template <typename T>
struct CudaLog10GradFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;
  T log_ten = static_cast<T>(log(static_cast<MPType>(10.0f)));

  // dx = dout / (x * log(10))
  __device__ __forceinline__ T operator()(const T& dout, const T& x) const {
    return dout / (x * log_ten);
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

template <typename T>
struct CudaBReluFunctor : public BaseActivationFunctor<T> {
  float t_min;
  float t_max;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"t_min", &t_min}, {"t_max", &t_max}};
  }

  // brelu(x) = min(max(x, t_min), t_max)
  __device__ __forceinline__ T operator()(const T& x) const {
    T t_min_cast = static_cast<T>(t_min);
    T t_max_cast = static_cast<T>(t_max);
    T temp_max = x > t_min_cast ? x : t_min_cast;
    T temp_min = temp_max < t_max_cast ? temp_max : t_max_cast;
    return temp_min;
  }
};

template <typename T>
struct CudaBReluGradFunctor : public BaseActivationFunctor<T> {
  T zero = static_cast<T>(0.0f);
  float t_min;
  float t_max;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"t_min", &t_min}, {"t_max", &t_max}};
  }

  // dx = (x > t_min && x < t_max) ? dout : 0
  __device__ __forceinline__ T operator()(const T& dout, const T& x) const {
    T t_min_cast = static_cast<T>(t_min);
    T t_max_cast = static_cast<T>(t_max);
    return (x > t_min_cast && x < t_max_cast) ? dout : zero;
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

template <typename T>
struct CudaSoftReluFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;
  MPType one = static_cast<MPType>(1.0f);
  float threshold;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"threshold", &threshold}};
  }

  // soft_relu(x) = log(1 + exp(max(min(x, threshold), -threshold)))
  // threshold should not be negative
  __device__ __forceinline__ T operator()(const T& arg_x) const {
    MPType x = static_cast<MPType>(arg_x);
    MPType t = static_cast<MPType>(threshold);
    MPType temp_min = x < t ? x : t;
    MPType temp_max = temp_min > -t ? temp_min : -t;
    return static_cast<T>(log(one + exp(temp_max)));
  }
};

template <typename T>
struct CudaSoftReluGradFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;
  MPType one = static_cast<MPType>(1.0f);
  float threshold;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"threshold", &threshold}};
  }

  // dx = (out > -threshold && out < threshold) ? dout * (1 - exp(-out)) : 0
  // threshold should not be negative
  __device__ __forceinline__ T operator()(const T& arg_dout,
                                          const T& arg_out) const {
    MPType dout = static_cast<MPType>(arg_dout);
    MPType out = static_cast<MPType>(arg_out);
    MPType t = static_cast<MPType>(threshold);
    return (out > -t && out < t) ? static_cast<T>(dout * (one - exp(-out)))
                                 : static_cast<T>(0.0f);
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; }
};
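// Derivation sketch for CudaSoftReluGradFunctor above: inside the clipping
// band, out = log(1 + exp(x)), so exp(out) = 1 + exp(x) and
//   d out / dx = exp(x) / (1 + exp(x)) = 1 - exp(-out),
// which is why the backward functor can be written in terms of Out alone
// (kDepOut).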
template <typename T>
struct CudaSTanhFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;
  float scale_a;
  float scale_b;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"scale_a", &scale_a}, {"scale_b", &scale_b}};
  }

  // stanh(x) = b * tanh(a * x)
  __device__ __forceinline__ T operator()(const T& arg_x) const {
    MPType x = static_cast<MPType>(arg_x);
    MPType a = static_cast<MPType>(scale_a);
    MPType b = static_cast<MPType>(scale_b);
    return static_cast<T>(b * tanh(a * x));
  }
};

template <typename T>
struct CudaSTanhGradFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;
  MPType one = static_cast<MPType>(1.0f);
  float scale_a;
  float scale_b;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"scale_a", &scale_a}, {"scale_b", &scale_b}};
  }

  // dx = dout * a * b * (1 - tanh(a * x) * tanh(a * x))
  __device__ __forceinline__ T operator()(const T& arg_dout,
                                          const T& arg_x) const {
    MPType dout = static_cast<MPType>(arg_dout);
    MPType x = static_cast<MPType>(arg_x);
    MPType a = static_cast<MPType>(scale_a);
    MPType b = static_cast<MPType>(scale_b);
    MPType temp = tanh(a * x);
    return static_cast<T>(dout * a * b * (one - temp * temp));
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

template <typename T>
struct CudaSoftplusFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;
  MPType one = static_cast<MPType>(1.0f);
  float beta;
  float threshold;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"beta", &beta}, {"threshold", &threshold}};
  }

  // softplus(x) = beta * x > threshold ? x : log(1 + exp(beta * x)) / beta
  __device__ __forceinline__ T operator()(const T& arg_x) const {
    MPType x = static_cast<MPType>(arg_x);
    MPType b = static_cast<MPType>(beta);
    MPType t = static_cast<MPType>(threshold);
    MPType x_beta = x * beta;
    return static_cast<T>(x_beta > t ? x : log(one + exp(x_beta)) / b);
  }
};

template <typename T>
struct CudaSoftplusGradFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;
  MPType one = static_cast<MPType>(1.0f);
  float beta;
  float threshold;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"beta", &beta}, {"threshold", &threshold}};
  }

  // dx = x * beta > threshold ? dout : dout / (1 + exp(-beta * x))
  __device__ __forceinline__ T operator()(const T& arg_dout,
                                          const T& arg_x) const {
    MPType dout = static_cast<MPType>(arg_dout);
    MPType x = static_cast<MPType>(arg_x);
    MPType b = static_cast<MPType>(beta);
    MPType t = static_cast<MPType>(threshold);
    MPType x_beta = x * beta;
    return x_beta > t ? arg_dout : static_cast<T>(dout / (one + exp(-x_beta)));
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};
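// Why the softplus threshold exists (numeric sketch): once beta * x is large,
// log(1 + exp(beta * x)) / beta equals x to within float precision. E.g. for
// beta = 1, x = 20: log(1 + e^20) = 20 + log(1 + e^-20), and log(1 + e^-20)
// is about 2e-9, far below float resolution at that magnitude. Returning x
// directly therefore avoids overflow in exp() at no accuracy cost.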
template <typename T>
struct CudaSoftsignFunctor : public BaseActivationFunctor<T> {
  T one = static_cast<T>(1.0f);

  // softsign(x) = x / (1 + abs(x))
  __device__ __forceinline__ T operator()(const T& x) const {
    return x / (one + abs(x));
  }
};

template <typename T>
struct CudaSoftsignGradFunctor : public BaseActivationFunctor<T> {
  T one = static_cast<T>(1.0f);

  // dx = dout / (1 + abs(x))^2
  __device__ __forceinline__ T operator()(const T& dout, const T& x) const {
    T temp = one + abs(x);
    return dout / (temp * temp);
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

template <typename T>
struct CudaRelu6Functor : public BaseActivationFunctor<T> {
  T zero = static_cast<T>(0.0f);
  float threshold;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"threshold", &threshold}};
  }

  // relu6(x) = min(max(0, x), 6)
  __device__ __forceinline__ T operator()(const T& x) const {
    T t = static_cast<T>(threshold);
    return x <= zero ? zero : (x < t ? x : t);
  }
};

template <typename T>
struct CudaRelu6GradFunctor : public BaseActivationFunctor<T> {
  T zero = static_cast<T>(0.0f);
  float threshold;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"threshold", &threshold}};
  }

  // dx = (out > 0 && out < t) ? dout : 0
  __device__ __forceinline__ T operator()(const T& dout, const T& out) const {
    T t = static_cast<T>(threshold);
    return (out > zero && out < t) ? dout : zero;
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; }
};

template <typename T>
struct CudaTanhShrinkFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;

  // tanhshrink(x) = x - tanh(x)
  __device__ __forceinline__ T operator()(const T& arg_x) const {
    MPType x = static_cast<MPType>(arg_x);
    return static_cast<T>(x - tanh(x));
  }
};

template <typename T>
struct CudaTanhShrinkGradFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;

  // dx = dout * tanh(x)^2
  __device__ __forceinline__ T operator()(const T& arg_dout,
                                          const T& arg_x) const {
    MPType dout = static_cast<MPType>(arg_dout);
    MPType x = static_cast<MPType>(arg_x);
    return static_cast<T>(dout * tanh(x) * tanh(x));
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

template <typename T>
struct CudaHardShrinkFunctor : public BaseActivationFunctor<T> {
  T zero = static_cast<T>(0.0f);
  float threshold;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"threshold", &threshold}};
  }

  // hardshrink(x) = (x > -threshold && x < threshold) ? 0 : x
  __device__ __forceinline__ T operator()(const T& x) const {
    T t = static_cast<T>(threshold);
    return (x > -t && x < t) ? zero : x;
  }
};

template <typename T>
struct CudaHardShrinkGradFunctor : public BaseActivationFunctor<T> {
  T zero = static_cast<T>(0.0f);
  float threshold;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"threshold", &threshold}};
  }

  // dx = (x > -threshold && x < threshold) ? 0 : dout
  __device__ __forceinline__ T operator()(const T& dout, const T& x) const {
    T t = static_cast<T>(threshold);
    return (x > -t && x < t) ? zero : dout;
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

template <typename T>
struct CudaHardSigmoidFunctor : public BaseActivationFunctor<T> {
  T zero = static_cast<T>(0.0f);
  T one = static_cast<T>(1.0f);
  float slope;
  float offset;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"slope", &slope}, {"offset", &offset}};
  }

  // hard_sigmoid(x) = 0, when x * slope + offset <= 0
  //                   1, when x * slope + offset >= 1
  //                   x * slope + offset, otherwise
  __device__ __forceinline__ T operator()(const T& x) const {
    T temp = x * static_cast<T>(slope) + static_cast<T>(offset);
    T temp_max = temp > zero ? temp : zero;
    T temp_min = temp_max < one ? temp_max : one;
    return temp_min;
  }
};

template <typename T>
struct CudaHardSigmoidGradFunctor : public BaseActivationFunctor<T> {
  T zero = static_cast<T>(0.0f);
  T one = static_cast<T>(1.0f);
  float slope;
  float offset;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"slope", &slope}, {"offset", &offset}};
  }

  // dx = (out > 0 && out < 1) ? dout * slope : 0
  __device__ __forceinline__ T operator()(const T& dout, const T& out) const {
    return (out > zero && out < one) ? dout * static_cast<T>(slope) : zero;
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; }
};

template <typename T>
struct CudaSwishFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;
  MPType one = static_cast<MPType>(1.0f);
  float beta;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"beta", &beta}};
  }

  // swish(x) = x / (1 + exp(-beta * x))
  __device__ __forceinline__ T operator()(const T& arg_x) const {
    MPType x = static_cast<MPType>(arg_x);
    MPType b = static_cast<MPType>(beta);
    return static_cast<T>(x / (one + exp(-b * x)));
  }
};

template <typename T>
struct CudaSwishGradFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;
  MPType one = static_cast<MPType>(1.0f);
  float beta;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"beta", &beta}};
  }

  // dx = dout * (1 + exp(-b * x) + b * x * exp(-b * x)) / (1 + exp(-b * x))^2
  __device__ __forceinline__ T operator()(const T& arg_dout,
                                          const T& arg_x) const {
    MPType dout = static_cast<MPType>(arg_dout);
    MPType x = static_cast<MPType>(arg_x);
    MPType b = static_cast<MPType>(beta);
    MPType temp1 = one / (one + exp(-b * x));
    MPType out = x * temp1;
    MPType temp2 = b * out;
    MPType temp3 = temp1 * (one - temp2);
    return static_cast<T>(dout * (temp2 + temp3));
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};
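// Derivation sketch for CudaSwishGradFunctor above: with s = sigmoid(b * x)
// (temp1 in the code) and out = x * s,
//   d out / dx = s + b * x * s * (1 - s)
//              = b * out + s * (1 - b * out),
// i.e. temp2 + temp3.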
template <typename T>
struct CudaThresholdedReluFunctor : public BaseActivationFunctor<T> {
  T zero = static_cast<T>(0.0f);
  float threshold;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"threshold", &threshold}};
  }

  // thresholded_relu(x) = x > threshold ? x : 0
  __device__ __forceinline__ T operator()(const T& x) const {
    return x > static_cast<T>(threshold) ? x : zero;
  }
};

template <typename T>
struct CudaThresholdedReluGradFunctor : public BaseActivationFunctor<T> {
  T zero = static_cast<T>(0.0f);
  float threshold;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"threshold", &threshold}};
  }

  // dx = x > threshold ? dout : 0
  __device__ __forceinline__ T operator()(const T& dout, const T& x) const {
    return x > static_cast<T>(threshold) ? dout : zero;
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

template <typename T>
struct CudaHardSwishFunctor : public BaseActivationFunctor<T> {
  T zero = static_cast<T>(0.0f);
  float threshold;
  float scale;
  float offset;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"threshold", &threshold}, {"scale", &scale}, {"offset", &offset}};
  }

  // hard_swish(x) = 0, when x <= -offset
  //                 x, when x >= threshold - offset
  //                 x * (x + offset) / scale, otherwise
  // threshold = scale = 6, offset = 3 by default
  __device__ __forceinline__ T operator()(const T& x) const {
    T t = static_cast<T>(threshold);
    T temp = x + static_cast<T>(offset);
    T temp_max = temp > zero ? temp : zero;
    T temp_min = temp_max < t ? temp_max : t;
    return temp_min * x / static_cast<T>(scale);
  }
};

template <typename T>
struct CudaHardSwishGradFunctor : public BaseActivationFunctor<T> {
  T zero = static_cast<T>(0.0f);
  T one = static_cast<T>(1.0f);
  T two = static_cast<T>(2.0f);
  float threshold;
  float scale;
  float offset;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"threshold", &threshold}, {"scale", &scale}, {"offset", &offset}};
  }

  // dx = 0, when x <= -offset
  //      dout, when x >= threshold - offset
  //      dout * (2 * x / scale + offset / scale), otherwise
  // threshold = scale = 6, offset = 3 by default
  __device__ __forceinline__ T operator()(const T& dout, const T& x) const {
    T o = static_cast<T>(offset);
    T s = static_cast<T>(scale);
    T temp1 = static_cast<T>(x + o > zero);
    T temp2 = static_cast<T>(x + o < static_cast<T>(threshold));
    return dout * (temp1 * temp2 * (two * x + o) / s + one - temp2);
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};
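// Mask check for CudaHardSwishGradFunctor above, at the default attributes
// (threshold = scale = 6, offset = 3): temp1 = [x > -3], temp2 = [x < 3], so
//   x <= -3:     dx = dout * (0 + 1 - 1) = 0
//   x >=  3:     dx = dout * (0 + 1 - 0) = dout
//   -3 < x < 3:  dx = dout * (2 * x + 3) / 6
// matching the piecewise comment on the functor.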
template <typename T>
struct CudaELUFunctor : public BaseActivationFunctor<T> {
  using CT = typename details::MPTypeTrait<T>::Type;
  CT zero = static_cast<CT>(0.0f);
  CT one = static_cast<CT>(1.0f);
  float alpha;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"alpha", &alpha}};
  }

  // elu(x) = x, if x > 0
  // elu(x) = alpha * (e^x - 1), if x <= 0
  __device__ __forceinline__ T operator()(const T& arg_x) const {
    CT x = static_cast<CT>(arg_x);
    CT temp = static_cast<CT>(alpha) * (exp(x) - one);
    CT res = x > zero ? x : temp;
    return static_cast<T>(res);
  }
};

template <typename T>
struct CudaELUGradFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;
  MPType zero = static_cast<MPType>(0.0f);
  float alpha;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"alpha", &alpha}};
  }

  // case 1: alpha >= 0
  // dx = dout, if out > 0
  // dx = dout * (out + alpha), if out <= 0
  __device__ __forceinline__ T operator()(const T& arg_dout,
                                          const T& arg_out) const {
    MPType dout = static_cast<MPType>(arg_dout);
    MPType out = static_cast<MPType>(arg_out);
    MPType a = static_cast<MPType>(alpha);
    MPType out_pos = static_cast<MPType>(out > zero);
    MPType out_neg = static_cast<MPType>(out <= zero);
    return static_cast<T>(dout * (out_pos + out_neg * (out + a)));
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; }
};

template <typename T>
struct CudaELUGradNegativeAlphaFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;
  MPType zero = static_cast<MPType>(0.0f);
  float alpha;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"alpha", &alpha}};
  }

  // case 2: alpha < 0
  // dx = dout, if x > 0
  // dx = dout * (out + alpha), if x <= 0
  __device__ __forceinline__ T operator()(const T& arg_dout, const T& arg_out,
                                          const T& arg_x) const {
    MPType dout = static_cast<MPType>(arg_dout);
    MPType out = static_cast<MPType>(arg_out);
    MPType x = static_cast<MPType>(arg_x);
    MPType a = static_cast<MPType>(alpha);
    MPType x_pos = static_cast<MPType>(x > zero);
    MPType x_neg = static_cast<MPType>(x <= zero);
    return static_cast<T>(dout * (x_pos + x_neg * (out + a)));
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};
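// Why alpha < 0 needs a separate grad functor: for negative alpha and x <= 0,
// out = alpha * (exp(x) - 1) is >= 0, so the sign of Out no longer identifies
// which forward branch was taken. The backward pass must inspect X itself
// (kDepX), which CudaELUGradNegativeAlphaFunctor does via a ternary
// (dout, out, x) kernel, as dispatched by ELUGradCudaKernel below.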
template <typename DeviceContext, typename T>
class ELUGradCudaKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* d_out = ctx.Input<framework::Tensor>(framework::GradVarName("Out"));
    auto* out = ctx.Input<framework::Tensor>("Out");
    auto* x = ctx.Input<framework::Tensor>("X");
    auto* d_x = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
    d_x->mutable_data<T>(ctx.GetPlace());
    const float alpha = ctx.Attr<float>("alpha");

    auto& dev_ctx = ctx.device_context<DeviceContext>();
    std::vector<const framework::Tensor*> ins = {d_out, out};
    std::vector<framework::Tensor*> outs = {d_x};
    if (alpha > 0) {
      CudaELUGradFunctor<T> functor;
      functor.alpha = alpha;
      LaunchSameDimsElementwiseCudaKernel<ElementwiseType::kBinary, T, T>(
          dev_ctx, ins, &outs, functor);
    } else {
      CudaELUGradNegativeAlphaFunctor<T> functor;
      functor.alpha = alpha;
      ins.push_back(x);
      LaunchSameDimsElementwiseCudaKernel<ElementwiseType::kTernary, T, T>(
          dev_ctx, ins, &outs, functor);
    }
  }
};

template <typename DeviceContext, typename Functor>
class ActivationCudaKernel
    : public framework::OpKernel<typename Functor::ELEMENT_TYPE> {
 public:
  using T = typename Functor::ELEMENT_TYPE;
  void Compute(const framework::ExecutionContext& ctx) const override {
    const framework::Tensor* x = nullptr;
    framework::Tensor* out = nullptr;
    ExtractActivationTensor(ctx, &x, &out);
    out->mutable_data<T>(ctx.GetPlace());
    auto& dev_ctx = ctx.template device_context<DeviceContext>();
    std::vector<const framework::Tensor*> ins = {x};
    std::vector<framework::Tensor*> outs = {out};
    auto functor = Functor();
    auto attrs = functor.GetAttrs();
    for (auto& attr : attrs) {
      *attr.second = ctx.Attr<float>(attr.first);
    }
    LaunchSameDimsElementwiseCudaKernel<ElementwiseType::kUnary, T, T>(
        dev_ctx, ins, &outs, functor);
  }
};

template <typename DeviceContext, typename Functor>
class ActivationGradCudaKernel
    : public framework::OpKernel<typename Functor::ELEMENT_TYPE> {
 public:
  using T = typename Functor::ELEMENT_TYPE;
  void Compute(const framework::ExecutionContext& ctx) const override {
    const framework::Tensor *x, *out, *d_out;
    framework::Tensor* d_x = nullptr;
    x = out = d_out = nullptr;
    ExtractActivationGradTensor<Functor::FwdDeps()>(ctx, &x, &out, &d_out,
                                                    &d_x);
    d_x->mutable_data<T>(ctx.GetPlace());
    auto& dev_ctx = ctx.template device_context<DeviceContext>();
    auto functor = Functor();
    auto attrs = functor.GetAttrs();
    for (auto& attr : attrs) {
      *attr.second = ctx.Attr<float>(attr.first);
    }

    std::vector<const framework::Tensor*> ins = {d_out};
    std::vector<framework::Tensor*> outs = {d_x};

    if (static_cast<int>(Functor::FwdDeps()) == static_cast<int>(kDepOut)) {
      // Only need forward output Out
      ins.push_back(out);
      LaunchSameDimsElementwiseCudaKernel<ElementwiseType::kBinary, T, T>(
          dev_ctx, ins, &outs, functor);
    } else if (static_cast<int>(Functor::FwdDeps()) ==
               static_cast<int>(kDepX)) {
      // Only need forward input X
      ins.push_back(x);
      LaunchSameDimsElementwiseCudaKernel<ElementwiseType::kBinary, T, T>(
          dev_ctx, ins, &outs, functor);
    } else {
      LaunchSameDimsElementwiseCudaKernel<ElementwiseType::kUnary, T, T>(
          dev_ctx, ins, &outs, functor);
    }
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
namespace plat = paddle::platform;

#define REGISTER_ACTIVATION_CUDA_KERNEL(act_type, op_name, functor,       \
                                        grad_functor)                     \
  REGISTER_OP_CUDA_KERNEL(                                                \
      act_type, ops::ActivationCudaKernel<plat::CUDADeviceContext,        \
                                          ops::functor<float>>,           \
      ops::ActivationCudaKernel<plat::CUDADeviceContext,                  \
                                ops::functor<double>>,                    \
      ops::ActivationCudaKernel<plat::CUDADeviceContext,                  \
                                ops::functor<plat::float16>>);            \
  REGISTER_OP_CUDA_KERNEL(                                                \
      act_type##_grad,                                                    \
      ops::ActivationGradCudaKernel<plat::CUDADeviceContext,              \
                                    ops::grad_functor<float>>,            \
      ops::ActivationGradCudaKernel<plat::CUDADeviceContext,              \
                                    ops::grad_functor<double>>,           \
      ops::ActivationGradCudaKernel<plat::CUDADeviceContext,              \
                                    ops::grad_functor<plat::float16>>);

#define REGISTER_ACTIVATION_CUDA_KERNEL_INT(act_type, op_name, functor,   \
                                            grad_functor)                 \
  REGISTER_OP_CUDA_KERNEL(                                                \
      act_type, ops::ActivationCudaKernel<plat::CUDADeviceContext,        \
                                          ops::functor<float>>,           \
      ops::ActivationCudaKernel<plat::CUDADeviceContext,                  \
                                ops::functor<double>>,                    \
      ops::ActivationCudaKernel<plat::CUDADeviceContext,                  \
                                ops::functor<int>>,                       \
      ops::ActivationCudaKernel<plat::CUDADeviceContext,                  \
                                ops::functor<int64_t>>,                   \
      ops::ActivationCudaKernel<plat::CUDADeviceContext,                  \
                                ops::functor<plat::float16>>);            \
  REGISTER_OP_CUDA_KERNEL(                                                \
      act_type##_grad,                                                    \
      ops::ActivationGradCudaKernel<plat::CUDADeviceContext,              \
                                    ops::grad_functor<float>>,            \
      ops::ActivationGradCudaKernel<plat::CUDADeviceContext,              \
                                    ops::grad_functor<double>>,           \
      ops::ActivationGradCudaKernel<plat::CUDADeviceContext,              \
                                    ops::grad_functor<int>>,              \
      ops::ActivationGradCudaKernel<plat::CUDADeviceContext,              \
                                    ops::grad_functor<int64_t>>,          \
      ops::ActivationGradCudaKernel<plat::CUDADeviceContext,              \
                                    ops::grad_functor<plat::float16>>);
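/* For reference, a sketch of one expansion of the macro above (relu6 as the
   example): REGISTER_ACTIVATION_CUDA_KERNEL(relu6, Relu6, CudaRelu6Functor,
   CudaRelu6GradFunctor) registers

     ops::ActivationCudaKernel<plat::CUDADeviceContext,
                               ops::CudaRelu6Functor<float>>

   (plus the double and plat::float16 instantiations) under op type "relu6",
   and the matching ActivationGradCudaKernel instantiations under
   "relu6_grad". */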
/* ======================== leaky relu register  ============================ */
REGISTER_ACTIVATION_CUDA_KERNEL(leaky_relu, LeakyRelu, CudaLeakyReluFunctor,
                                CudaLeakyReluGradFunctor);

REGISTER_OP_CUDA_KERNEL(
    leaky_relu_grad_grad,
    ops::ActivationDoubleGradKernel<plat::CUDADeviceContext,
                                    ops::LeakyReluGradGradFunctor<float>>,
    ops::ActivationDoubleGradKernel<plat::CUDADeviceContext,
                                    ops::LeakyReluGradGradFunctor<double>>,
    ops::ActivationDoubleGradKernel<
        plat::CUDADeviceContext,
        ops::LeakyReluGradGradFunctor<plat::float16>>);
/* ========================================================================== */

/* ======================== elu register  ============================ */
REGISTER_OP_CUDA_KERNEL(
    elu, ops::ActivationCudaKernel<plat::CUDADeviceContext,
                                   ops::CudaELUFunctor<float>>,
    ops::ActivationCudaKernel<plat::CUDADeviceContext,
                              ops::CudaELUFunctor<double>>,
    ops::ActivationCudaKernel<plat::CUDADeviceContext,
                              ops::CudaELUFunctor<plat::float16>>);
REGISTER_OP_CUDA_KERNEL(
    elu_grad, ops::ELUGradCudaKernel<plat::CUDADeviceContext, float>,
    ops::ELUGradCudaKernel<plat::CUDADeviceContext, double>,
    ops::ELUGradCudaKernel<plat::CUDADeviceContext, plat::float16>);

REGISTER_OP_CUDA_KERNEL(
    elu_grad_grad, ops::ELUDoubleGradKernel<plat::CUDADeviceContext,
                                            ops::ELUGradGradFunctor<float>>,
    ops::ELUDoubleGradKernel<plat::CUDADeviceContext,
                             ops::ELUGradGradFunctor<double>>,
    ops::ELUDoubleGradKernel<plat::CUDADeviceContext,
                             ops::ELUGradGradFunctor<plat::float16>>);
/* ========================================================================== */

/* ===========================    relu register  ============================ */
#ifdef PADDLE_WITH_HIP
REGISTER_ACTIVATION_CUDA_KERNEL(relu, Relu, CudaReluFunctor,
                                CudaReluGradFunctor);
REGISTER_OP_CUDA_KERNEL(
    relu_grad_grad,
    ops::ActivationDoubleGradKernel<plat::CUDADeviceContext,
                                    ops::ReluGradGradFunctor<float>>,
    ops::ActivationDoubleGradKernel<plat::CUDADeviceContext,
                                    ops::ReluGradGradFunctor<double>>,
    ops::ActivationDoubleGradKernel<plat::CUDADeviceContext,
                                    ops::ReluGradGradFunctor<plat::float16>>);
#else
REGISTER_OP_CUDA_KERNEL(
    relu, ops::ActivationCudaKernel<plat::CUDADeviceContext,
                                    ops::CudaReluFunctor<float>>,
    ops::ActivationCudaKernel<plat::CUDADeviceContext,
                              ops::CudaReluFunctor<double>>,
    ops::ActivationCudaKernel<plat::CUDADeviceContext,
                              ops::CudaReluFunctor<plat::float16>>,
    ops::ActivationCudaKernel<plat::CUDADeviceContext,
                              ops::CudaReluFunctor<plat::bfloat16>>);
REGISTER_OP_CUDA_KERNEL(
    relu_grad, ops::ActivationGradCudaKernel<plat::CUDADeviceContext,
                                             ops::CudaReluGradFunctor<float>>,
    ops::ActivationGradCudaKernel<plat::CUDADeviceContext,
                                  ops::CudaReluGradFunctor<double>>,
    ops::ActivationGradCudaKernel<plat::CUDADeviceContext,
                                  ops::CudaReluGradFunctor<plat::float16>>,
    ops::ActivationGradCudaKernel<plat::CUDADeviceContext,
                                  ops::CudaReluGradFunctor<plat::bfloat16>>);
REGISTER_OP_CUDA_KERNEL(
    relu_grad_grad,
    ops::ActivationDoubleGradKernel<plat::CUDADeviceContext,
                                    ops::ReluGradGradFunctor<float>>,
    ops::ActivationDoubleGradKernel<plat::CUDADeviceContext,
                                    ops::ReluGradGradFunctor<double>>,
    ops::ActivationDoubleGradKernel<plat::CUDADeviceContext,
                                    ops::ReluGradGradFunctor<plat::float16>>,
    ops::ActivationDoubleGradKernel<
        plat::CUDADeviceContext, ops::ReluGradGradFunctor<plat::bfloat16>>);
#endif
/* ========================================================================== */

/* ===========================  sigmoid register ============================ */
REGISTER_ACTIVATION_CUDA_KERNEL(sigmoid, Sigmoid, CudaSigmoidFunctor,
                                CudaSigmoidGradFunctor);

REGISTER_OP_CUDA_KERNEL(
    sigmoid_grad_grad,
    ops::SigmoidDoubleGradKernel<plat::CUDADeviceContext,
                                 ops::SigmoidGradGradFunctor<float>>,
    ops::SigmoidDoubleGradKernel<plat::CUDADeviceContext,
                                 ops::SigmoidGradGradFunctor<double>>,
    ops::SigmoidDoubleGradKernel<plat::CUDADeviceContext,
                                 ops::SigmoidGradGradFunctor<plat::float16>>);
/* ========================================================================== */

/* ===========================    tanh register  ============================ */
REGISTER_ACTIVATION_CUDA_KERNEL(tanh, Tanh, CudaTanhFunctor,
                                CudaTanhGradFunctor);

REGISTER_OP_CUDA_KERNEL(
    tanh_grad_grad,
    ops::TanhDoubleGradKernel<plat::CUDADeviceContext,
                              ops::TanhGradGradFunctor<float>>,
    ops::TanhDoubleGradKernel<plat::CUDADeviceContext,
                              ops::TanhGradGradFunctor<double>>,
    ops::TanhDoubleGradKernel<plat::CUDADeviceContext,
                              ops::TanhGradGradFunctor<plat::float16>>);
/* ========================================================================== */

/* ===========================   sqrt register  ============================= */
REGISTER_ACTIVATION_CUDA_KERNEL(sqrt, Sqrt, CudaSqrtFunctor,
                                CudaSqrtGradFunctor);

REGISTER_OP_CUDA_KERNEL(
    sqrt_grad_grad,
    ops::SqrtDoubleGradKernel<plat::CUDADeviceContext,
                              ops::SqrtGradGradFunctor<float>>,
    ops::SqrtDoubleGradKernel<plat::CUDADeviceContext,
                              ops::SqrtGradGradFunctor<double>>,
    ops::SqrtDoubleGradKernel<plat::CUDADeviceContext,
                              ops::SqrtGradGradFunctor<plat::float16>>);
/* ========================================================================== */

/* ===========================   rsqrt register  ============================ */
REGISTER_ACTIVATION_CUDA_KERNEL(rsqrt, Rsqrt, CudaRsqrtFunctor,
                                CudaRsqrtGradFunctor);

REGISTER_OP_CUDA_KERNEL(
    rsqrt_grad_grad,
    ops::RsqrtDoubleGradKernel<plat::CUDADeviceContext,
                               ops::RsqrtGradGradFunctor<float>>,
    ops::RsqrtDoubleGradKernel<plat::CUDADeviceContext,
                               ops::RsqrtGradGradFunctor<double>>,
    ops::RsqrtDoubleGradKernel<plat::CUDADeviceContext,
                               ops::RsqrtGradGradFunctor<plat::float16>>);
/* ========================================================================== */

/* ===========================  square register  ============================ */
REGISTER_ACTIVATION_CUDA_KERNEL_INT(square, Square, CudaSquareFunctor,
                                    CudaSquareGradFunctor);

REGISTER_OP_CUDA_KERNEL(
    square_grad_grad,
    ops::SquareDoubleGradKernel<plat::CUDADeviceContext,
                                ops::SquareGradGradFunctor<float>>,
    ops::SquareDoubleGradKernel<plat::CUDADeviceContext,
                                ops::SquareGradGradFunctor<double>>,
    ops::SquareDoubleGradKernel<plat::CUDADeviceContext,
                                ops::SquareGradGradFunctor<plat::float16>>,
    ops::SquareDoubleGradKernel<plat::CUDADeviceContext,
                                ops::SquareGradGradFunctor<int>>,
    ops::SquareDoubleGradKernel<plat::CUDADeviceContext,
                                ops::SquareGradGradFunctor<int64_t>>);
/* ========================================================================== */

/* ==========================   pow register  ============================ */
REGISTER_OP_CUDA_KERNEL(
    pow, ops::PowKernel<plat::CUDADeviceContext, ops::PowFunctor<float>>,
    ops::PowKernel<plat::CUDADeviceContext, ops::PowFunctor<double>>,
    ops::PowKernel<plat::CUDADeviceContext, ops::PowFunctor<int>>,
    ops::PowKernel<plat::CUDADeviceContext, ops::PowFunctor<int64_t>>,
    ops::PowKernel<plat::CUDADeviceContext, ops::PowFunctor<plat::float16>>);
REGISTER_OP_CUDA_KERNEL(
    pow_grad,
    ops::PowGradKernel<plat::CUDADeviceContext, ops::PowGradFunctor<float>>,
    ops::PowGradKernel<plat::CUDADeviceContext, ops::PowGradFunctor<double>>,
    ops::PowGradKernel<plat::CUDADeviceContext, ops::PowGradFunctor<int>>,
    ops::PowGradKernel<plat::CUDADeviceContext, ops::PowGradFunctor<int64_t>>,
    ops::PowGradKernel<plat::CUDADeviceContext,
                       ops::PowGradFunctor<plat::float16>>);
/* ========================================================================== */

/* ==========================   exp register  ============================ */
REGISTER_OP_CUDA_KERNEL(
    exp, ops::ActivationCudaKernel<plat::CUDADeviceContext,
                                   ops::CudaExpFunctor<float>>,
    ops::ActivationCudaKernel<plat::CUDADeviceContext,
                              ops::CudaExpFunctor<double>>,
    ops::ActivationKernel<plat::CUDADeviceContext, ops::ExpFunctor<int>>,
    ops::ActivationKernel<plat::CUDADeviceContext, ops::ExpFunctor<int64_t>>,
    ops::ActivationCudaKernel<plat::CUDADeviceContext,
                              ops::CudaExpFunctor<plat::float16>>);
REGISTER_OP_CUDA_KERNEL(
    exp_grad, ops::ActivationGradCudaKernel<plat::CUDADeviceContext,
                                            ops::CudaExpGradFunctor<float>>,
    ops::ActivationGradCudaKernel<plat::CUDADeviceContext,
                                  ops::CudaExpGradFunctor<double>>,
    ops::ActivationGradCudaKernel<plat::CUDADeviceContext,
                                  ops::CudaExpGradFunctor<int>>,
    ops::ActivationGradCudaKernel<plat::CUDADeviceContext,
                                  ops::CudaExpGradFunctor<int64_t>>,
    ops::ActivationGradCudaKernel<plat::CUDADeviceContext,
                                  ops::CudaExpGradFunctor<plat::float16>>);
/* ========================================================================== */

/* ==========================   expm1 register  ============================ */
REGISTER_OP_CUDA_KERNEL(
    expm1, ops::ActivationCudaKernel<plat::CUDADeviceContext,
                                     ops::CudaExpm1Functor<float>>,
    ops::ActivationCudaKernel<plat::CUDADeviceContext,
                              ops::CudaExpm1Functor<double>>,
    ops::ActivationCudaKernel<plat::CUDADeviceContext,
                              ops::CudaExpm1Functor<plat::float16>>);
REGISTER_OP_CUDA_KERNEL(
    expm1_grad,
    ops::ActivationGradCudaKernel<plat::CUDADeviceContext,
                                  ops::CudaExpm1GradFunctor<float>>,
    ops::ActivationGradCudaKernel<plat::CUDADeviceContext,
                                  ops::CudaExpm1GradFunctor<double>>,
    ops::ActivationGradCudaKernel<plat::CUDADeviceContext,
                                  ops::CudaExpm1GradFunctor<plat::float16>>);
/* ========================================================================== */

/* ==========================  Log register ==================================*/
REGISTER_ACTIVATION_CUDA_KERNEL(log, Log, CudaLogFunctor, CudaLogGradFunctor);

REGISTER_OP_CUDA_KERNEL(
    log_grad_grad,
    ops::LogDoubleGradKernel<plat::CUDADeviceContext,
                             ops::LogGradGradFunctor<float>>,
    ops::LogDoubleGradKernel<plat::CUDADeviceContext,
                             ops::LogGradGradFunctor<double>>,
    ops::LogDoubleGradKernel<plat::CUDADeviceContext,
                             ops::LogGradGradFunctor<plat::float16>>);
/* ========================================================================== */
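// The ops registered above are written out by hand because they deviate from
// the plain float/double/float16 macro pattern: relu adds plat::bfloat16 on
// non-HIP builds, square and pow also cover int/int64_t, exp reuses the
// CPU-side ExpFunctor for integer types, and several ops attach double-grad
// kernels. Everything that fits the standard pattern is registered through
// FOR_EACH_ACTIVATION_CUDA_OP below.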
#define FOR_EACH_ACTIVATION_CUDA_OP(__macro)                                  \
  __macro(silu, Silu, CudaSiluFunctor, CudaSiluGradFunctor);                  \
  __macro(logsigmoid, LogSigmoid, CudaLogSigmoidFunctor,                      \
          CudaLogSigmoidGradFunctor);                                         \
  __macro(atan, Atan, CudaAtanFunctor, CudaAtanGradFunctor);                  \
  __macro(softshrink, SoftShrink, CudaSoftShrinkFunctor,                      \
          CudaSoftShrinkGradFunctor);                                         \
  __macro(ceil, Ceil, CudaCeilFunctor, CudaZeroGradFunctor);                  \
  __macro(floor, Floor, CudaFloorFunctor, CudaZeroGradFunctor);               \
  __macro(cos, Cos, CudaCosFunctor, CudaCosGradFunctor);                      \
  __macro(tan, Tan, CudaTanFunctor, CudaTanGradFunctor);                      \
  __macro(acos, Acos, CudaAcosFunctor, CudaAcosGradFunctor);                  \
  __macro(sin, Sin, CudaSinFunctor, CudaSinGradFunctor);                      \
  __macro(asin, Asin, CudaAsinFunctor, CudaAsinGradFunctor);                  \
  __macro(sinh, Sinh, CudaSinhFunctor, CudaSinhGradFunctor);                  \
  __macro(cosh, Cosh, CudaCoshFunctor, CudaCoshGradFunctor);                  \
  __macro(round, Round, CudaRoundFunctor, CudaZeroGradFunctor);               \
  __macro(reciprocal, Reciprocal, CudaReciprocalFunctor,                      \
          CudaReciprocalGradFunctor);                                         \
  __macro(log1p, Log1p, CudaLog1pFunctor, CudaLog1pGradFunctor);              \
  __macro(log2, Log2, CudaLog2Functor, CudaLog2GradFunctor);                  \
  __macro(log10, Log10, CudaLog10Functor, CudaLog10GradFunctor);              \
  __macro(brelu, BRelu, CudaBReluFunctor, CudaBReluGradFunctor);              \
  __macro(soft_relu, SoftRelu, CudaSoftReluFunctor, CudaSoftReluGradFunctor); \
  __macro(stanh, STanh, CudaSTanhFunctor, CudaSTanhGradFunctor);              \
  __macro(softplus, Softplus, CudaSoftplusFunctor, CudaSoftplusGradFunctor);  \
  __macro(softsign, Softsign, CudaSoftsignFunctor, CudaSoftsignGradFunctor);  \
  __macro(relu6, Relu6, CudaRelu6Functor, CudaRelu6GradFunctor);              \
  __macro(tanh_shrink, TanhShrink, CudaTanhShrinkFunctor,                     \
          CudaTanhShrinkGradFunctor);                                         \
  __macro(hard_shrink, HardShrink, CudaHardShrinkFunctor,                     \
          CudaHardShrinkGradFunctor);                                         \
  __macro(hard_sigmoid, HardSigmoid, CudaHardSigmoidFunctor,                  \
          CudaHardSigmoidGradFunctor);                                        \
  __macro(swish, Swish, CudaSwishFunctor, CudaSwishGradFunctor);              \
  __macro(thresholded_relu, ThresholdedRelu, CudaThresholdedReluFunctor,      \
          CudaThresholdedReluGradFunctor);                                    \
  __macro(hard_swish, HardSwish, CudaHardSwishFunctor,                        \
          CudaHardSwishGradFunctor);

FOR_EACH_ACTIVATION_CUDA_OP(REGISTER_ACTIVATION_CUDA_KERNEL)