/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/activation_op.h"
#include "paddle/fluid/operators/amp/fp16_type_traits.h"
#include "paddle/fluid/operators/elementwise/elementwise_op_impl.cu.h"
#include "paddle/fluid/operators/math/math_cuda_utils.h"
#include "paddle/fluid/platform/cuda_device_function.h"

namespace paddle {
namespace operators {

template <typename T>
struct CudaReluFunctor : public BaseActivationFunctor<T> {
  T zero = static_cast<T>(0.0f);

  // relu(x) = max(x, 0)
  // Inputs: args[0], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    return args[0] > zero ? args[0] : zero;
  }
};

template <typename T>
struct CudaReluGradFunctor : public BaseActivationFunctor<T> {
  T zero = static_cast<T>(0.0f);

  // dx = dout * (out > 0)
  // Inputs: args[0], the input dout
  //         args[1], the input out
  __device__ __forceinline__ T operator()(const T* args) const {
    return args[1] > zero ? args[0] : zero;
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; }
};

template <typename T>
struct CudaLeakyReluFunctor : public BaseActivationFunctor<T> {
  T zero = static_cast<T>(0.0f);
  float alpha;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"alpha", &alpha}};
  }

  // leakyrelu(x) = x > 0 ? x : alpha * x
  // Inputs: args[0], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    return args[0] > zero ? args[0] : static_cast<T>(alpha) * args[0];
  }
};

template <typename T>
struct CudaLeakyReluGradFunctor : public BaseActivationFunctor<T> {
  T zero = static_cast<T>(0.0f);
  float alpha;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"alpha", &alpha}};
  }

  // dx = dout * (x > 0 ? 1 : alpha)
  // Inputs: args[0], the input dout
  //         args[1], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    return args[1] > zero ? args[0] : static_cast<T>(alpha) * args[0];
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};
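
// Editor's note (illustrative sketch, not code from the original file): every
// Cuda*Functor in this file is consumed element-wise by
// LaunchElementwiseCudaKernel (see elementwise_op_impl.cu.h), which gathers
// the operands for one element into a small array and invokes
// operator()(const T* args). Roughly, with hypothetical buffers x, out, dout
// and dx:
//
//   CudaReluFunctor<float> relu;
//   float fwd_args[1] = {x[i]};            // forward: args[0] = x
//   out[i] = relu(fwd_args);               // max(x[i], 0.0f)
//
//   CudaReluGradFunctor<float> relu_grad;
//   float bwd_args[2] = {dout[i], out[i]}; // backward: args[0] = dout
//   dx[i] = relu_grad(bwd_args);           // dout[i] * (out[i] > 0)
//
// FwdDeps() declares which forward tensor (X, Out, or neither) a grad functor
// reads; ActivationGradCudaKernel at the end of this file dispatches on it.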

template <typename T>
struct CudaSigmoidFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;
  MPType one = static_cast<MPType>(1.0f);

  // sigmoid(x) = 1 / (1 + exp(-x))
  // Inputs: args[0], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    MPType x = static_cast<MPType>(args[0]);
    return static_cast<T>(one / (one + exp(-x)));
  }
};

template <typename T>
struct CudaSigmoidGradFunctor : public BaseActivationFunctor<T> {
  T one = static_cast<T>(1.0f);

  // dx = dout * out * (1 - out)
  // Inputs: args[0], the input dout
  //         args[1], the input out
  __device__ __forceinline__ T operator()(const T* args) const {
    return args[0] * args[1] * (one - args[1]);
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; }
};

template <typename T>
struct CudaSiluFunctor : public BaseActivationFunctor<T> {
  // MPType means Compute Type
  using MPType = typename details::MPTypeTrait<T>::Type;
  MPType one = static_cast<MPType>(1.0f);

  // silu(x) = x / (1 + exp(-x))
  // Inputs: args[0], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    MPType x = static_cast<MPType>(args[0]);
    return static_cast<T>(x / (one + exp(-x)));
  }
};

template <typename T>
struct CudaSiluGradFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;
  MPType one = static_cast<MPType>(1.0f);

  // dx = dout * (1 + exp(-x) + x * exp(-x)) / (1 + exp(-x))^2
  // Inputs: args[0], the input dout
  //         args[1], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    MPType dout = static_cast<MPType>(args[0]);
    MPType x = static_cast<MPType>(args[1]);
    MPType temp = one / (one + exp(-x));
    return static_cast<T>(dout * (temp * (one + x * (one - temp))));
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

template <typename T>
struct CudaLogSigmoidFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;
  MPType zero = static_cast<MPType>(0.0f);

  // logsigmoid(x) = log(1 / (1 + exp(-x)))
  // For numerical stability,
  // logsigmoid(x) =
  //   - (max(-x, 0) + log(exp(-max(-x, 0)) + exp(-x - max(-x, 0))))
  // Inputs: args[0], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    MPType x = static_cast<MPType>(args[0]);
    MPType temp = x > zero ? zero : -x;
    return static_cast<T>(-temp - log(exp(-temp) + exp(-x - temp)));
  }
};

template <typename T>
struct CudaLogSigmoidGradFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;
  MPType zero = static_cast<MPType>(0.0f);

  // dx = dout * exp(-x) / (1 + exp(-x))
  // For numerical stability:
  // dx = dout * exp(-x - max(-x, 0))
  //      / (exp(-max(-x, 0)) + exp(-x - max(-x, 0)))
  // Inputs: args[0], the input dout
  //         args[1], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    MPType dout = static_cast<MPType>(args[0]);
    MPType x = static_cast<MPType>(args[1]);
    MPType temp1 = x > zero ? zero : -x;
    MPType temp2 = exp(-x - temp1);
    return static_cast<T>(dout * (temp2 / (exp(-temp1) + temp2)));
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};
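
// Editor's derivation of the max(-x, 0) rewrite used by the two LogSigmoid
// functors above: let m = max(-x, 0). Then
//
//   log(1 + exp(-x)) = m + log(exp(-m) + exp(-x - m)),
//
// since exp(m) * (exp(-m) + exp(-x - m)) = 1 + exp(-x). Both exponents -m and
// -x - m are <= 0, so exp() cannot overflow even for large |x|. Dividing the
// numerator and denominator of the gradient exp(-x) / (1 + exp(-x)) by exp(m)
// gives the temp2 / (exp(-temp1) + temp2) form used in the grad functor.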

template <typename T>
struct CudaAtanFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;

  // atan(x) = atan(x)
  // Inputs: args[0], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    MPType x = static_cast<MPType>(args[0]);
    return static_cast<T>(atan(x));
  }
};

template <typename T>
struct CudaAtanGradFunctor : public BaseActivationFunctor<T> {
  T one = static_cast<T>(1.0f);

  // dx = dout / (1 + x^2)
  // Inputs: args[0], the input dout
  //         args[1], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    return args[0] / (one + args[1] * args[1]);
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

template <typename T>
struct CudaSoftShrinkFunctor : public BaseActivationFunctor<T> {
  float lambda;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"lambda", &lambda}};
  }

  // softshrink(x) = x - lambda, if x > lambda;
  //                 x + lambda, if x < -lambda;
  //                 0, otherwise.
  // Inputs: args[0], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    T x = args[0];
    T l = static_cast<T>(lambda);
    T temp1 = static_cast<T>(x > l);
    T temp2 = static_cast<T>(x < -l);
    return temp1 * (x - l) + temp2 * (x + l);
  }
};

template <typename T>
struct CudaSoftShrinkGradFunctor : public BaseActivationFunctor<T> {
  T zero = static_cast<T>(0.0f);
  float lambda;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"lambda", &lambda}};
  }

  // dx = dout, if x > lambda or x < -lambda else 0
  // Inputs: args[0], the input dout
  //         args[1], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    T x = args[1];
    T l = static_cast<T>(lambda);
    return (x >= -l && x <= l) ? zero : args[0];
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

template <typename T>
struct CudaCeilFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;

  // ceil(x) = ceil(x)
  // Inputs: args[0], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    MPType x = static_cast<MPType>(args[0]);
    return static_cast<T>(ceil(x));
  }
};

template <typename T>
struct CudaFloorFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;

  // floor(x) = floor(x)
  // Inputs: args[0], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    MPType x = static_cast<MPType>(args[0]);
    return static_cast<T>(floor(x));
  }
};

template <typename T>
struct CudaRoundFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;

  // round(x) = round(x)
  // Inputs: args[0], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    MPType x = static_cast<MPType>(args[0]);
    return static_cast<T>(round(x));
  }
};

// grad functor for ceil, floor and round
template <typename T>
struct CudaZeroGradFunctor : public BaseActivationFunctor<T> {
  __device__ __forceinline__ T operator()(const T* args) const {
    return static_cast<T>(0.0f);
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kNoDeps; }
};

template <typename T>
struct CudaCosFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;

  // cos(x) = cos(x)
  // Inputs: args[0], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    MPType x = static_cast<MPType>(args[0]);
    return static_cast<T>(cos(x));
  }
};

template <typename T>
struct CudaCosGradFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;

  // dx = dout * (-sin(x))
  // Inputs: args[0], the input dout
  //         args[1], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    MPType dout = static_cast<MPType>(args[0]);
    MPType x = static_cast<MPType>(args[1]);
    return static_cast<T>(-dout * sin(x));
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};
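
// Editor's note on CudaZeroGradFunctor above: ceil, floor and round are
// piecewise constant, so their derivative is zero almost everywhere and the
// backward pass simply writes zeros. kNoDeps signals that the backward kernel
// needs neither the forward input X nor the forward output Out, so
// ActivationGradCudaKernel (end of this file) takes its unary path.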

template <typename T>
struct CudaSinFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;

  // sin(x) = sin(x)
  // Inputs: args[0], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    MPType x = static_cast<MPType>(args[0]);
    return static_cast<T>(sin(x));
  }
};

template <typename T>
struct CudaSinGradFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;

  // dx = dout * cos(x)
  // Inputs: args[0], the input dout
  //         args[1], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    MPType dout = static_cast<MPType>(args[0]);
    MPType x = static_cast<MPType>(args[1]);
    return static_cast<T>(dout * cos(x));
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

template <typename T>
struct CudaTanFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;

  // tan(x) = tan(x)
  // Inputs: args[0], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    MPType x = static_cast<MPType>(args[0]);
    return static_cast<T>(tan(x));
  }
};

template <typename T>
struct CudaTanGradFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;

  // dx = dout / cos(x)^2
  // Inputs: args[0], the input dout
  //         args[1], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    MPType dout = static_cast<MPType>(args[0]);
    MPType x = static_cast<MPType>(args[1]);
    return static_cast<T>(dout / (cos(x) * cos(x)));
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

template <typename T>
struct CudaAsinFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;

  // asin(x) = asin(x)
  // Inputs: args[0], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    MPType x = static_cast<MPType>(args[0]);
    return static_cast<T>(asin(x));
  }
};

template <typename T>
struct CudaAsinGradFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;
  MPType one = static_cast<MPType>(1.0f);

  // dx = dout / sqrt(1 - x^2)
  // Inputs: args[0], the input dout
  //         args[1], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    MPType dout = static_cast<MPType>(args[0]);
    MPType x = static_cast<MPType>(args[1]);
    return static_cast<T>(dout / sqrt(one - x * x));
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

template <typename T>
struct CudaAcosFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;

  // acos(x) = acos(x)
  // Inputs: args[0], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    MPType x = static_cast<MPType>(args[0]);
    return static_cast<T>(acos(x));
  }
};

template <typename T>
struct CudaAcosGradFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;
  MPType one = static_cast<MPType>(1.0f);

  // dx = -dout / sqrt(1 - x^2)
  // Inputs: args[0], the input dout
  //         args[1], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    MPType dout = static_cast<MPType>(args[0]);
    MPType x = static_cast<MPType>(args[1]);
    return static_cast<T>(-dout / sqrt(one - x * x));
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

template <typename T>
struct CudaCoshFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;

  // cosh(x) = cosh(x)
  // Inputs: args[0], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    MPType x = static_cast<MPType>(args[0]);
    return static_cast<T>(cosh(x));
  }
};

template <typename T>
struct CudaCoshGradFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;

  // dx = dout * sinh(x)
  // Inputs: args[0], the input dout
  //         args[1], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    MPType dout = static_cast<MPType>(args[0]);
    MPType x = static_cast<MPType>(args[1]);
    return static_cast<T>(dout * sinh(x));
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

template <typename T>
struct CudaSinhFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;

  // sinh(x) = sinh(x)
  // Inputs: args[0], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    MPType x = static_cast<MPType>(args[0]);
    return static_cast<T>(sinh(x));
  }
};

template <typename T>
struct CudaSinhGradFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;

  // dx = dout * cosh(x)
  // Inputs: args[0], the input dout
  //         args[1], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    MPType dout = static_cast<MPType>(args[0]);
    MPType x = static_cast<MPType>(args[1]);
    return static_cast<T>(dout * cosh(x));
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

template <typename T>
struct CudaTanhFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;

  // tanh(x) = tanh(x)
  // Inputs: args[0], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    MPType x = static_cast<MPType>(args[0]);
    return static_cast<T>(tanh(x));
  }
};

template <typename T>
struct CudaTanhGradFunctor : public BaseActivationFunctor<T> {
  T one = static_cast<T>(1.0f);

  // dx = dout * (1 - out^2)
  // Inputs: args[0], the input dout
  //         args[1], the input out
  __device__ __forceinline__ T operator()(const T* args) const {
    T dout = static_cast<T>(args[0]);
    T out = static_cast<T>(args[1]);
    return dout * (one - out * out);
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; }
};

template <typename T>
struct CudaReciprocalFunctor : public BaseActivationFunctor<T> {
  T one = static_cast<T>(1.0f);

  // reciprocal(x) = 1 / x
  // Inputs: args[0], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    return one / args[0];
  }
};

template <typename T>
struct CudaReciprocalGradFunctor : public BaseActivationFunctor<T> {
  // dx = -dout * out^2
  // Inputs: args[0], the input dout
  //         args[1], the input out
  __device__ __forceinline__ T operator()(const T* args) const {
    return -args[0] * args[1] * args[1];
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; }
};

template <typename T>
struct CudaExpFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;

  // exp(x) = exp(x)
  // Inputs: args[0], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    MPType x = static_cast<MPType>(args[0]);
    return static_cast<T>(exp(x));
  }
};

template <typename T>
struct CudaExpGradFunctor : public BaseActivationFunctor<T> {
  // dx = dout * out
  // Inputs: args[0], the input dout
  //         args[1], the input out
  __device__ __forceinline__ T operator()(const T* args) const {
    return args[0] * args[1];
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; }
};

template <typename T>
struct CudaLogFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;

  // log(x) = log(x)
  // Inputs: args[0], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    MPType x = static_cast<MPType>(args[0]);
    return static_cast<T>(log(x));
  }
};

template <typename T>
struct CudaLogGradFunctor : public BaseActivationFunctor<T> {
  // dx = dout / x
  // Inputs: args[0], the input dout
  //         args[1], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    return args[0] / args[1];
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};
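
// Editor's note on the kDepOut pattern above: when a derivative is
// expressible through the forward output alone, the grad functor declares
// kDepOut, so only Out has to be retained for the backward pass:
//
//   tanh:    out = tanh(x)            =>  dx = dout * (1 - out * out)
//   exp:     out = exp(x)             =>  dx = dout * out
//   sigmoid: out = 1 / (1 + exp(-x))  =>  dx = dout * out * (1 - out)
//
// Functors whose derivative needs the original input (sin, cos, log, ...)
// declare kDepX instead.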

template <typename T>
struct CudaSquareFunctor : public BaseActivationFunctor<T> {
  // square(x) = x * x
  // Inputs: args[0], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    return args[0] * args[0];
  }
};

template <typename T>
struct CudaSquareGradFunctor : public BaseActivationFunctor<T> {
  T two = static_cast<T>(2.0f);

  // dx = dout * 2 * x
  // Inputs: args[0], the input dout
  //         args[1], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    return args[0] * two * args[1];
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

template <typename T>
struct CudaSqrtFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;

  // sqrt(x) = sqrt(x)
  // Inputs: args[0], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    MPType x = static_cast<MPType>(args[0]);
    return static_cast<T>(sqrt(x));
  }
};

template <typename T>
struct CudaSqrtGradFunctor : public BaseActivationFunctor<T> {
  T one_half = static_cast<T>(0.5f);

  // dx = dout * 0.5 / out
  // Inputs: args[0], the input dout
  //         args[1], the input out
  __device__ __forceinline__ T operator()(const T* args) const {
    return one_half * args[0] / args[1];
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; }
};

template <typename T>
struct CudaRsqrtFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;

  // rsqrt(x) = rsqrt(x)
  // Inputs: args[0], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    MPType x = static_cast<MPType>(args[0]);
    return static_cast<T>(rsqrt(x));
  }
};

template <typename T>
struct CudaRsqrtGradFunctor : public BaseActivationFunctor<T> {
  T minus_one_half = static_cast<T>(-0.5f);

  // dx = dout * -0.5 / out^3
  // Inputs: args[0], the input dout
  //         args[1], the input out
  __device__ __forceinline__ T operator()(const T* args) const {
    T out = args[1];
    return minus_one_half * args[0] * out * out * out;
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; }
};

template <typename T>
struct CudaLog1pFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;
  MPType one = static_cast<MPType>(1.0f);

  // log1p(x) = log(1 + x)
  // Inputs: args[0], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    MPType x = static_cast<MPType>(args[0]);
    return static_cast<T>(log(one + x));
  }
};

template <typename T>
struct CudaLog1pGradFunctor : public BaseActivationFunctor<T> {
  T one = static_cast<T>(1.0f);

  // dx = dout / (1 + x)
  // Inputs: args[0], the input dout
  //         args[1], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    return args[0] / (one + args[1]);
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

template <typename T>
struct CudaLog2Functor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;

  // log2(x) = log2(x)
  // Inputs: args[0], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    MPType x = static_cast<MPType>(args[0]);
    return static_cast<T>(log2(x));
  }
};

template <typename T>
struct CudaLog2GradFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;
  T log_two = static_cast<T>(log(static_cast<MPType>(2.0f)));

  // dx = dout / (x * log(2))
  // Inputs: args[0], the input dout
  //         args[1], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    return args[0] / (args[1] * log_two);
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};
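
// Editor's derivation for the Sqrt/Rsqrt gradients above, which are written
// in terms of the forward output (kDepOut):
//
//   sqrt:  out = x^(1/2)   =>  d out/dx = (1/2) x^(-1/2)  = 0.5 / out
//   rsqrt: out = x^(-1/2)  =>  d out/dx = -(1/2) x^(-3/2) = -0.5 * out^3
//
// Expressing the result through out avoids re-evaluating sqrt()/rsqrt() in
// the backward kernel. Likewise, Log2 above and Log10 below use
// d log_b(x)/dx = 1 / (x * log(b)).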

template <typename T>
struct CudaLog10Functor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;

  // log10(x) = log10(x)
  // Inputs: args[0], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    MPType x = static_cast<MPType>(args[0]);
    return static_cast<T>(log10(x));
  }
};

template <typename T>
struct CudaLog10GradFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;
  T log_ten = static_cast<T>(log(static_cast<MPType>(10.0f)));

  // dx = dout / (x * log(10))
  // Inputs: args[0], the input dout
  //         args[1], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    return args[0] / (args[1] * log_ten);
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

template <typename T>
struct CudaBReluFunctor : public BaseActivationFunctor<T> {
  float t_min;
  float t_max;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"t_min", &t_min}, {"t_max", &t_max}};
  }

  // brelu(x) = min(max(x, t_min), t_max)
  // Inputs: args[0], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    T x = args[0];
    T t_min_cast = static_cast<T>(t_min);
    T t_max_cast = static_cast<T>(t_max);
    T temp_max = x > t_min_cast ? x : t_min_cast;
    T temp_min = temp_max < t_max_cast ? temp_max : t_max_cast;
    return temp_min;
  }
};

template <typename T>
struct CudaBReluGradFunctor : public BaseActivationFunctor<T> {
  T zero = static_cast<T>(0.0f);
  float t_min;
  float t_max;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"t_min", &t_min}, {"t_max", &t_max}};
  }

  // dx = (x > t_min && x < t_max) ? dout : 0
  // Inputs: args[0], the input dout
  //         args[1], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    T dout = args[0];
    T x = args[1];
    T t_min_cast = static_cast<T>(t_min);
    T t_max_cast = static_cast<T>(t_max);
    return (x > t_min_cast && x < t_max_cast) ? dout : zero;
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

template <typename T>
struct CudaSoftReluFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;
  MPType one = static_cast<MPType>(1.0f);
  float threshold;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"threshold", &threshold}};
  }

  // soft_relu(x) = log(1 + exp(max(min(x, threshold), -threshold)))
  // Inputs: args[0], the input x
  // threshold should not be negative
  __device__ __forceinline__ T operator()(const T* args) const {
    MPType x = static_cast<MPType>(args[0]);
    MPType t = static_cast<MPType>(threshold);
    MPType temp_min = x < t ? x : t;
    MPType temp_max = temp_min > -t ? temp_min : -t;
    return static_cast<T>(log(one + exp(temp_max)));
  }
};

template <typename T>
struct CudaSoftReluGradFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;
  MPType one = static_cast<MPType>(1.0f);
  float threshold;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"threshold", &threshold}};
  }

  // dx = (out > -threshold && out < threshold) ? dout * (1 - exp(-out)) : 0
  // Inputs: args[0], the input dout
  //         args[1], the input out
  // threshold should not be negative
  __device__ __forceinline__ T operator()(const T* args) const {
    MPType dout = static_cast<MPType>(args[0]);
    MPType out = static_cast<MPType>(args[1]);
    MPType t = static_cast<MPType>(threshold);
    return (out > -t && out < t) ? static_cast<T>(dout * (one - exp(-out)))
                                 : static_cast<T>(0.0f);
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; }
};
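
// Editor's note on CudaSoftReluGradFunctor above: with z the clipped
// pre-activation, out = log(1 + exp(z)), hence exp(-out) = 1 / (1 + exp(z))
// and
//
//   d out/dz = exp(z) / (1 + exp(z)) = 1 - exp(-out),
//
// which is why the gradient is computable from out alone (kDepOut). The clip
// to [-threshold, threshold] in the forward functor also keeps exp() from
// overflowing.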

template <typename T>
struct CudaSTanhFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;
  float scale_a;
  float scale_b;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"scale_a", &scale_a}, {"scale_b", &scale_b}};
  }

  // stanh(x) = b * tanh(a * x)
  // Inputs: args[0], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    MPType x = static_cast<MPType>(args[0]);
    MPType a = static_cast<MPType>(scale_a);
    MPType b = static_cast<MPType>(scale_b);
    return static_cast<T>(b * tanh(a * x));
  }
};

template <typename T>
struct CudaSTanhGradFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;
  MPType one = static_cast<MPType>(1.0f);
  float scale_a;
  float scale_b;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"scale_a", &scale_a}, {"scale_b", &scale_b}};
  }

  // dx = dout * a * b * (1 - tanh(a * x) * tanh(a * x))
  // Inputs: args[0], the input dout
  //         args[1], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    MPType dout = static_cast<MPType>(args[0]);
    MPType x = static_cast<MPType>(args[1]);
    MPType a = static_cast<MPType>(scale_a);
    MPType b = static_cast<MPType>(scale_b);
    MPType temp = tanh(a * x);
    return static_cast<T>(dout * a * b * (one - temp * temp));
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

template <typename T>
struct CudaSoftplusFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;
  MPType one = static_cast<MPType>(1.0f);
  float beta;
  float threshold;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"beta", &beta}, {"threshold", &threshold}};
  }

  // softplus(x) = beta * x > threshold ? x : log(1 + exp(beta * x)) / beta
  // Inputs: args[0], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    MPType x = static_cast<MPType>(args[0]);
    MPType b = static_cast<MPType>(beta);
    MPType t = static_cast<MPType>(threshold);
    MPType x_beta = x * b;
    return static_cast<T>(x_beta > t ? x : log(one + exp(x_beta)) / b);
  }
};

template <typename T>
struct CudaSoftplusGradFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;
  MPType one = static_cast<MPType>(1.0f);
  float beta;
  float threshold;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"beta", &beta}, {"threshold", &threshold}};
  }

  // dx = x * beta > threshold ? dout : dout / (1 + exp(-beta * x))
  // Inputs: args[0], the input dout
  //         args[1], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    MPType dout = static_cast<MPType>(args[0]);
    MPType x = static_cast<MPType>(args[1]);
    MPType b = static_cast<MPType>(beta);
    MPType t = static_cast<MPType>(threshold);
    MPType x_beta = x * b;
    return x_beta > t ? args[0]
                      : static_cast<T>(dout / (one + exp(-x_beta)));
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};
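
// Editor's note on the Softplus pair above: once beta * x > threshold,
// log(1 + exp(beta * x)) / beta is within rounding error of x while
// exp(beta * x) would overflow, so the forward functor returns x directly.
// The gradient d/dx [log(1 + exp(beta * x)) / beta] = 1 / (1 + exp(-beta * x))
// saturates to 1 in the same regime, so the grad functor passes dout through
// unchanged there.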

template <typename T>
struct CudaSoftsignFunctor : public BaseActivationFunctor<T> {
  T one = static_cast<T>(1.0f);

  // softsign(x) = x / (1 + abs(x))
  // Inputs: args[0], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    return args[0] / (one + abs(args[0]));
  }
};

template <typename T>
struct CudaSoftsignGradFunctor : public BaseActivationFunctor<T> {
  T one = static_cast<T>(1.0f);

  // dx = dout / (1 + abs(x))^2
  // Inputs: args[0], the input dout
  //         args[1], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    T temp = one + abs(args[1]);
    return args[0] / (temp * temp);
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

template <typename T>
struct CudaRelu6Functor : public BaseActivationFunctor<T> {
  T zero = static_cast<T>(0.0f);
  float threshold;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"threshold", &threshold}};
  }

  // relu6(x) = min(max(0, x), 6)
  // Inputs: args[0], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    T t = static_cast<T>(threshold);
    return args[0] <= zero ? zero : (args[0] < t ? args[0] : t);
  }
};

template <typename T>
struct CudaRelu6GradFunctor : public BaseActivationFunctor<T> {
  T zero = static_cast<T>(0.0f);
  float threshold;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"threshold", &threshold}};
  }

  // dx = (out > 0 && out < t) ? dout : 0
  // Inputs: args[0], the input dout
  //         args[1], the input out
  __device__ __forceinline__ T operator()(const T* args) const {
    T t = static_cast<T>(threshold);
    return (args[1] > zero && args[1] < t) ? args[0] : zero;
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; }
};

template <typename T>
struct CudaTanhShrinkFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;

  // tanhshrink(x) = x - tanh(x)
  // Inputs: args[0], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    MPType x = static_cast<MPType>(args[0]);
    return static_cast<T>(x - tanh(x));
  }
};

template <typename T>
struct CudaTanhShrinkGradFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;

  // dx = dout * tanh(x)^2
  // Inputs: args[0], the input dout
  //         args[1], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    MPType dout = static_cast<MPType>(args[0]);
    MPType x = static_cast<MPType>(args[1]);
    return static_cast<T>(dout * tanh(x) * tanh(x));
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

template <typename T>
struct CudaHardShrinkFunctor : public BaseActivationFunctor<T> {
  T zero = static_cast<T>(0.0f);
  float threshold;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"threshold", &threshold}};
  }

  // hardshrink(x) = (x > -threshold && x < threshold) ? 0 : x
  // Inputs: args[0], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    T x = args[0];
    T t = static_cast<T>(threshold);
    return (x > -t && x < t) ? zero : x;
  }
};

template <typename T>
struct CudaHardShrinkGradFunctor : public BaseActivationFunctor<T> {
  T zero = static_cast<T>(0.0f);
  float threshold;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"threshold", &threshold}};
  }

  // dx = (x > -threshold && x < threshold) ? 0 : dout
  // Inputs: args[0], the input dout
  //         args[1], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    T x = args[1];
    T t = static_cast<T>(threshold);
    return (x > -t && x < t) ? zero : args[0];
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};
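
// Editor's derivation for CudaTanhShrinkGradFunctor above:
//
//   d/dx [x - tanh(x)] = 1 - (1 - tanh(x)^2) = tanh(x)^2,
//
// hence dx = dout * tanh(x)^2.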

template <typename T>
struct CudaHardSigmoidFunctor : public BaseActivationFunctor<T> {
  T zero = static_cast<T>(0.0f);
  T one = static_cast<T>(1.0f);
  float slope;
  float offset;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"slope", &slope}, {"offset", &offset}};
  }

  // hard_sigmoid(x) = 0, when slope * x + offset <= 0
  //                   1, when slope * x + offset >= 1
  //                   slope * x + offset, otherwise
  // Inputs: args[0], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    T temp = args[0] * static_cast<T>(slope) + static_cast<T>(offset);
    T temp_max = temp > zero ? temp : zero;
    T temp_min = temp_max < one ? temp_max : one;
    return temp_min;
  }
};

template <typename T>
struct CudaHardSigmoidGradFunctor : public BaseActivationFunctor<T> {
  T zero = static_cast<T>(0.0f);
  T one = static_cast<T>(1.0f);
  float slope;
  float offset;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"slope", &slope}, {"offset", &offset}};
  }

  // dx = (out > 0 && out < 1) ? dout * slope : 0
  // Inputs: args[0], the input dout
  //         args[1], the input out
  __device__ __forceinline__ T operator()(const T* args) const {
    T out = args[1];
    return (out > zero && out < one) ? args[0] * static_cast<T>(slope) : zero;
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; }
};

template <typename T>
struct CudaSwishFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;
  MPType one = static_cast<MPType>(1.0f);
  float beta;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"beta", &beta}};
  }

  // swish(x) = x / (1 + exp(-beta * x))
  // Inputs: args[0], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    MPType x = static_cast<MPType>(args[0]);
    MPType b = static_cast<MPType>(beta);
    return static_cast<T>(x / (one + exp(-b * x)));
  }
};

template <typename T>
struct CudaSwishGradFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;
  MPType one = static_cast<MPType>(1.0f);
  float beta;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"beta", &beta}};
  }

  // dx = dout * (1 + exp(-b * x) + b * x * exp(-b * x)) / (1 + exp(-b * x))^2
  // Inputs: args[0], the input dout
  //         args[1], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    MPType dout = static_cast<MPType>(args[0]);
    MPType x = static_cast<MPType>(args[1]);
    MPType b = static_cast<MPType>(beta);
    MPType temp1 = one / (one + exp(-b * x));
    MPType out = x * temp1;
    MPType temp2 = b * out;
    MPType temp3 = temp1 * (one - temp2);
    return static_cast<T>(dout * (temp2 + temp3));
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

template <typename T>
struct CudaThresholdedReluFunctor : public BaseActivationFunctor<T> {
  T zero = static_cast<T>(0.0f);
  float threshold;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"threshold", &threshold}};
  }

  // thresholded_relu(x) = x > threshold ? x : 0
  // Inputs: args[0], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    return args[0] > static_cast<T>(threshold) ? args[0] : zero;
  }
};

template <typename T>
struct CudaThresholdedReluGradFunctor : public BaseActivationFunctor<T> {
  T zero = static_cast<T>(0.0f);
  float threshold;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"threshold", &threshold}};
  }

  // dx = x > threshold ? dout : 0
  // Inputs: args[0], the input dout
  //         args[1], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    return args[1] > static_cast<T>(threshold) ? args[0] : zero;
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};
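
// Editor's derivation for CudaSwishGradFunctor above: let s = sigmoid(b * x),
// so out = x * s. Then
//
//   d out/dx = s + b * x * s * (1 - s)
//            = b * out + s * (1 - b * out)
//            = temp2 + temp3,
//
// matching temp1 = s, temp2 = b * out and temp3 = temp1 * (1 - temp2) in the
// functor body.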

template <typename T>
struct CudaHardSwishFunctor : public BaseActivationFunctor<T> {
  T zero = static_cast<T>(0.0f);
  float threshold;
  float scale;
  float offset;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"threshold", &threshold}, {"scale", &scale}, {"offset", &offset}};
  }

  // hard_swish(x) = 0, when x <= -offset
  //                 x, when x >= threshold - offset
  //                 x * (x + offset) / scale, otherwise
  // threshold = scale = 6, offset = 3 by default
  // Inputs: args[0], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    T x = args[0];
    T t = static_cast<T>(threshold);
    T temp = x + static_cast<T>(offset);
    T temp_max = temp > zero ? temp : zero;
    T temp_min = temp_max < t ? temp_max : t;
    return temp_min * x / static_cast<T>(scale);
  }
};

template <typename T>
struct CudaHardSwishGradFunctor : public BaseActivationFunctor<T> {
  T zero = static_cast<T>(0.0f);
  T one = static_cast<T>(1.0f);
  T two = static_cast<T>(2.0f);
  float threshold;
  float scale;
  float offset;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"threshold", &threshold}, {"scale", &scale}, {"offset", &offset}};
  }

  // dx = 0, when x <= -offset
  //      dout, when x >= threshold - offset
  //      dout * (2 * x / scale + offset / scale), otherwise
  // threshold = scale = 6, offset = 3 by default
  // Inputs: args[0], the input dout
  //         args[1], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    T x = args[1];
    T o = static_cast<T>(offset);
    T s = static_cast<T>(scale);
    T temp1 = static_cast<T>(x + o > zero);
    T temp2 = static_cast<T>(x + o < static_cast<T>(threshold));
    return args[0] * (temp1 * temp2 * (two * x + o) / s + one - temp2);
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};
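
// Editor's note on CudaHardSwishGradFunctor above: temp1 = (x + offset > 0)
// and temp2 = (x + offset < threshold) encode the three branches without
// warp divergence:
//
//   x + offset <= 0:          temp1 = 0, temp2 = 1  =>  dx = 0
//   x + offset >= threshold:  temp2 = 0             =>  dx = dout
//   otherwise:                temp1 = temp2 = 1     =>  dx = dout * (2x+o)/s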

template <typename T>
struct CudaELUFunctor : public BaseActivationFunctor<T> {
  using CT = typename details::MPTypeTrait<T>::Type;
  CT zero = static_cast<CT>(0.0f);
  CT one = static_cast<CT>(1.0f);
  float alpha;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"alpha", &alpha}};
  }

  // elu(x) = max(0, x) + min(0, alpha * (exp(x) - 1))
  // Inputs: args[0], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    CT x = static_cast<CT>(args[0]);
    CT temp = static_cast<CT>(alpha) * (exp(x) - one);
    CT res = (x > zero ? x : zero) + (temp > zero ? zero : temp);
    return static_cast<T>(res);
  }
};

template <typename T>
struct CudaELUGradFunctor : public BaseActivationFunctor<T> {
  using MPType = typename details::MPTypeTrait<T>::Type;
  MPType zero = static_cast<MPType>(0.0f);
  MPType one = static_cast<MPType>(1.0f);
  float alpha;

  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
    return {{"alpha", &alpha}};
  }

  // dx = dout, if alpha > 0 and x > 0
  // dx = dout * alpha * x.exp(), if alpha > 0 and x <= 0
  // dx = dout * (1 + alpha * x.exp()), if alpha <= 0 and x > 0
  // dx = 0, if alpha <= 0 and x <= 0
  // Inputs: args[0], the input dout
  //         args[1], the input x
  __device__ __forceinline__ T operator()(const T* args) const {
    MPType dout = static_cast<MPType>(args[0]);
    MPType x = static_cast<MPType>(args[1]);
    MPType a = static_cast<MPType>(alpha);
    MPType temp_a_pos = static_cast<MPType>(alpha > 0.0f);
    MPType temp_a_neg = static_cast<MPType>(alpha <= 0.0f);
    MPType temp_x_pos = static_cast<MPType>(x > zero);
    MPType temp_x_neg = static_cast<MPType>(x <= zero);
    return static_cast<T>(
        dout *
        (temp_a_pos * temp_x_pos + temp_a_pos * temp_x_neg * a * exp(x) +
         temp_a_neg * temp_x_pos * (one + a * exp(x))));
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

template <typename DeviceContext, typename Functor>
class ActivationCudaKernel
    : public framework::OpKernel<typename Functor::ELEMENT_TYPE> {
 public:
  using T = typename Functor::ELEMENT_TYPE;
  void Compute(const framework::ExecutionContext& ctx) const override {
    const framework::Tensor* x = nullptr;
    framework::Tensor* out = nullptr;
    ExtractActivationTensor(ctx, &x, &out);
    out->mutable_data<T>(ctx.GetPlace());
    auto& dev_ctx = ctx.template device_context<DeviceContext>();
    std::vector<const framework::Tensor*> ins = {x};
    std::vector<framework::Tensor*> outs = {out};
    auto functor = Functor();
    auto attrs = functor.GetAttrs();
    for (auto& attr : attrs) {
      *attr.second = ctx.Attr<float>(attr.first);
    }
    LaunchElementwiseCudaKernel<ElementwiseType::kUnary, T, T>(dev_ctx, ins,
                                                               &outs, functor);
  }
};

template <typename DeviceContext, typename Functor>
class ActivationGradCudaKernel
    : public framework::OpKernel<typename Functor::ELEMENT_TYPE> {
 public:
  using T = typename Functor::ELEMENT_TYPE;
  void Compute(const framework::ExecutionContext& ctx) const override {
    const framework::Tensor *x, *out, *d_out;
    framework::Tensor* d_x = nullptr;
    x = out = d_out = nullptr;
    ExtractActivationGradTensor<Functor::FwdDeps()>(ctx, &x, &out, &d_out,
                                                    &d_x);
    d_x->mutable_data<T>(ctx.GetPlace());
    auto& dev_ctx = ctx.template device_context<DeviceContext>();
    auto functor = Functor();
    auto attrs = functor.GetAttrs();
    for (auto& attr : attrs) {
      *attr.second = ctx.Attr<float>(attr.first);
    }

    std::vector<const framework::Tensor*> ins = {d_out};
    std::vector<framework::Tensor*> outs = {d_x};

    if (static_cast<int>(Functor::FwdDeps()) == static_cast<int>(kDepOut)) {
      // Only need forward output Out
      ins.push_back(out);
      LaunchElementwiseCudaKernel<ElementwiseType::kBinary, T, T>(
          dev_ctx, ins, &outs, functor);
    } else if (static_cast<int>(Functor::FwdDeps()) ==
               static_cast<int>(kDepX)) {
      // Only need forward input X
      ins.push_back(x);
      LaunchElementwiseCudaKernel<ElementwiseType::kBinary, T, T>(
          dev_ctx, ins, &outs, functor);
    } else {
      LaunchElementwiseCudaKernel<ElementwiseType::kUnary, T, T>(
          dev_ctx, ins, &outs, functor);
    }
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
namespace plat = paddle::platform;

#define REGISTER_ACTIVATION_CUDA_KERNEL(act_type, op_name, functor,     \
                                        grad_functor)                   \
  REGISTER_OP_CUDA_KERNEL(                                              \
      act_type, ops::ActivationCudaKernel<plat::CUDADeviceContext,      \
                                          ops::functor<float>>,         \
      ops::ActivationCudaKernel<plat::CUDADeviceContext,                \
                                ops::functor<double>>,                  \
      ops::ActivationCudaKernel<plat::CUDADeviceContext,                \
                                ops::functor<plat::float16>>);          \
  REGISTER_OP_CUDA_KERNEL(                                              \
      act_type##_grad,                                                  \
      ops::ActivationGradCudaKernel<plat::CUDADeviceContext,            \
                                    ops::grad_functor<float>>,          \
      ops::ActivationGradCudaKernel<plat::CUDADeviceContext,            \
                                    ops::grad_functor<double>>,         \
      ops::ActivationGradCudaKernel<plat::CUDADeviceContext,            \
                                    ops::grad_functor<plat::float16>>);

#define REGISTER_ACTIVATION_CUDA_KERNEL_INT(act_type, op_name, functor, \
                                            grad_functor)               \
  REGISTER_OP_CUDA_KERNEL(                                              \
      act_type, ops::ActivationCudaKernel<plat::CUDADeviceContext,      \
                                          ops::functor<float>>,         \
      ops::ActivationCudaKernel<plat::CUDADeviceContext,                \
                                ops::functor<double>>,                  \
      ops::ActivationCudaKernel<plat::CUDADeviceContext,                \
                                ops::functor<int>>,                     \
      ops::ActivationCudaKernel<plat::CUDADeviceContext,                \
                                ops::functor<int64_t>>,                 \
      ops::ActivationCudaKernel<plat::CUDADeviceContext,                \
                                ops::functor<plat::float16>>);          \
  REGISTER_OP_CUDA_KERNEL(                                              \
      act_type##_grad,                                                  \
      ops::ActivationGradCudaKernel<plat::CUDADeviceContext,            \
                                    ops::grad_functor<float>>,          \
      ops::ActivationGradCudaKernel<plat::CUDADeviceContext,            \
                                    ops::grad_functor<double>>,         \
      ops::ActivationGradCudaKernel<plat::CUDADeviceContext,            \
                                    ops::grad_functor<int>>,            \
      ops::ActivationGradCudaKernel<plat::CUDADeviceContext,            \
                                    ops::grad_functor<int64_t>>,        \
      ops::ActivationGradCudaKernel<plat::CUDADeviceContext,            \
                                    ops::grad_functor<plat::float16>>);
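
// For reference (editor's sketch of the expansion): a line such as
//
//   REGISTER_ACTIVATION_CUDA_KERNEL(relu, Relu, CudaReluFunctor,
//                                   CudaReluGradFunctor);
//
// registers ActivationCudaKernel<plat::CUDADeviceContext,
// ops::CudaReluFunctor<float>> plus the double and plat::float16
// instantiations under "relu", and the matching ActivationGradCudaKernel
// instantiations under "relu_grad". The _INT variant additionally registers
// int and int64_t instantiations.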

/* ======================== leaky relu register ============================ */
REGISTER_ACTIVATION_CUDA_KERNEL(leaky_relu, LeakyRelu, CudaLeakyReluFunctor,
                                CudaLeakyReluGradFunctor);

REGISTER_OP_CUDA_KERNEL(
    leaky_relu_grad_grad,
    ops::ActivationDoubleGradKernel<plat::CUDADeviceContext,
                                    ops::LeakyReluGradGradFunctor<float>>,
    ops::ActivationDoubleGradKernel<plat::CUDADeviceContext,
                                    ops::LeakyReluGradGradFunctor<double>>,
    ops::ActivationDoubleGradKernel<
        plat::CUDADeviceContext,
        ops::LeakyReluGradGradFunctor<plat::float16>>);
/* ========================================================================== */

/* ======================== elu register ============================ */
REGISTER_ACTIVATION_CUDA_KERNEL(elu, ELU, CudaELUFunctor, CudaELUGradFunctor);

REGISTER_OP_CUDA_KERNEL(
    elu_grad_grad, ops::ELUDoubleGradKernel<plat::CUDADeviceContext,
                                            ops::ELUGradGradFunctor<float>>,
    ops::ELUDoubleGradKernel<plat::CUDADeviceContext,
                             ops::ELUGradGradFunctor<double>>,
    ops::ELUDoubleGradKernel<plat::CUDADeviceContext,
                             ops::ELUGradGradFunctor<plat::float16>>);
/* ========================================================================== */

/* =========================== relu register ============================ */
REGISTER_ACTIVATION_CUDA_KERNEL(relu, Relu, CudaReluFunctor,
                                CudaReluGradFunctor);

REGISTER_OP_CUDA_KERNEL(
    relu_grad_grad,
    ops::ActivationDoubleGradKernel<plat::CUDADeviceContext,
                                    ops::ReluGradGradFunctor<float>>,
    ops::ActivationDoubleGradKernel<plat::CUDADeviceContext,
                                    ops::ReluGradGradFunctor<double>>,
    ops::ActivationDoubleGradKernel<plat::CUDADeviceContext,
                                    ops::ReluGradGradFunctor<plat::float16>>);
/* ========================================================================== */

/* =========================== tanh register ============================ */
REGISTER_ACTIVATION_CUDA_KERNEL(tanh, Tanh, CudaTanhFunctor,
                                CudaTanhGradFunctor);

REGISTER_OP_CUDA_KERNEL(
    tanh_grad_grad,
    ops::TanhDoubleGradKernel<plat::CUDADeviceContext,
                              ops::TanhGradGradFunctor<float>>,
    ops::TanhDoubleGradKernel<plat::CUDADeviceContext,
                              ops::TanhGradGradFunctor<double>>,
    ops::TanhDoubleGradKernel<plat::CUDADeviceContext,
                              ops::TanhGradGradFunctor<plat::float16>>);
/* ========================================================================== */

/* =========================== sqrt register ============================= */
REGISTER_ACTIVATION_CUDA_KERNEL(sqrt, Sqrt, CudaSqrtFunctor,
                                CudaSqrtGradFunctor);

REGISTER_OP_CUDA_KERNEL(
    sqrt_grad_grad,
    ops::SqrtDoubleGradKernel<plat::CUDADeviceContext,
                              ops::SqrtGradGradFunctor<float>>,
    ops::SqrtDoubleGradKernel<plat::CUDADeviceContext,
                              ops::SqrtGradGradFunctor<double>>,
    ops::SqrtDoubleGradKernel<plat::CUDADeviceContext,
                              ops::SqrtGradGradFunctor<plat::float16>>);
/* ========================================================================== */

/* =========================== rsqrt register ============================= */
REGISTER_ACTIVATION_CUDA_KERNEL(rsqrt, Rsqrt, CudaRsqrtFunctor,
                                CudaRsqrtGradFunctor);

REGISTER_OP_CUDA_KERNEL(
    rsqrt_grad_grad,
    ops::RsqrtDoubleGradKernel<plat::CUDADeviceContext,
                               ops::RsqrtGradGradFunctor<float>>,
    ops::RsqrtDoubleGradKernel<plat::CUDADeviceContext,
                               ops::RsqrtGradGradFunctor<double>>,
    ops::RsqrtDoubleGradKernel<plat::CUDADeviceContext,
                               ops::RsqrtGradGradFunctor<plat::float16>>);
/* ========================================================================== */

/* =========================== square register ============================ */
REGISTER_ACTIVATION_CUDA_KERNEL_INT(square, Square, CudaSquareFunctor,
                                    CudaSquareGradFunctor);

REGISTER_OP_CUDA_KERNEL(
    square_grad_grad,
    ops::SquareDoubleGradKernel<plat::CUDADeviceContext,
                                ops::SquareGradGradFunctor<float>>,
    ops::SquareDoubleGradKernel<plat::CUDADeviceContext,
                                ops::SquareGradGradFunctor<double>>,
    ops::SquareDoubleGradKernel<plat::CUDADeviceContext,
                                ops::SquareGradGradFunctor<plat::float16>>,
    ops::SquareDoubleGradKernel<plat::CUDADeviceContext,
                                ops::SquareGradGradFunctor<int>>,
    ops::SquareDoubleGradKernel<plat::CUDADeviceContext,
                                ops::SquareGradGradFunctor<int64_t>>);
/* ========================================================================== */

/* ========================== pow register ============================ */
REGISTER_OP_CUDA_KERNEL(
    pow, ops::PowKernel<plat::CUDADeviceContext, ops::PowFunctor<float>>,
    ops::PowKernel<plat::CUDADeviceContext, ops::PowFunctor<double>>,
    ops::PowKernel<plat::CUDADeviceContext, ops::PowFunctor<int>>,
    ops::PowKernel<plat::CUDADeviceContext, ops::PowFunctor<int64_t>>,
    ops::PowKernel<plat::CUDADeviceContext, ops::PowFunctor<plat::float16>>);
REGISTER_OP_CUDA_KERNEL(
    pow_grad,
    ops::PowGradKernel<plat::CUDADeviceContext, ops::PowGradFunctor<float>>,
    ops::PowGradKernel<plat::CUDADeviceContext, ops::PowGradFunctor<double>>,
    ops::PowGradKernel<plat::CUDADeviceContext, ops::PowGradFunctor<int>>,
    ops::PowGradKernel<plat::CUDADeviceContext, ops::PowGradFunctor<int64_t>>,
    ops::PowGradKernel<plat::CUDADeviceContext,
                       ops::PowGradFunctor<plat::float16>>);
/* ========================================================================== */

/* ========================== exp register ============================ */
REGISTER_OP_CUDA_KERNEL(
    exp, ops::ActivationCudaKernel<plat::CUDADeviceContext,
                                   ops::CudaExpFunctor<float>>,
    ops::ActivationCudaKernel<plat::CUDADeviceContext,
                              ops::CudaExpFunctor<double>>,
    ops::ActivationKernel<plat::CUDADeviceContext, ops::ExpFunctor<int>>,
    ops::ActivationKernel<plat::CUDADeviceContext, ops::ExpFunctor<int64_t>>,
    ops::ActivationCudaKernel<plat::CUDADeviceContext,
                              ops::CudaExpFunctor<plat::float16>>);
REGISTER_OP_CUDA_KERNEL(
    exp_grad, ops::ActivationGradCudaKernel<plat::CUDADeviceContext,
                                            ops::CudaExpGradFunctor<float>>,
    ops::ActivationGradCudaKernel<plat::CUDADeviceContext,
                                  ops::CudaExpGradFunctor<double>>,
    ops::ActivationGradCudaKernel<plat::CUDADeviceContext,
                                  ops::CudaExpGradFunctor<int>>,
    ops::ActivationGradCudaKernel<plat::CUDADeviceContext,
                                  ops::CudaExpGradFunctor<int64_t>>,
    ops::ActivationGradCudaKernel<plat::CUDADeviceContext,
                                  ops::CudaExpGradFunctor<plat::float16>>);
/* ========================================================================== */

/* ========================== Log register ==================================*/
REGISTER_ACTIVATION_CUDA_KERNEL(log, Log, CudaLogFunctor, CudaLogGradFunctor);

REGISTER_OP_CUDA_KERNEL(
    log_grad_grad, ops::LogDoubleGradKernel<plat::CUDADeviceContext,
                                            ops::LogGradGradFunctor<float>>,
    ops::LogDoubleGradKernel<plat::CUDADeviceContext,
                             ops::LogGradGradFunctor<double>>,
    ops::LogDoubleGradKernel<plat::CUDADeviceContext,
                             ops::LogGradGradFunctor<plat::float16>>);
/* ========================================================================== */

#define FOR_EACH_ACTIVATION_CUDA_OP(__macro)                                  \
  __macro(sigmoid, Sigmoid, CudaSigmoidFunctor, CudaSigmoidGradFunctor);      \
  __macro(silu, Silu, CudaSiluFunctor, CudaSiluGradFunctor);                  \
  __macro(logsigmoid, LogSigmoid, CudaLogSigmoidFunctor,                      \
          CudaLogSigmoidGradFunctor);                                         \
  __macro(atan, Atan, CudaAtanFunctor, CudaAtanGradFunctor);                  \
  __macro(softshrink, SoftShrink, CudaSoftShrinkFunctor,                      \
          CudaSoftShrinkGradFunctor);                                         \
  __macro(ceil, Ceil, CudaCeilFunctor, CudaZeroGradFunctor);                  \
  __macro(floor, Floor, CudaFloorFunctor, CudaZeroGradFunctor);               \
  __macro(cos, Cos, CudaCosFunctor, CudaCosGradFunctor);                      \
  __macro(tan, Tan, CudaTanFunctor, CudaTanGradFunctor);                      \
  __macro(acos, Acos, CudaAcosFunctor, CudaAcosGradFunctor);                  \
  __macro(sin, Sin, CudaSinFunctor, CudaSinGradFunctor);                      \
  __macro(asin, Asin, CudaAsinFunctor, CudaAsinGradFunctor);                  \
  __macro(sinh, Sinh, CudaSinhFunctor, CudaSinhGradFunctor);                  \
  __macro(cosh, Cosh, CudaCoshFunctor, CudaCoshGradFunctor);                  \
  __macro(round, Round, CudaRoundFunctor, CudaZeroGradFunctor);               \
  __macro(reciprocal, Reciprocal, CudaReciprocalFunctor,                      \
          CudaReciprocalGradFunctor);                                         \
  __macro(log1p, Log1p, CudaLog1pFunctor, CudaLog1pGradFunctor);              \
  __macro(log2, Log2, CudaLog2Functor, CudaLog2GradFunctor);                  \
  __macro(log10, Log10, CudaLog10Functor, CudaLog10GradFunctor);              \
  __macro(brelu, BRelu, CudaBReluFunctor, CudaBReluGradFunctor);              \
  __macro(soft_relu, SoftRelu, CudaSoftReluFunctor, CudaSoftReluGradFunctor); \
  __macro(stanh, STanh, CudaSTanhFunctor, CudaSTanhGradFunctor);              \
  __macro(softplus, Softplus, CudaSoftplusFunctor, CudaSoftplusGradFunctor);  \
  __macro(softsign, Softsign, CudaSoftsignFunctor, CudaSoftsignGradFunctor);  \
  __macro(relu6, Relu6, CudaRelu6Functor, CudaRelu6GradFunctor);              \
  __macro(tanh_shrink, TanhShrink, CudaTanhShrinkFunctor,                     \
          CudaTanhShrinkGradFunctor);                                         \
  __macro(hard_shrink, HardShrink, CudaHardShrinkFunctor,                     \
          CudaHardShrinkGradFunctor);                                         \
  __macro(hard_sigmoid, HardSigmoid, CudaHardSigmoidFunctor,                  \
          CudaHardSigmoidGradFunctor);                                        \
  __macro(swish, Swish, CudaSwishFunctor, CudaSwishGradFunctor);              \
  __macro(thresholded_relu, ThresholdedRelu, CudaThresholdedReluFunctor,      \
          CudaThresholdedReluGradFunctor);                                    \
  __macro(hard_swish, HardSwish, CudaHardSwishFunctor,                        \
          CudaHardSwishGradFunctor);

FOR_EACH_ACTIVATION_CUDA_OP(REGISTER_ACTIVATION_CUDA_KERNEL)