diff --git a/paddle/fluid/operators/abs_op.cu b/paddle/fluid/operators/abs_op.cu index 94b0a3ae729380c90f1351df3b2d42d34d88be57..b0eba229fde51841542b5d8d1d73330b40bd29f0 100644 --- a/paddle/fluid/operators/abs_op.cu +++ b/paddle/fluid/operators/abs_op.cu @@ -24,15 +24,15 @@ struct CudaAbsFunctor; template struct CudaAbsFunctor>> { - __device__ __forceinline__ math::Real operator()(const T& x) const { - return abs(x); + __device__ __forceinline__ math::Real operator()(const T* args) const { + return abs(args[0]); } }; template struct CudaAbsFunctor>> { - __device__ __forceinline__ T operator()(const T& x) const { - return std::abs(x); + __device__ __forceinline__ T operator()(const T* args) const { + return std::abs(args[0]); } }; diff --git a/paddle/fluid/operators/activation_op.cu b/paddle/fluid/operators/activation_op.cu index 72f10bf19e733abbedd29f1facd00916756cb474..6c02450479141b2de670b09b0e0346161d5a7128 100644 --- a/paddle/fluid/operators/activation_op.cu +++ b/paddle/fluid/operators/activation_op.cu @@ -24,8 +24,9 @@ struct CudaReluFunctor : public BaseActivationFunctor { T zero = static_cast(0.0f); // relu(x) = max(x, 0) - __device__ __forceinline__ T operator()(const T& x) const { - return x > zero ? x : zero; + // Inputs: args[0], the input x + __device__ __forceinline__ T operator()(const T* args) const { + return args[0] > zero ? args[0] : zero; } }; @@ -34,8 +35,10 @@ struct CudaReluGradFunctor : public BaseActivationFunctor { T zero = static_cast(0.0f); // dx = dout * (out > 0) - __device__ __forceinline__ T operator()(const T& dout, const T& out) const { - return out > zero ? dout : zero; + // Inputs: args[0], the input dout + // args[1], the input out + __device__ __forceinline__ T operator()(const T* args) const { + return args[1] > zero ? args[0] : zero; } static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; } @@ -51,8 +54,9 @@ struct CudaLeakyReluFunctor : public BaseActivationFunctor { } // leakyrelu(x) = x > 0 ? x : alpha * x - __device__ __forceinline__ T operator()(const T& x) const { - return x > zero ? x : static_cast(alpha) * x; + // Inputs: args[0], the input x + __device__ __forceinline__ T operator()(const T* args) const { + return args[0] > zero ? args[0] : static_cast(alpha) * args[0]; } }; @@ -66,8 +70,10 @@ struct CudaLeakyReluGradFunctor : public BaseActivationFunctor { } // dx = dout * (x > 0 ? 1 : alpha) - __device__ __forceinline__ T operator()(const T& dout, const T& x) const { - return x > zero ? dout : static_cast(alpha) * dout; + // Inputs: args[0], the input dout + // args[1], the input x + __device__ __forceinline__ T operator()(const T* args) const { + return args[1] > zero ? 
args[0] : static_cast(alpha) * args[0]; } static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; } @@ -79,8 +85,9 @@ struct CudaSigmoidFunctor : public BaseActivationFunctor { MPType one = static_cast(1.0f); // sigmoid(x) = 1 / (1 + exp(-x)) - __device__ __forceinline__ T operator()(const T& arg_x) const { - MPType x = static_cast(arg_x); + // Inputs: args[0], the input x + __device__ __forceinline__ T operator()(const T* args) const { + MPType x = static_cast(args[0]); return static_cast(one / (one + exp(-x))); } }; @@ -90,8 +97,10 @@ struct CudaSigmoidGradFunctor : public BaseActivationFunctor { T one = static_cast(1.0f); // dx = dout * out * (1 - out) - __device__ __forceinline__ T operator()(const T& dout, const T& out) const { - return dout * out * (one - out); + // Inputs: args[0], the input dout + // args[1], the input out + __device__ __forceinline__ T operator()(const T* args) const { + return args[0] * args[1] * (one - args[1]); } static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; } @@ -99,12 +108,14 @@ struct CudaSigmoidGradFunctor : public BaseActivationFunctor { template struct CudaSiluFunctor : public BaseActivationFunctor { + // MPType means Compute Type using MPType = typename details::MPTypeTrait::Type; MPType one = static_cast(1.0f); // silu(x) = x / (1 + exp(-x)) - __device__ __forceinline__ T operator()(const T& arg_x) const { - MPType x = static_cast(arg_x); + // Inputs: args[0], the input x + __device__ __forceinline__ T operator()(const T* args) const { + MPType x = static_cast(args[0]); return static_cast(x / (one + exp(-x))); } }; @@ -115,10 +126,11 @@ struct CudaSiluGradFunctor : public BaseActivationFunctor { MPType one = static_cast(1.0f); // dx = dout * (1 + exp(-x) + x * exp(-x) / (1 + exp(-x))^2) - __device__ __forceinline__ T operator()(const T& arg_dout, - const T& arg_x) const { - MPType dout = static_cast(arg_dout); - MPType x = static_cast(arg_x); + // Inputs: args[0], the input dout + // args[1], the input x + __device__ __forceinline__ T operator()(const T* args) const { + MPType dout = static_cast(args[0]); + MPType x = static_cast(args[1]); MPType temp = one / (one + exp(-x)); return static_cast(dout * (temp * (one + x * (one - temp)))); } @@ -135,8 +147,9 @@ struct CudaLogSigmoidFunctor : public BaseActivationFunctor { // For numerical stability, // logsigmoid(x) = // - (max(-x, 0) + log(exp(-max(-x, 0)) + exp(-x - max(-x, 0)))) - __device__ __forceinline__ T operator()(const T& arg_x) const { - MPType x = static_cast(arg_x); + // Inputs: args[0], the input x + __device__ __forceinline__ T operator()(const T* args) const { + MPType x = static_cast(args[0]); MPType temp = x > zero ? zero : -x; return static_cast(-temp - log(exp(-temp) + exp(-x - temp))); } @@ -151,10 +164,11 @@ struct CudaLogSigmoidGradFunctor : public BaseActivationFunctor { // For numerical stability: // dx = dout * exp(-x - max(-x, 0)) / (exp(-max(-x, 0)) + exp(-x - max(-x, // 0))) - __device__ __forceinline__ T operator()(const T& arg_dout, - const T& arg_x) const { - MPType dout = static_cast(arg_dout); - MPType x = static_cast(arg_x); + // Inputs: args[0], the input dout + // args[1], the input x + __device__ __forceinline__ T operator()(const T* args) const { + MPType dout = static_cast(args[0]); + MPType x = static_cast(args[1]); MPType temp1 = x > zero ? 
zero : -x; MPType temp2 = exp(-x - temp1); return static_cast(dout * (temp2 / (exp(-temp1) + temp2))); @@ -168,8 +182,9 @@ struct CudaAtanFunctor : public BaseActivationFunctor { using MPType = typename details::MPTypeTrait::Type; // atan(x) = atan(x) - __device__ __forceinline__ T operator()(const T& arg_x) const { - MPType x = static_cast(arg_x); + // Inputs: args[0], the input x + __device__ __forceinline__ T operator()(const T* args) const { + MPType x = static_cast(args[0]); return static_cast(atan(x)); } }; @@ -179,8 +194,10 @@ struct CudaAtanGradFunctor : public BaseActivationFunctor { T one = static_cast(1.0f); // dx = dout / (1 + x^2) - __device__ __forceinline__ T operator()(const T& dout, const T& x) const { - return dout / (one + x * x); + // Inputs: args[0], the input dout + // args[1], the input x + __device__ __forceinline__ T operator()(const T* args) const { + return args[0] / (one + args[1] * args[1]); } static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; } @@ -197,7 +214,9 @@ struct CudaSoftShrinkFunctor : public BaseActivationFunctor { // softshrink(x) = x - lambda, if x > lambda; // x + lambda, if x < -lambda; // 0, otherwise. - __device__ __forceinline__ T operator()(const T& x) const { + // Inputs: args[0], the input x + __device__ __forceinline__ T operator()(const T* args) const { + T x = args[0]; T l = static_cast(lambda); T temp1 = static_cast(x > l); T temp2 = static_cast(x < -l); @@ -215,9 +234,12 @@ struct CudaSoftShrinkGradFunctor : public BaseActivationFunctor { } // dx = dout, if x > lambda or x < -lambda else 0 - __device__ __forceinline__ T operator()(const T& dout, const T& x) const { + // Inputs: args[0], the input dout + // args[1], the input x + __device__ __forceinline__ T operator()(const T* args) const { + T x = args[1]; T l = static_cast(lambda); - return (x >= -l && x <= l) ? zero : dout; + return (x >= -l && x <= l) ? 
zero : args[0]; } static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; } @@ -228,8 +250,9 @@ struct CudaCeilFunctor : public BaseActivationFunctor { using MPType = typename details::MPTypeTrait::Type; // ceil(x) = ceil(x) - __device__ __forceinline__ T operator()(const T& arg_x) const { - MPType x = static_cast(arg_x); + // Inputs: args[0], the input x + __device__ __forceinline__ T operator()(const T* args) const { + MPType x = static_cast(args[0]); return static_cast(ceil(x)); } }; @@ -239,8 +262,9 @@ struct CudaFloorFunctor : public BaseActivationFunctor { using MPType = typename details::MPTypeTrait::Type; // floor(x) = floor(x) - __device__ __forceinline__ T operator()(const T& arg_x) const { - MPType x = static_cast(arg_x); + // Inputs: args[0], the input x + __device__ __forceinline__ T operator()(const T* args) const { + MPType x = static_cast(args[0]); return static_cast(floor(x)); } }; @@ -250,16 +274,17 @@ struct CudaRoundFunctor : public BaseActivationFunctor { using MPType = typename details::MPTypeTrait::Type; // round(x) = round(x) - __device__ __forceinline__ T operator()(const T& arg_x) const { - MPType x = static_cast(arg_x); + // Inputs: args[0], the input x + __device__ __forceinline__ T operator()(const T* args) const { + MPType x = static_cast(args[0]); return static_cast(round(x)); } }; -// GradFunctor for ceil, floor and round +// grad functor for ceil, floor and round template struct CudaZeroGradFunctor : public BaseActivationFunctor { - __device__ __forceinline__ T operator()(const T& x) const { + __device__ __forceinline__ T operator()(const T* args) const { return static_cast(0.0f); } @@ -271,8 +296,9 @@ struct CudaCosFunctor : public BaseActivationFunctor { using MPType = typename details::MPTypeTrait::Type; // cos(x) = cos(x) - __device__ __forceinline__ T operator()(const T& arg_x) const { - MPType x = static_cast(arg_x); + // Inputs: args[0], the input x + __device__ __forceinline__ T operator()(const T* args) const { + MPType x = static_cast(args[0]); return static_cast(cos(x)); } }; @@ -282,10 +308,11 @@ struct CudaCosGradFunctor : public BaseActivationFunctor { using MPType = typename details::MPTypeTrait::Type; // dx = dout * (-sin(x)) - __device__ __forceinline__ T operator()(const T& arg_dout, - const T& arg_x) const { - MPType dout = static_cast(arg_dout); - MPType x = static_cast(arg_x); + // Inputs: args[0], the input dout + // args[1], the input x + __device__ __forceinline__ T operator()(const T* args) const { + MPType dout = static_cast(args[0]); + MPType x = static_cast(args[1]); return static_cast(-dout * sin(x)); } @@ -297,8 +324,9 @@ struct CudaSinFunctor : public BaseActivationFunctor { using MPType = typename details::MPTypeTrait::Type; // sin(x) = sin(x) - __device__ __forceinline__ T operator()(const T& arg_x) const { - MPType x = static_cast(arg_x); + // Inputs: args[0], the input x + __device__ __forceinline__ T operator()(const T* args) const { + MPType x = static_cast(args[0]); return static_cast(sin(x)); } }; @@ -308,10 +336,11 @@ struct CudaSinGradFunctor : public BaseActivationFunctor { using MPType = typename details::MPTypeTrait::Type; // dx = dout * cos(x) - __device__ __forceinline__ T operator()(const T& arg_dout, - const T& arg_x) const { - MPType dout = static_cast(arg_dout); - MPType x = static_cast(arg_x); + // Inputs: args[0], the input dout + // args[1], the input x + __device__ __forceinline__ T operator()(const T* args) const { + MPType dout = static_cast(args[0]); + MPType x = static_cast(args[1]); return 
static_cast(dout * cos(x)); } @@ -323,8 +352,9 @@ struct CudaTanFunctor : public BaseActivationFunctor { using MPType = typename details::MPTypeTrait::Type; // tan(x) = tan(x) - __device__ __forceinline__ T operator()(const T& arg_x) const { - MPType x = static_cast(arg_x); + // Inputs: args[0], the input x + __device__ __forceinline__ T operator()(const T* args) const { + MPType x = static_cast(args[0]); return static_cast(tan(x)); } }; @@ -334,10 +364,11 @@ struct CudaTanGradFunctor : public BaseActivationFunctor { using MPType = typename details::MPTypeTrait::Type; // dx = dout / cos(x)^2 - __device__ __forceinline__ T operator()(const T& arg_dout, - const T& arg_x) const { - MPType dout = static_cast(arg_dout); - MPType x = static_cast(arg_x); + // Inputs: args[0], the input dout + // args[1], the input x + __device__ __forceinline__ T operator()(const T* args) const { + MPType dout = static_cast(args[0]); + MPType x = static_cast(args[1]); return static_cast(dout / (cos(x) * cos(x))); } @@ -349,8 +380,9 @@ struct CudaAsinFunctor : public BaseActivationFunctor { using MPType = typename details::MPTypeTrait::Type; // asin(x) = asin(x) - __device__ __forceinline__ T operator()(const T& arg_x) const { - MPType x = static_cast(arg_x); + // Inputs: args[0], the input x + __device__ __forceinline__ T operator()(const T* args) const { + MPType x = static_cast(args[0]); return static_cast(asin(x)); } }; @@ -361,10 +393,11 @@ struct CudaAsinGradFunctor : public BaseActivationFunctor { MPType one = static_cast(1.0f); // dx = dout / sqrt(1 - x^2) - __device__ __forceinline__ T operator()(const T& arg_dout, - const T& arg_x) const { - MPType dout = static_cast(arg_dout); - MPType x = static_cast(arg_x); + // Inputs: args[0], the input dout + // args[1], the input x + __device__ __forceinline__ T operator()(const T* args) const { + MPType dout = static_cast(args[0]); + MPType x = static_cast(args[1]); return static_cast(dout / sqrt(one - x * x)); } @@ -376,8 +409,9 @@ struct CudaAcosFunctor : public BaseActivationFunctor { using MPType = typename details::MPTypeTrait::Type; // acos(x) = acos(x) - __device__ __forceinline__ T operator()(const T& arg_x) const { - MPType x = static_cast(arg_x); + // Inputs: args[0], the input x + __device__ __forceinline__ T operator()(const T* args) const { + MPType x = static_cast(args[0]); return static_cast(acos(x)); } }; @@ -388,10 +422,11 @@ struct CudaAcosGradFunctor : public BaseActivationFunctor { MPType one = static_cast(1.0f); // dx = -dout / sqrt(1 - x^2) - __device__ __forceinline__ T operator()(const T& arg_dout, - const T& arg_x) const { - MPType dout = static_cast(arg_dout); - MPType x = static_cast(arg_x); + // Inputs: args[0], the input dout + // args[1], the input x + __device__ __forceinline__ T operator()(const T* args) const { + MPType dout = static_cast(args[0]); + MPType x = static_cast(args[1]); return static_cast(-dout / sqrt(one - x * x)); } @@ -403,8 +438,9 @@ struct CudaCoshFunctor : public BaseActivationFunctor { using MPType = typename details::MPTypeTrait::Type; // cosh(x) = cosh(x) - __device__ __forceinline__ T operator()(const T& arg_x) const { - MPType x = static_cast(arg_x); + // Inputs: args[0], the input x + __device__ __forceinline__ T operator()(const T* args) const { + MPType x = static_cast(args[0]); return static_cast(cosh(x)); } }; @@ -414,10 +450,11 @@ struct CudaCoshGradFunctor : public BaseActivationFunctor { using MPType = typename details::MPTypeTrait::Type; // dx = dout * sinh(x) - __device__ __forceinline__ T 
operator()(const T& arg_dout, - const T& arg_x) const { - MPType dout = static_cast(arg_dout); - MPType x = static_cast(arg_x); + // Inputs: args[0], the input dout + // args[1], the input x + __device__ __forceinline__ T operator()(const T* args) const { + MPType dout = static_cast(args[0]); + MPType x = static_cast(args[1]); return static_cast(dout * sinh(x)); } @@ -429,8 +466,9 @@ struct CudaSinhFunctor : public BaseActivationFunctor { using MPType = typename details::MPTypeTrait::Type; // sinh(x) = sinh(x) - __device__ __forceinline__ T operator()(const T& arg_x) const { - MPType x = static_cast(arg_x); + // Inputs: args[0], the input x + __device__ __forceinline__ T operator()(const T* args) const { + MPType x = static_cast(args[0]); return static_cast(sinh(x)); } }; @@ -440,10 +478,11 @@ struct CudaSinhGradFunctor : public BaseActivationFunctor { using MPType = typename details::MPTypeTrait::Type; // dx = dout * cosh(x) - __device__ __forceinline__ T operator()(const T& arg_dout, - const T& arg_x) const { - MPType dout = static_cast(arg_dout); - MPType x = static_cast(arg_x); + // Inputs: args[0], the input dout + // args[1], the input x + __device__ __forceinline__ T operator()(const T* args) const { + MPType dout = static_cast(args[0]); + MPType x = static_cast(args[1]); return static_cast(dout * cosh(x)); } @@ -455,8 +494,9 @@ struct CudaTanhFunctor : public BaseActivationFunctor { using MPType = typename details::MPTypeTrait::Type; // tanh(x) = tanh(x) - __device__ __forceinline__ T operator()(const T& arg_x) const { - MPType x = static_cast(arg_x); + // Inputs: args[0], the input x + __device__ __forceinline__ T operator()(const T* args) const { + MPType x = static_cast(args[0]); return static_cast(tanh(x)); } }; @@ -466,7 +506,11 @@ struct CudaTanhGradFunctor : public BaseActivationFunctor { T one = static_cast(1.0f); // dx = dout * (1 - out^2) - __device__ __forceinline__ T operator()(const T& dout, const T& out) const { + // Inputs: args[0], the input dout + // args[1], the input out + __device__ __forceinline__ T operator()(const T* args) const { + T dout = static_cast(args[0]); + T out = static_cast(args[1]); return dout * (one - out * out); } @@ -478,14 +522,19 @@ struct CudaReciprocalFunctor : public BaseActivationFunctor { T one = static_cast(1.0f); // reciprocal(x) = 1 / x - __device__ __forceinline__ T operator()(const T& x) const { return one / x; } + // Inputs: args[0], the input x + __device__ __forceinline__ T operator()(const T* args) const { + return one / args[0]; + } }; template struct CudaReciprocalGradFunctor : public BaseActivationFunctor { // dx = -dout * out^2 - __device__ __forceinline__ T operator()(const T& dout, const T& out) const { - return -dout * out * out; + // Inputs: args[0], the input dout + // args[1], the input out + __device__ __forceinline__ T operator()(const T* args) const { + return -args[0] * args[1] * args[1]; } static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; } @@ -496,8 +545,9 @@ struct CudaExpFunctor : public BaseActivationFunctor { using MPType = typename details::MPTypeTrait::Type; // exp(x) = exp(x) - __device__ __forceinline__ T operator()(const T& arg_x) const { - MPType x = static_cast(arg_x); + // Inputs: args[0], the input x + __device__ __forceinline__ T operator()(const T* args) const { + MPType x = static_cast(args[0]); return static_cast(exp(x)); } }; @@ -505,8 +555,10 @@ struct CudaExpFunctor : public BaseActivationFunctor { template struct CudaExpGradFunctor : public BaseActivationFunctor { // dx = dout * 
out - __device__ __forceinline__ T operator()(const T& dout, const T& out) const { - return dout * out; + // Inputs: args[0], the input dout + // args[1], the input out + __device__ __forceinline__ T operator()(const T* args) const { + return args[0] * args[1]; } static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; } @@ -517,8 +569,9 @@ struct CudaExpm1Functor : public BaseActivationFunctor { using MPType = typename details::MPTypeTrait::Type; // expm1(x) = expm1(x) - __device__ __forceinline__ T operator()(const T& arg_x) const { - MPType x = static_cast(arg_x); + // Inputs: args[0], the input x + __device__ __forceinline__ T operator()(const T* args) const { + MPType x = static_cast(args[0]); return static_cast(expm1(x)); } }; @@ -526,8 +579,10 @@ struct CudaExpm1Functor : public BaseActivationFunctor { template struct CudaExpm1GradFunctor : public BaseActivationFunctor { // dx = dout * out - __device__ __forceinline__ T operator()(const T& dout, const T& out) const { - return dout * out + dout; + // Inputs: args[0], the input dout + // args[1], the input out + __device__ __forceinline__ T operator()(const T* args) const { + return args[0] * args[1] + args[0]; } static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; } @@ -538,8 +593,9 @@ struct CudaLogFunctor : public BaseActivationFunctor { using MPType = typename details::MPTypeTrait::Type; // log(x) = log(x) - __device__ __forceinline__ T operator()(const T& arg_x) const { - MPType x = static_cast(arg_x); + // Inputs: args[0], the input x + __device__ __forceinline__ T operator()(const T* args) const { + MPType x = static_cast(args[0]); return static_cast(log(x)); } }; @@ -547,8 +603,10 @@ struct CudaLogFunctor : public BaseActivationFunctor { template struct CudaLogGradFunctor : public BaseActivationFunctor { // dx = dout / x - __device__ __forceinline__ T operator()(const T& dout, const T& x) const { - return dout / x; + // Inputs: args[0], the input dout + // args[1], the input x + __device__ __forceinline__ T operator()(const T* args) const { + return args[0] / args[1]; } static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; } @@ -557,7 +615,10 @@ struct CudaLogGradFunctor : public BaseActivationFunctor { template struct CudaSquareFunctor : public BaseActivationFunctor { // square(x) = x * x - __device__ __forceinline__ T operator()(const T& x) const { return x * x; } + // Inputs: args[0], the input x + __device__ __forceinline__ T operator()(const T* args) const { + return args[0] * args[0]; + } }; template @@ -565,8 +626,10 @@ struct CudaSquareGradFunctor : public BaseActivationFunctor { T two = static_cast(2.0f); // dx = dout * 2 * x - __device__ __forceinline__ T operator()(const T& dout, const T& x) const { - return dout * two * x; + // Inputs: args[0], the input dout + // args[1], the input x + __device__ __forceinline__ T operator()(const T* args) const { + return args[0] * two * args[1]; } static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; } @@ -577,8 +640,9 @@ struct CudaSqrtFunctor : public BaseActivationFunctor { using MPType = typename details::MPTypeTrait::Type; // sqrt(x) = sqrt(x) - __device__ __forceinline__ T operator()(const T& arg_x) const { - MPType x = static_cast(arg_x); + // Inputs: args[0], the input x + __device__ __forceinline__ T operator()(const T* args) const { + MPType x = static_cast(args[0]); return static_cast(sqrt(x)); } }; @@ -588,8 +652,10 @@ struct CudaSqrtGradFunctor : public BaseActivationFunctor { T one_half = static_cast(0.5f); // dx = dout * 0.5 / out - 
__device__ __forceinline__ T operator()(const T& dout, const T& out) const { - return one_half * dout / out; + // Inputs: args[0], the input dout + // args[1], the input out + __device__ __forceinline__ T operator()(const T* args) const { + return one_half * args[0] / args[1]; } static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; } @@ -600,8 +666,9 @@ struct CudaRsqrtFunctor : public BaseActivationFunctor { using MPType = typename details::MPTypeTrait::Type; // rsqrt(x) = rsqrt(x) - __device__ __forceinline__ T operator()(const T& arg_x) const { - MPType x = static_cast(arg_x); + // Inputs: args[0], the input x + __device__ __forceinline__ T operator()(const T* args) const { + MPType x = static_cast(args[0]); return static_cast(rsqrt(x)); } }; @@ -610,9 +677,12 @@ template struct CudaRsqrtGradFunctor : public BaseActivationFunctor { T minus_one_half = static_cast(-0.5f); - // dx = -0.5 * dout * out^3 - __device__ __forceinline__ T operator()(const T& dout, const T& out) const { - return minus_one_half * dout * out * out * out; + // dx = dout * -0.5 / out^3 + // Inputs: args[0], the input dout + // args[1], the input out + __device__ __forceinline__ T operator()(const T* args) const { + T out = args[1]; + return minus_one_half * args[0] * out * out * out; } static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; } @@ -624,8 +694,9 @@ struct CudaLog1pFunctor : public BaseActivationFunctor { MPType one = static_cast(1.0f); // log1p(x) = log(1 + x) - __device__ __forceinline__ T operator()(const T& arg_x) const { - MPType x = static_cast(arg_x); + // Inputs: args[0], the input x + __device__ __forceinline__ T operator()(const T* args) const { + MPType x = static_cast(args[0]); return static_cast(log(one + x)); } }; @@ -635,8 +706,10 @@ struct CudaLog1pGradFunctor : public BaseActivationFunctor { T one = static_cast(1.0f); // dx = dout / (1 + x) - __device__ __forceinline__ T operator()(const T& dout, const T& x) const { - return dout / (one + x); + // Inputs: args[0], the input dout + // args[1], the input x + __device__ __forceinline__ T operator()(const T* args) const { + return args[0] / (one + args[1]); } static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; } @@ -647,8 +720,9 @@ struct CudaLog2Functor : public BaseActivationFunctor { using MPType = typename details::MPTypeTrait::Type; // log2(x) = log2(x) - __device__ __forceinline__ T operator()(const T& arg_x) const { - MPType x = static_cast(arg_x); + // Inputs: args[0], the input x + __device__ __forceinline__ T operator()(const T* args) const { + MPType x = static_cast(args[0]); return static_cast(log2(x)); } }; @@ -659,8 +733,10 @@ struct CudaLog2GradFunctor : public BaseActivationFunctor { T log_two = static_cast(log(static_cast(2.0f))); // dx = dout / (x * log(2)) - __device__ __forceinline__ T operator()(const T& dout, const T& x) const { - return dout / (x * log_two); + // Inputs: args[0], the input dout + // args[1], the input x + __device__ __forceinline__ T operator()(const T* args) const { + return args[0] / (args[1] * log_two); } static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; } @@ -671,8 +747,9 @@ struct CudaLog10Functor : public BaseActivationFunctor { using MPType = typename details::MPTypeTrait::Type; // log10(x) = log10(x) - __device__ __forceinline__ T operator()(const T& arg_x) const { - MPType x = static_cast(arg_x); + // Inputs: args[0], the input x + __device__ __forceinline__ T operator()(const T* args) const { + MPType x = static_cast(args[0]); return static_cast(log10(x)); } }; @@ 
-683,8 +760,10 @@ struct CudaLog10GradFunctor : public BaseActivationFunctor { T log_ten = static_cast(log(static_cast(10.0f))); // dx = dout / (x * log(10)) - __device__ __forceinline__ T operator()(const T& dout, const T& x) const { - return dout / (x * log_ten); + // Inputs: args[0], the input dout + // args[1], the input x + __device__ __forceinline__ T operator()(const T* args) const { + return args[0] / (args[1] * log_ten); } static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; } @@ -700,7 +779,9 @@ struct CudaBReluFunctor : public BaseActivationFunctor { } // brelu(x) = min(max(x, t_min), t_max) - __device__ __forceinline__ T operator()(const T& x) const { + // Inputs: args[0], the input x + __device__ __forceinline__ T operator()(const T* args) const { + T x = args[0]; T t_min_cast = static_cast(t_min); T t_max_cast = static_cast(t_max); T temp_max = x > t_min_cast ? x : t_min_cast; @@ -720,7 +801,11 @@ struct CudaBReluGradFunctor : public BaseActivationFunctor { } // dx = (x > t_min && x < t_max) ? dout : 0 - __device__ __forceinline__ T operator()(const T& dout, const T& x) const { + // Inputs: args[0], the input dout + // args[1], the input x + __device__ __forceinline__ T operator()(const T* args) const { + T dout = args[0]; + T x = args[1]; T t_min_cast = static_cast(t_min); T t_max_cast = static_cast(t_max); return (x > t_min_cast && x < t_max_cast) ? dout : zero; @@ -740,9 +825,10 @@ struct CudaSoftReluFunctor : public BaseActivationFunctor { } // soft_relu(x) = log(1 + exp(max(min(x, threshold), -threshold))) + // Inputs: args[0], the input x // threshold should not be negative - __device__ __forceinline__ T operator()(const T& arg_x) const { - MPType x = static_cast(arg_x); + __device__ __forceinline__ T operator()(const T* args) const { + MPType x = static_cast(args[0]); MPType t = static_cast(threshold); MPType temp_min = x < t ? x : t; MPType temp_max = temp_min > -t ? temp_min : -t; @@ -761,11 +847,12 @@ struct CudaSoftReluGradFunctor : public BaseActivationFunctor { } // dx = (out > -threshold && out < threshold) ? dout * (1 - exp(-out)) : 0 + // Inputs: args[0], the input dout + // args[1], the input out // threshold should not be negative - __device__ __forceinline__ T operator()(const T& arg_dout, - const T& arg_out) const { - MPType dout = static_cast(arg_dout); - MPType out = static_cast(arg_out); + __device__ __forceinline__ T operator()(const T* args) const { + MPType dout = static_cast(args[0]); + MPType out = static_cast(args[1]); MPType t = static_cast(threshold); return (out > -t && out < t) ? 
static_cast(dout * (one - exp(-out))) : static_cast(0.0f); @@ -785,8 +872,9 @@ struct CudaSTanhFunctor : public BaseActivationFunctor { } // stanh(x) = b * tanh(a * x) - __device__ __forceinline__ T operator()(const T& arg_x) const { - MPType x = static_cast(arg_x); + // Inputs: args[0], the input x + __device__ __forceinline__ T operator()(const T* args) const { + MPType x = static_cast(args[0]); MPType a = static_cast(scale_a); MPType b = static_cast(scale_b); return static_cast(b * tanh(a * x)); @@ -805,10 +893,11 @@ struct CudaSTanhGradFunctor : public BaseActivationFunctor { } // dx = dout * a * b * (1 - tanh(a * x) * tanh(a * x)) - __device__ __forceinline__ T operator()(const T& arg_dout, - const T& arg_x) const { - MPType dout = static_cast(arg_dout); - MPType x = static_cast(arg_x); + // Inputs: args[0], the input dout + // args[1], the input x + __device__ __forceinline__ T operator()(const T* args) const { + MPType dout = static_cast(args[0]); + MPType x = static_cast(args[1]); MPType a = static_cast(scale_a); MPType b = static_cast(scale_b); MPType temp = tanh(a * x); @@ -830,8 +919,9 @@ struct CudaSoftplusFunctor : public BaseActivationFunctor { } // softplus(x) = beta * x > threshold ? x : log(1 + exp(beta * x)) / beta - __device__ __forceinline__ T operator()(const T& arg_x) const { - MPType x = static_cast(arg_x); + // Inputs: args[0], the input x + __device__ __forceinline__ T operator()(const T* args) const { + MPType x = static_cast(args[0]); MPType b = static_cast(beta); MPType t = static_cast(threshold); MPType x_beta = x * beta; @@ -851,14 +941,15 @@ struct CudaSoftplusGradFunctor : public BaseActivationFunctor { } // dx = x * beta > threshold ? dout : dout / (1 + exp(-beta * x)) - __device__ __forceinline__ T operator()(const T& arg_dout, - const T& arg_x) const { - MPType dout = static_cast(arg_dout); - MPType x = static_cast(arg_x); + // Inputs: args[0], the input dout + // args[1], the input x + __device__ __forceinline__ T operator()(const T* args) const { + MPType dout = static_cast(args[0]); + MPType x = static_cast(args[1]); MPType b = static_cast(beta); MPType t = static_cast(threshold); MPType x_beta = x * beta; - return x_beta > t ? arg_dout : static_cast(dout / (one + exp(-x_beta))); + return x_beta > t ? 
args[0] : static_cast(dout / (one + exp(-x_beta))); } static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; } @@ -869,8 +960,9 @@ struct CudaSoftsignFunctor : public BaseActivationFunctor { T one = static_cast(1.0f); // softsign(x) = x / (1 + abs(x)) - __device__ __forceinline__ T operator()(const T& x) const { - return x / (one + abs(x)); + // Inputs: args[0], the input x + __device__ __forceinline__ T operator()(const T* args) const { + return args[0] / (one + abs(args[0])); } }; @@ -879,9 +971,11 @@ struct CudaSoftsignGradFunctor : public BaseActivationFunctor { T one = static_cast(1.0f); // dx = dout / (1 + abs(x))^2 - __device__ __forceinline__ T operator()(const T& dout, const T& x) const { - T temp = one + abs(x); - return dout / (temp * temp); + // Inputs: args[0], the input dout + // args[1], the input x + __device__ __forceinline__ T operator()(const T* args) const { + T temp = one + abs(args[1]); + return args[0] / (temp * temp); } static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; } @@ -897,9 +991,10 @@ struct CudaRelu6Functor : public BaseActivationFunctor { } // relu6(x) = min(max(0, x), 6) - __device__ __forceinline__ T operator()(const T& x) const { + // Inputs: args[0], the input x + __device__ __forceinline__ T operator()(const T* args) const { T t = static_cast(threshold); - return x <= zero ? zero : (x < t ? x : t); + return args[0] <= zero ? zero : (args[0] < t ? args[0] : t); } }; @@ -913,9 +1008,11 @@ struct CudaRelu6GradFunctor : public BaseActivationFunctor { } // dx = (out > 0 && out < t) ? dout : 0 - __device__ __forceinline__ T operator()(const T& dout, const T& out) const { + // Inputs: args[0], the input dout + // args[1], the input out + __device__ __forceinline__ T operator()(const T* args) const { T t = static_cast(threshold); - return (out > zero && out < t) ? dout : zero; + return (args[1] > zero && args[1] < t) ? args[0] : zero; } static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; } @@ -926,8 +1023,9 @@ struct CudaTanhShrinkFunctor : public BaseActivationFunctor { using MPType = typename details::MPTypeTrait::Type; // tanhshrink(x) = x - tanh(x) - __device__ __forceinline__ T operator()(const T& arg_x) const { - MPType x = static_cast(arg_x); + // Inputs: args[0], the input x + __device__ __forceinline__ T operator()(const T* args) const { + MPType x = static_cast(args[0]); return static_cast(x - tanh(x)); } }; @@ -937,10 +1035,11 @@ struct CudaTanhShrinkGradFunctor : public BaseActivationFunctor { using MPType = typename details::MPTypeTrait::Type; // dx = dout * tanh(x)^2 - __device__ __forceinline__ T operator()(const T& arg_dout, - const T& arg_x) const { - MPType dout = static_cast(arg_dout); - MPType x = static_cast(arg_x); + // Inputs: args[0], the input dout + // args[1], the input x + __device__ __forceinline__ T operator()(const T* args) const { + MPType dout = static_cast(args[0]); + MPType x = static_cast(args[1]); return static_cast(dout * tanh(x) * tanh(x)); } @@ -957,7 +1056,9 @@ struct CudaHardShrinkFunctor : public BaseActivationFunctor { } // hadrshrink(x) = (x > -threshold && x < threshold) ? 0 : x - __device__ __forceinline__ T operator()(const T& x) const { + // Inputs: args[0], the input x + __device__ __forceinline__ T operator()(const T* args) const { + T x = args[0]; T t = static_cast(threshold); return (x > -t && x < t) ? zero : x; } @@ -973,9 +1074,12 @@ struct CudaHardShrinkGradFunctor : public BaseActivationFunctor { } // dx = (x > -threshold && x < threshold) ? 
0 : dout - __device__ __forceinline__ T operator()(const T& dout, const T& x) const { + // Inputs: args[0], the input dout + // args[1], the input x + __device__ __forceinline__ T operator()(const T* args) const { + T x = args[1]; T t = static_cast(threshold); - return (x > -t && x < t) ? zero : dout; + return (x > -t && x < t) ? zero : args[0]; } static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; } @@ -995,8 +1099,9 @@ struct CudaHardSigmoidFunctor : public BaseActivationFunctor { // hard_sigmoid(x) = 0, when x <= -3 // 1, when x >= 3 // x * slope + offset, otherwise - __device__ __forceinline__ T operator()(const T& x) const { - T temp = x * static_cast(slope) + static_cast(offset); + // Inputs: args[0], the input x + __device__ __forceinline__ T operator()(const T* args) const { + T temp = args[0] * static_cast(slope) + static_cast(offset); T temp_max = temp > zero ? temp : zero; T temp_min = temp_max < one ? temp_max : one; return temp_min; @@ -1015,8 +1120,11 @@ struct CudaHardSigmoidGradFunctor : public BaseActivationFunctor { } // dx = (out > 0 && out < 1) ? dout * slope : 0 - __device__ __forceinline__ T operator()(const T& dout, const T& out) const { - return (out > zero && out < one) ? dout * static_cast(slope) : zero; + // Inputs: args[0], the input dout + // args[1], the input out + __device__ __forceinline__ T operator()(const T* args) const { + T out = args[1]; + return (out > zero && out < one) ? args[0] * static_cast(slope) : zero; } static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepOut; } @@ -1033,8 +1141,9 @@ struct CudaSwishFunctor : public BaseActivationFunctor { } // swish(x) = x / (1 + exp(-beta * x)) - __device__ __forceinline__ T operator()(const T& arg_x) const { - MPType x = static_cast(arg_x); + // Inputs: args[0], the input x + __device__ __forceinline__ T operator()(const T* args) const { + MPType x = static_cast(args[0]); MPType b = static_cast(beta); return static_cast(x / (one + exp(-b * x))); } @@ -1051,10 +1160,11 @@ struct CudaSwishGradFunctor : public BaseActivationFunctor { } // dx = dout * (1 + exp(-b * x) + b * x * exp(-b * x) / (1 + exp(-b * x))^2) - __device__ __forceinline__ T operator()(const T& arg_dout, - const T& arg_x) const { - MPType dout = static_cast(arg_dout); - MPType x = static_cast(arg_x); + // Inputs: args[0], the input dout + // args[1], the input x + __device__ __forceinline__ T operator()(const T* args) const { + MPType dout = static_cast(args[0]); + MPType x = static_cast(args[1]); MPType b = static_cast(beta); MPType temp1 = one / (one + exp(-b * x)); MPType out = x * temp1; @@ -1076,8 +1186,9 @@ struct CudaThresholdedReluFunctor : public BaseActivationFunctor { } // thresholded_relu(x) = x > threshold ? x : 0 - __device__ __forceinline__ T operator()(const T& x) const { - return x > static_cast(threshold) ? x : zero; + // Inputs: args[0], the input x + __device__ __forceinline__ T operator()(const T* args) const { + return args[0] > static_cast(threshold) ? args[0] : zero; } }; @@ -1091,8 +1202,10 @@ struct CudaThresholdedReluGradFunctor : public BaseActivationFunctor { } // dx = x > threshold ? dout : 0 - __device__ __forceinline__ T operator()(const T& dout, const T& x) const { - return x > static_cast(threshold) ? dout : zero; + // Inputs: args[0], the input dout + // args[1], the input x + __device__ __forceinline__ T operator()(const T* args) const { + return args[1] > static_cast(threshold) ? 
args[0] : zero; } static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; } @@ -1113,7 +1226,9 @@ struct CudaHardSwishFunctor : public BaseActivationFunctor { // x , when x >= threshold - offset // x * (x + offset) / scale, otherwise // threshold = scale = 6, offset = 3 by default - __device__ __forceinline__ T operator()(const T& x) const { + // Inputs: args[0], the input x + __device__ __forceinline__ T operator()(const T* args) const { + T x = args[0]; T t = static_cast(threshold); T temp = x + static_cast(offset); T temp_max = temp > zero ? temp : zero; @@ -1139,12 +1254,15 @@ struct CudaHardSwishGradFunctor : public BaseActivationFunctor { // dout , when x >= threshold - offset // dout * (2 * x / scale + offset / scale), otherwise // threshold = scale = 6, offset = 3 by default - __device__ __forceinline__ T operator()(const T& dout, const T& x) const { + // Inputs: args[0], the input dout + // args[1], the input x + __device__ __forceinline__ T operator()(const T* args) const { + T x = args[1]; T o = static_cast(offset); T s = static_cast(scale); T temp1 = static_cast(x + o > zero); T temp2 = static_cast(x + o < static_cast(threshold)); - return dout * (temp1 * temp2 * (two * x + o) / s + one - temp2); + return args[0] * (temp1 * temp2 * (two * x + o) / s + one - temp2); } static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; } @@ -1162,8 +1280,9 @@ struct CudaELUFunctor : public BaseActivationFunctor { } // elu(x) = max(0, x) + min(0, alpha * (exp(x) - 1)) - __device__ __forceinline__ T operator()(const T& arg_x) const { - CT x = static_cast(arg_x); + // Inputs: args[0], the input x + __device__ __forceinline__ T operator()(const T* args) const { + CT x = static_cast(args[0]); CT temp = static_cast(alpha) * (exp(x) - one); CT res = (x > zero ? x : zero) + (temp > zero ? zero : temp); return static_cast(res); @@ -1185,10 +1304,11 @@ struct CudaELUGradFunctor : public BaseActivationFunctor { // dx = dout * alpha * x.exp(), if alpha > 0 and x <= 0 // dx = dout * (1 + alpha * x.exp()), if alpha <= 0 and x > 0 // dx = 0, if alpha <= 0 and x <=0 - __device__ __forceinline__ T operator()(const T& arg_dout, - const T& arg_x) const { - MPType dout = static_cast(arg_dout); - MPType x = static_cast(arg_x); + // Inputs: args[0], the input dout + // args[1], the input x + __device__ __forceinline__ T operator()(const T* args) const { + MPType dout = static_cast(args[0]); + MPType x = static_cast(args[1]); MPType a = static_cast(alpha); MPType temp_a_pos = static_cast(alpha > 0.0f); MPType temp_a_neg = static_cast(alpha <= 0.0f); diff --git a/paddle/fluid/operators/controlflow/bitwise_op.cu b/paddle/fluid/operators/controlflow/bitwise_op.cu index 2f4098c2608220944eee62f48b4a029de4f7c00c..b549f7e33005e33a2f73e0617beb2a8b12dd1245 100644 --- a/paddle/fluid/operators/controlflow/bitwise_op.cu +++ b/paddle/fluid/operators/controlflow/bitwise_op.cu @@ -18,46 +18,60 @@ limitations under the License. 
*/ namespace paddle { namespace operators { -template -class BinaryBitwiseOpKernel - : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext& ctx) const override { - using T = typename Functor::ELEM_TYPE; +#define BITWISE_BINARY_FUNCTOR(func, expr, bool_expr) \ + template \ + struct Bitwise##func##CUDAFunctor { \ + using ELEM_TYPE = T; \ + HOSTDEVICE T operator()(const T* args) const { \ + return args[0] expr args[1]; \ + } \ + }; \ + \ + template <> \ + struct Bitwise##func##CUDAFunctor { \ + using ELEM_TYPE = bool; \ + HOSTDEVICE bool operator()(const bool* args) const { \ + return args[0] bool_expr args[1]; \ + } \ + }; - auto* x = ctx.Input("X"); - auto* y = ctx.Input("Y"); - auto* out = ctx.Output("Out"); - out->mutable_data(ctx.GetPlace()); +BITWISE_BINARY_FUNCTOR(And, &, &&) +BITWISE_BINARY_FUNCTOR(Or, |, ||) +BITWISE_BINARY_FUNCTOR(Xor, ^, !=) +#undef BITWISE_BINARY_FUNCTOR - auto functor = Functor(); - std::vector ins = {x, y}; - std::vector outs = {out}; - const auto& cuda_ctx = - ctx.template device_context(); - LaunchElementwiseCudaKernel( - cuda_ctx, ins, &outs, -1, functor); - } +template +struct BitwiseNotCUDAFunctor { + using ELEM_TYPE = T; + HOSTDEVICE T operator()(const T* args) const { return ~args[0]; } +}; + +template <> +struct BitwiseNotCUDAFunctor { + using ELEM_TYPE = bool; + HOSTDEVICE bool operator()(const bool* args) const { return !args[0]; } }; template -class UnaryBitwiseOpKernel +class BinaryBitwiseOpKernel : public framework::OpKernel { public: + using T = typename Functor::ELEM_TYPE; void Compute(const framework::ExecutionContext& ctx) const override { - using T = typename Functor::ELEM_TYPE; - - auto* x = ctx.Input("X"); - auto* out = ctx.Output("Out"); - out->mutable_data(ctx.GetPlace()); - auto functor = Functor(); - std::vector ins = {x}; - std::vector outs = {out}; + std::vector ins; + std::vector outs; const auto& cuda_ctx = ctx.template device_context(); - LaunchSameDimsElementwiseCudaKernel( - cuda_ctx, ins, &outs, functor); + int axis = PackTensorsIntoVector(ctx, &ins, &outs); + + if (ins.size() == 1) { + LaunchElementwiseCudaKernel( + cuda_ctx, ins, &outs, axis, functor); + } else { + LaunchElementwiseCudaKernel( + cuda_ctx, ins, &outs, axis, functor); + } } }; @@ -67,7 +81,7 @@ class UnaryBitwiseOpKernel namespace ops = ::paddle::operators; namespace plat = ::paddle::platform; -REGISTER_BINARY_BITWISE_KERNEL(bitwise_and, CUDA, ops::BitwiseAndFunctor); -REGISTER_BINARY_BITWISE_KERNEL(bitwise_or, CUDA, ops::BitwiseOrFunctor); -REGISTER_BINARY_BITWISE_KERNEL(bitwise_xor, CUDA, ops::BitwiseXorFunctor); -REGISTER_UNARY_BITWISE_KERNEL(bitwise_not, CUDA, ops::BitwiseNotFunctor); +REGISTER_BINARY_BITWISE_KERNEL(bitwise_and, CUDA, ops::BitwiseAndCUDAFunctor); +REGISTER_BINARY_BITWISE_KERNEL(bitwise_or, CUDA, ops::BitwiseOrCUDAFunctor); +REGISTER_BINARY_BITWISE_KERNEL(bitwise_xor, CUDA, ops::BitwiseXorCUDAFunctor); +REGISTER_BINARY_BITWISE_KERNEL(bitwise_not, CUDA, ops::BitwiseNotCUDAFunctor); diff --git a/paddle/fluid/operators/controlflow/compare_all_op.cu b/paddle/fluid/operators/controlflow/compare_all_op.cu index 8e8f3f01104f50b84d6404ad62a819d41f19e7d1..9e22d74d6e2aac97ad23f99ad9d5b6a7f9924bbe 100644 --- a/paddle/fluid/operators/controlflow/compare_all_op.cu +++ b/paddle/fluid/operators/controlflow/compare_all_op.cu @@ -17,6 +17,9 @@ limitations under the License. 
*/ #include "paddle/fluid/operators/elementwise/elementwise_op_impl.cu.h" #include "paddle/fluid/operators/reduce_ops/cub_reduce.h" +namespace ops = paddle::operators; +namespace plat = paddle::platform; + namespace paddle { namespace operators { @@ -35,6 +38,23 @@ struct BitwiseAdd { } }; +template +struct CudaEqualReduceFunctor { + using ELEM_TYPE = T; + HOSTDEVICE bool operator()(const T args[]) const { + return (args[0] == args[1]); + } +}; + +template +struct CudaEqualReduceFunctor< + T, typename std::enable_if::value>::type> { + using ELEM_TYPE = T; + HOSTDEVICE bool operator()(const T args[]) const { + return fabs(static_cast(args[0] - args[1])) < 1e-8; + } +}; + template class CompareReduceOpKernel : public framework::OpKernel { @@ -77,9 +97,6 @@ class CompareReduceOpKernel } // namespace operators } // namespace paddle -namespace ops = paddle::operators; -namespace plat = paddle::platform; - #define REGISTER_COMPARE_REDUCE_CUDA_KERNEL(op_type, functor) \ REGISTER_OP_CUDA_KERNEL( \ op_type, \ @@ -92,5 +109,5 @@ namespace plat = paddle::platform; ops::CompareReduceOpKernel>); -REGISTER_COMPARE_REDUCE_CUDA_KERNEL(equal_all, EqualReduceFunctor) +REGISTER_COMPARE_REDUCE_CUDA_KERNEL(equal_all, CudaEqualReduceFunctor) #undef REGISTER_COMPARE_REDUCE_CUDA_KERNEL diff --git a/paddle/fluid/operators/controlflow/compare_op.cu b/paddle/fluid/operators/controlflow/compare_op.cu index fc7dce208c48699b89595d2a9fc943716e09ef05..bf7861a03d8d4da4ff1ae65ff62c761ffab914bd 100644 --- a/paddle/fluid/operators/controlflow/compare_op.cu +++ b/paddle/fluid/operators/controlflow/compare_op.cu @@ -21,11 +21,46 @@ namespace plat = paddle::platform; namespace paddle { namespace operators { +#define DEFINE_CMP_BINARY_FUNCTOR_WITH_PONTER_INPUT(func, op) \ + template \ + struct func { \ + using ELEMENT_TYPE = T; \ + inline HOSTDEVICE bool operator()(const T* args) const { \ + return args[0] op args[1]; \ + } \ + }; + +DEFINE_CMP_BINARY_FUNCTOR_WITH_PONTER_INPUT(CudaLessThanFunctor, <) +DEFINE_CMP_BINARY_FUNCTOR_WITH_PONTER_INPUT(CudaLessEqualFunctor, <=) +DEFINE_CMP_BINARY_FUNCTOR_WITH_PONTER_INPUT(CudaGreaterThanFunctor, >) +DEFINE_CMP_BINARY_FUNCTOR_WITH_PONTER_INPUT(CudaGreaterEqualFunctor, >=) +DEFINE_CMP_BINARY_FUNCTOR_WITH_PONTER_INPUT(CudaEqualFunctor, ==) +DEFINE_CMP_BINARY_FUNCTOR_WITH_PONTER_INPUT(CudaNotEqualFunctor, !=) +#undef DEFINE_CMP_BINARY_FUNCTOR_WITH_PONTER_INPUT + +template +struct CudaEqualFunctor< + T, typename std::enable_if::value>::type> { + using ELEMENT_TYPE = T; + HOSTDEVICE bool operator()(const T* args) const { + return fabs(static_cast(args[0] - args[1])) < 1e-8; + } +}; + +template +struct CudaNotEqualFunctor< + T, typename std::enable_if::value>::type> { + using ELEMENT_TYPE = T; + HOSTDEVICE bool operator()(const T* args) const { + return fabs(static_cast(args[0] - args[1])) > 1e-8; + } +}; + template class CompareOpKernel - : public framework::OpKernel { + : public framework::OpKernel { public: - using InT = typename Functor::ELEM_TYPE; + using InT = typename Functor::ELEMENT_TYPE; using OutT = bool; void Compute(const framework::ExecutionContext& ctx) const override { auto functor = Functor(); @@ -52,10 +87,10 @@ class CompareOpKernel ops::CompareOpKernel, void>, \ ops::CompareOpKernel, void>); -REGISTER_CUDA_COMPARE_KERNEL(equal, EqualFunctor) -REGISTER_CUDA_COMPARE_KERNEL(not_equal, NotEqualFunctor) -REGISTER_CUDA_COMPARE_KERNEL(less_than, LessThanFunctor) -REGISTER_CUDA_COMPARE_KERNEL(less_equal, LessEqualFunctor) -REGISTER_CUDA_COMPARE_KERNEL(greater_than, 
GreaterThanFunctor) -REGISTER_CUDA_COMPARE_KERNEL(greater_equal, GreaterEqualFunctor) +REGISTER_CUDA_COMPARE_KERNEL(equal, CudaEqualFunctor) +REGISTER_CUDA_COMPARE_KERNEL(not_equal, CudaNotEqualFunctor) +REGISTER_CUDA_COMPARE_KERNEL(less_than, CudaLessThanFunctor) +REGISTER_CUDA_COMPARE_KERNEL(less_equal, CudaLessEqualFunctor) +REGISTER_CUDA_COMPARE_KERNEL(greater_than, CudaGreaterThanFunctor) +REGISTER_CUDA_COMPARE_KERNEL(greater_equal, CudaGreaterEqualFunctor) #undef REGISTER_CUDA_COMPARE_KERNEL diff --git a/paddle/fluid/operators/elementwise/elementwise_add_op.cu b/paddle/fluid/operators/elementwise/elementwise_add_op.cu index bd91142882f7650f4dedbee1d3001fe277b12967..69bcd6d0d06ff65c3d3fec6d05656452d6e28fad 100644 --- a/paddle/fluid/operators/elementwise/elementwise_add_op.cu +++ b/paddle/fluid/operators/elementwise/elementwise_add_op.cu @@ -11,7 +11,6 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ - #include "paddle/fluid/operators/elementwise/elementwise_add_op.h" #include "paddle/fluid/operators/elementwise/elementwise_op_broadcast.cu.h" #include "paddle/fluid/operators/reduce_ops/reduce_functor_op.h" @@ -25,6 +24,21 @@ namespace plat = paddle::platform; namespace paddle { namespace operators { +/* + input: an array; + return: the result of the math functor + 1. For Unary Op, the length of input array is 1, + e.g. Relu: return args[0] > 0 ? args[0] : 0; + 2. For Binary Op, the length of input array is 2, + e.g. Add: return args[0] expr args[1]; +*/ +template +struct CudaAddFunctor { + inline HOSTDEVICE T operator()(const T* args) const { + return args[0] + args[1]; + } +}; + template class ElementwiseAddKernel : public framework::OpKernel { @@ -37,7 +51,7 @@ class ElementwiseAddKernel int axis = PackTensorsIntoVector(ctx, &ins, &outs); LaunchElementwiseCudaKernel( - cuda_ctx, ins, &outs, axis, AddFunctor()); + cuda_ctx, ins, &outs, axis, CudaAddFunctor()); } }; diff --git a/paddle/fluid/operators/elementwise/elementwise_mul_op.cu b/paddle/fluid/operators/elementwise/elementwise_mul_op.cu index 33b6f1d60b8de44cb1763ea6f9473b2852c8c601..bf34db09861b89a46f091f1f5c0b812e90c6f38f 100644 --- a/paddle/fluid/operators/elementwise/elementwise_mul_op.cu +++ b/paddle/fluid/operators/elementwise/elementwise_mul_op.cu @@ -24,6 +24,13 @@ namespace plat = paddle::platform; namespace paddle { namespace operators { +template +struct CudaMulFunctor { + inline HOSTDEVICE T operator()(const T* args) const { + return args[0] * args[1]; + } +}; + template class ElementwiseMulKernel : public framework::OpKernel { @@ -37,7 +44,7 @@ class ElementwiseMulKernel int axis = PackTensorsIntoVector(ctx, &ins, &outs, &x_for_selectedrows); LaunchElementwiseCudaKernel( - cuda_ctx, ins, &outs, axis, MulFunctor()); + cuda_ctx, ins, &outs, axis, CudaMulFunctor()); } }; diff --git a/paddle/fluid/operators/elementwise/elementwise_op_broadcast.cu.h b/paddle/fluid/operators/elementwise/elementwise_op_broadcast.cu.h index 53ac85802a6f43f1fb3d4788fd5fc9b993340ddc..129c90a22be6b216ebfb128becf7a04c3dddcaaf 100644 --- a/paddle/fluid/operators/elementwise/elementwise_op_broadcast.cu.h +++ b/paddle/fluid/operators/elementwise/elementwise_op_broadcast.cu.h @@ -16,10 +16,11 @@ #include "paddle/fluid/operators/elementwise/elementwise_op_impl.cu.h" #include "paddle/fluid/operators/kernel_primitives/kernel_primitives.h" - namespace paddle { 
namespace operators { +#define MAX_INPUT_NUM 3 // the max num of ET for BroadcacstConfig + namespace kps = paddle::operators::kernel_primitives; struct DimensionsTransform { @@ -45,9 +46,10 @@ struct DimensionsTransform { axis++; } else { PADDLE_THROW(platform::errors::InvalidArgument( - "The %d-th dimension of input tensor is expected to be equal " - "with the %d-th dimension of output tensor %d or 1, but " - "recieved %d.", + "The %dth dimension of input tensor is expected to be equal " + "with" + "the %dth dimension of output tensor %d or 1, but recieved " + "%d.\n", in_idx + 1, axis + 1, out_dims[axis], in_dim[in_idx])); } } while (in_idx < in_dim.size()); @@ -59,9 +61,10 @@ struct DimensionsTransform { in_idx++; } else { PADDLE_THROW(platform::errors::InvalidArgument( - "The %d-th dimension of input tensor is expected to be equal " - "with the %d-th dimension of output tensor %d or 1, but " - "recieved %d.", + "The %dth dimension of input tensor is expected to be equal " + "with" + "the %dth dimension of output tensor %d or 1, but recieved " + "%d.\n", in_idx + 1, in_idx + 1, out_dims[in_idx], in_dim[in_idx])); } } while (in_idx < dim_size); @@ -162,71 +165,79 @@ struct DimensionsTransform { } }; -template +template __device__ __forceinline__ void LoadData( T *dst, const T *__restrict__ src, uint32_t block_offset, - const kps::details::BroadcastConfig &config, int numel, int num, + const kps::details::BroadcastConfig &config, int numel, int num, bool need_broadcast) { // numel : whole num of output // num: how many data will be deal with in this time if (need_broadcast) { - kps::ReadDataBc(dst, src, block_offset, - config, numel, 1, 1); + kps::ReadDataBc( + dst, src, block_offset, config, numel, 1, 1); } else { kps::ReadData(dst, src + block_offset, num); } } -template +template __device__ void DealSegment( - const framework::Array &ins, OutT *out, - const framework::Array &use_broadcast, uint32_t numel, - const framework::Array, Arity> &configs, + const framework::Array &in, OutT *out, + const framework::Array &use_broadcast, uint32_t numel, + const framework::Array, + MAX_INPUT_NUM> &configlists, int num, Functor func) { - InT args[Arity][VecSize]; + InT args[ET][VecSize]; OutT result[VecSize]; - int block_offset = blockIdx.x * blockDim.x * VecSize; - +// load #pragma unroll - for (int i = 0; i < Arity; i++) { + for (int i = 0; i < ET; i++) { kps::Init(args[i], static_cast(1.0f)); - LoadData(args[i], ins[i], block_offset, - configs[i], numel, num, - use_broadcast[i]); + LoadData(args[i], in[i], block_offset, + configlists[i], numel, num, + use_broadcast[i]); } - - const bool kCallElementwiseAny = - platform::FunctionTraits::has_pointer_args; - ElementwisePrimitiveCaller()(func, args, result); + // compute + if (ET == kUnary) { + kps::ElementwiseUnary(result, args[0], + func); + } else if (ET == kBinary) { + kps::ElementwiseBinary(result, args[0], + args[1], func); + } else { + kps::ElementwiseTernary( + result, args[0], args[1], args[2], func); + } + // compute kps::WriteData(out + block_offset, result, num); } -template +template __global__ void BroadcastKernel( - framework::Array ins, OutT *out, - framework::Array use_broadcast, uint32_t numel, - framework::Array, Arity> configs, + framework::Array in, OutT *out, + framework::Array use_broadcast, uint32_t numel, + framework::Array, MAX_INPUT_NUM> + configlists, int main_tid, int tail_tid, Functor func) { int block_offset = blockIdx.x * blockDim.x * VecSize; // data offset of this block if (blockIdx.x < main_tid) { int num = 
blockDim.x * VecSize; // blockIdx.x < main_tid - DealSegment( - ins, out, use_broadcast, numel, configs, num, func); + DealSegment( + in, out, use_broadcast, numel, configlists, num, func); } else { // reminder int num = tail_tid; - DealSegment( - ins, out, use_broadcast, numel, configs, num, func); + DealSegment( + in, out, use_broadcast, numel, configlists, num, func); } } -template +template void LaunchKernel(const platform::CUDADeviceContext &ctx, const std::vector &ins, framework::Tensor *out, Functor func, @@ -240,58 +251,53 @@ void LaunchKernel(const platform::CUDADeviceContext &ctx, auto stream = ctx.stream(); OutT *out_data = out->data(); - framework::Array, Arity> configs; - framework::Array use_broadcast; - framework::Array ins_data; + framework::Array, MAX_INPUT_NUM> + configlists; + framework::Array use_broadcast; + framework::Array ins_data; - for (int i = 0; i < Arity; i++) { + for (int i = 0; i < ET; i++) { use_broadcast[i] = (ins[i]->numel() != numel); ins_data[i] = ins[i]->data(); if (use_broadcast[i]) { // get the broadcast config, // if data shape is[m, n], then you should set data_dim = {n, m} // eg: out's shape [3, 45, 1]. then out_dims = {1, 45, 3} - configs[i] = kps::details::BroadcastConfig( + configlists[i] = kps::details::BroadcastConfig( merge_dims.out_dims, merge_dims.in_dims[i], merge_dims.dim_size); } } - BroadcastKernel<<>>( - ins_data, out_data, use_broadcast, numel, configs, main_tid, tail_tid, + BroadcastKernel<<>>( + ins_data, out_data, use_broadcast, numel, configlists, main_tid, tail_tid, func); } -template -void LaunchBroadcastKernelForDifferentVecSize( +template +void LaunchBroadcastKernelForDifferentDimSize( const platform::CUDADeviceContext &ctx, const std::vector &ins, framework::Tensor *out, int axis, Functor func) { const auto merge_dims = DimensionsTransform(ins, out->dims(), axis); - -#define CALL_BROADCAST_FOR_DIM_SIZE(rank) \ - case rank: { \ - LaunchKernel(ctx, ins, out, \ - func, merge_dims); \ +#define DIM_SIZE(size) \ + case size: { \ + LaunchKernel(ctx, ins, out, func, \ + merge_dims); \ } break; switch (merge_dims.dim_size) { - CALL_BROADCAST_FOR_DIM_SIZE(1); - CALL_BROADCAST_FOR_DIM_SIZE(2); - CALL_BROADCAST_FOR_DIM_SIZE(3); - CALL_BROADCAST_FOR_DIM_SIZE(4); - CALL_BROADCAST_FOR_DIM_SIZE(5); - CALL_BROADCAST_FOR_DIM_SIZE(6); - CALL_BROADCAST_FOR_DIM_SIZE(7); - CALL_BROADCAST_FOR_DIM_SIZE(8); - default: { - PADDLE_THROW(platform::errors::InvalidArgument( - "The maximum dimension of input tensor is expected to be less than " - "%d, but recieved %d.\n", - merge_dims.dim_size, framework::DDim::kMaxRank)); - } + DIM_SIZE(1); + DIM_SIZE(2); + DIM_SIZE(3); + DIM_SIZE(4); + DIM_SIZE(5); + DIM_SIZE(6); + DIM_SIZE(7); + DIM_SIZE(8); } -#undef CALL_BROADCAST_FOR_DIM_SIZE +#undef DIM_SIZE } template @@ -299,21 +305,11 @@ void LaunchBroadcastElementwiseCudaKernel( const platform::CUDADeviceContext &ctx, const std::vector &ins, std::vector *outs, int axis, Functor func) { - using Traits = platform::FunctionTraits; - const int kArity = - Traits::has_pointer_args ? static_cast(ET) : Traits::arity; - PADDLE_ENFORCE_EQ(ins.size(), kArity, + PADDLE_ENFORCE_EQ(ET, ElementwiseType::kBinary, platform::errors::InvalidArgument( - "The number of inputs is expected to be equal to the " - "arity of functor. 
But recieved: the number of inputs " - "is %d, the arity of functor is %d.", - ins.size(), kArity)); - PADDLE_ENFORCE_EQ(kArity, 2, - platform::errors::InvalidArgument( - "Currently only broadcast of binary is supported and " - "verified, but received %d.", - kArity)); - + "Currently, only Support binary calculation, " + "but received %d input tensors.\n", + static_cast(ET))); int in_vec_size = 4; framework::Tensor *out = (*outs)[0]; for (auto *in : ins) { @@ -326,18 +322,18 @@ void LaunchBroadcastElementwiseCudaKernel( switch (vec_size) { case 4: { - LaunchBroadcastKernelForDifferentVecSize( - ctx, ins, out, axis, func); + LaunchBroadcastKernelForDifferentDimSize(ctx, ins, out, + axis, func); break; } case 2: { - LaunchBroadcastKernelForDifferentVecSize( - ctx, ins, out, axis, func); + LaunchBroadcastKernelForDifferentDimSize(ctx, ins, out, + axis, func); break; } case 1: { - LaunchBroadcastKernelForDifferentVecSize( - ctx, ins, out, axis, func); + LaunchBroadcastKernelForDifferentDimSize(ctx, ins, out, + axis, func); break; } default: { @@ -373,5 +369,7 @@ void LaunchElementwiseCudaKernel( } } +#undef MAX_INPUT_NUM + } // namespace operators } // namespace paddle diff --git a/paddle/fluid/operators/elementwise/elementwise_op_function.h b/paddle/fluid/operators/elementwise/elementwise_op_function.h index 7bbfefba20fa7572d2756bba8b803d2fcc7f8682..dd8e3d409c01e63e887324223ad49917aca0cd89 100644 --- a/paddle/fluid/operators/elementwise/elementwise_op_function.h +++ b/paddle/fluid/operators/elementwise/elementwise_op_function.h @@ -37,10 +37,8 @@ limitations under the License. */ #endif #include -#include "paddle/fluid/operators/elementwise/elementwise_op_broadcast.cu.h" #include "paddle/fluid/platform/cuda_device_function.h" #include "paddle/fluid/platform/cuda_primitives.h" - #ifdef __HIPCC__ constexpr int ELEMWISE_MAX_BLOCK_DIM = 256; #else @@ -280,6 +278,128 @@ void CommonForwardBroadcastCPU(const framework::Tensor *x, } } +#if defined(__NVCC__) || defined(__HIPCC__) +template +__global__ void ElementwiseKernel(const T *__restrict__ x_data, + const T *__restrict__ y_data, + OutType *__restrict__ out_data, int n, + int post, const size_t total, Functor func) { + int tid = threadIdx.x + blockDim.x * blockIdx.x; + int stride = blockDim.x * gridDim.x; + + for (int i = tid; i < total; i += stride) { + int idx = i / post % n; + out_data[i] = func(x_data[i], y_data[idx]); + } +} + +template +void ComputeElementwiseCUDA(const framework::Tensor *x, + const framework::Tensor *y, framework::Tensor *z, + int pre, int n, int post, + const platform::CUDADeviceContext &ctx, + Functor func, const bool is_xsize_larger = true) { + const T *x_data = x->data(); + const T *y_data = y->data(); + OutType *out_data = z->mutable_data(ctx.GetPlace()); + + int numel = pre * n * post; + int threads = 256; + int blocks = (numel + threads - 1) / threads; + + if (is_xsize_larger) { + ElementwiseKernel<<>>( + x_data, y_data, out_data, n, post, numel, func); + + } else { + ElementwiseKernel<<>>( + y_data, x_data, out_data, n, post, numel, func); + } +} + +template +__global__ void CommonForwardBroadcastCUDAKernel( + const int *x_strides_array, const int *y_strides_array, + const int *out_dims_array, const T *x, const T *y, OutType *out, + int out_size, int max_dim, Functor func, const bool is_xsize_larger) { + for (int out_index = blockIdx.x * blockDim.x + threadIdx.x; + out_index < out_size; out_index += blockDim.x * gridDim.x) { + int x_index = 0; + int y_index = 0; + int out_index_quotient = out_index; + int remainder 
= 0; +#pragma unroll + for (int i = max_dim - 1; i >= 0; --i) { + GetDivMod(out_index_quotient, out_dims_array[i], &out_index_quotient, + &remainder); + x_index += remainder * x_strides_array[i]; + y_index += remainder * y_strides_array[i]; + } + if (is_xsize_larger) { + out[out_index] = func(x[x_index], y[y_index]); + } else { + out[out_index] = func(y[y_index], x[x_index]); + } + } +} + +template +void CommonForwardBroadcastCUDA( + const framework::Tensor *x, const framework::Tensor *y, + framework::Tensor *z, int *x_dims_array, int *y_dims_array, + int *out_dims_array, int max_dim, const platform::CUDADeviceContext &ctx, + Functor func, const bool is_xsize_larger = true) { + const auto gplace = BOOST_GET_CONST(platform::CUDAPlace, ctx.GetPlace()); + auto cplace = platform::CPUPlace(); + const T *x_data = x->data(); + const T *y_data = y->data(); + OutType *out_data = z->mutable_data(ctx.GetPlace()); + + std::vector x_strides_array(max_dim); + std::vector y_strides_array(max_dim); + int x_stride = 1; + int y_stride = 1; + for (int i = max_dim - 1; i >= 0; i--) { + x_strides_array[i] = x_dims_array[i] == 1 ? 0 : x_stride; + y_strides_array[i] = y_dims_array[i] == 1 ? 0 : y_stride; + x_stride *= x_dims_array[i]; + y_stride *= y_dims_array[i]; + } + + int bytes = max_dim * sizeof(int); + auto x_strides_array_tmp = memory::Alloc(ctx, bytes); + int *x_strides_array_gpu = + reinterpret_cast(x_strides_array_tmp->ptr()); + memory::Copy(gplace, x_strides_array_gpu, cplace, x_strides_array.data(), + bytes, ctx.stream()); + + auto y_strides_array_tmp = memory::Alloc(ctx, bytes); + int *y_strides_array_gpu = + reinterpret_cast(y_strides_array_tmp->ptr()); + memory::Copy(gplace, y_strides_array_gpu, cplace, y_strides_array.data(), + bytes, ctx.stream()); + + auto out_dims_array_tmp = memory::Alloc(ctx, bytes); + int *out_dims_array_gpu = reinterpret_cast(out_dims_array_tmp->ptr()); + memory::Copy(gplace, out_dims_array_gpu, cplace, out_dims_array, bytes, + ctx.stream()); + + const int out_size = std::accumulate(out_dims_array, out_dims_array + max_dim, + 1, std::multiplies()); + dim3 gird_size = dim3( + (out_size + PADDLE_CUDA_THREAD_SIZE - 1) / PADDLE_CUDA_THREAD_SIZE, 1); + dim3 block_size = dim3(PADDLE_CUDA_THREAD_SIZE, 1); + + CommonForwardBroadcastCUDAKernel< + Functor, T, OutType><<>>( + x_strides_array_gpu, y_strides_array_gpu, out_dims_array_gpu, x_data, + y_data, out_data, out_size, max_dim, func, is_xsize_larger); +} + +#endif // __NVCC__ or __HIPCC__ + template void CommonGradBroadcastCPU( const framework::Tensor &x, const framework::Tensor &y, @@ -1797,10 +1917,21 @@ void CommonElementwiseBroadcastForward( y_dims_array.data(), out_dims_array.data(), max_dim, axis); - CommonForwardBroadcastCPU( - x, y, z, x_dims_array.data(), y_dims_array.data(), out_dims_array.data(), - max_dim, ctx.template device_context(), func, - is_xsize_larger); + if (platform::is_gpu_place(ctx.GetPlace())) { +#if defined(__NVCC__) || defined(__HIPCC__) + CommonForwardBroadcastCUDA( + x, y, z, x_dims_array.data(), y_dims_array.data(), + out_dims_array.data(), max_dim, + ctx.template device_context(), func, + is_xsize_larger); +#endif + } else { + CommonForwardBroadcastCPU( + x, y, z, x_dims_array.data(), y_dims_array.data(), + out_dims_array.data(), max_dim, + ctx.template device_context(), func, + is_xsize_larger); + } } template @@ -1844,35 +1975,12 @@ void ElemwiseExplicitGradCompute(const framework::ExecutionContext &ctx, } } -// It is a common implementation to compute binary calculation with the support -// of 
broadcast, supporting both CPU and GPU. -// - CPU implementation cannot support the case when x needs broadcast, thus -// this function need to be called with XxxFunctor and XxxInverseFunctor, -// like paddle/fluid/operators/elementwise/elementwise_add_op.h#L49 - L55. -// - GPU implementation supports all the broadcast cases, thus there is no need -// to define and call with XxxInverseFunctor. -// TODO(liuyiqun): optimize the CPU implementation to support all broadcast -// cases and avoid the need of XxxInverseFunctor. template void ElementwiseComputeEx(const framework::ExecutionContext &ctx, const framework::Tensor *x, const framework::Tensor *y, int axis, Functor func, framework::Tensor *z) { - if (platform::is_gpu_place(ctx.GetPlace())) { -#if defined(__NVCC__) || defined(__HIPCC__) - std::vector ins = {x, y}; - std::vector outs = {z}; - z->mutable_data(ctx.GetPlace()); - - const auto &dev_ctx = - ctx.template device_context(); - LaunchElementwiseCudaKernel( - dev_ctx, ins, &outs, axis, func); -#endif - return; - } - auto x_dims = x->dims(); auto y_dims = y->dims(); bool is_xsize_larger = true; @@ -1921,6 +2029,15 @@ void ElementwiseComputeEx(const framework::ExecutionContext &ctx, return; } + if (platform::is_gpu_place(ctx.GetPlace())) { +#if defined(__NVCC__) || defined(__HIPCC__) + ComputeElementwiseCUDA( + x, y, z, pre, n, post, + ctx.template device_context(), func, + is_xsize_larger); +#endif + return; + } if (post == 1) { functor.RunRowWise(n, pre); return; diff --git a/paddle/fluid/operators/elementwise/elementwise_op_impl.cu.h b/paddle/fluid/operators/elementwise/elementwise_op_impl.cu.h index 83aff3b55771efa88e91462e0497d4e8c8de2f94..e591b145d2388707dd3862acc257df7c2173b35f 100644 --- a/paddle/fluid/operators/elementwise/elementwise_op_impl.cu.h +++ b/paddle/fluid/operators/elementwise/elementwise_op_impl.cu.h @@ -11,13 +11,12 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ - #pragma once #include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/operators/kernel_primitives/kernel_primitives.h" -#include "paddle/fluid/platform/aligned_vector.h" -#include "paddle/fluid/platform/function_traits.h" +#include "paddle/fluid/platform/cuda_device_function.h" +#include "paddle/fluid/platform/fast_divmod.h" #ifdef __HIPCC__ #define ELEMENTWISE_BLOCK_SIZE 256 @@ -29,8 +28,7 @@ namespace paddle { namespace operators { namespace kps = paddle::operators::kernel_primitives; - -enum ElementwiseType { kUnary = 1, kBinary = 2, kTernary = 3, kAny = -1 }; +enum ElementwiseType { kUnary = 1, kBinary = 2, kTernary = 3 }; /* * According to NVIDIA, if number of threads per block is 64/128/256/512, @@ -57,9 +55,8 @@ inline int GetThreadsConfig(const platform::CUDADeviceContext &ctx, } template -int GetVectorizedSizeForTensors( - const std::vector &ins, - const std::vector &outs) { +int GetVectorizedSizeForIO(const std::vector &ins, + const std::vector &outs) { int vec_size = 4; for (auto iter = ins.begin(); iter != ins.end(); ++iter) { vec_size = std::min(vec_size, @@ -72,88 +69,56 @@ int GetVectorizedSizeForTensors( return vec_size; } -template -struct ElementwisePrimitiveCaller { - __device__ inline OutT operator()(Functor func, InT (*args)[VecSize], - OutT *result); -}; - -template -struct ElementwisePrimitiveCaller { - __device__ inline OutT operator()(Functor func, InT (*args)[VecSize], - OutT *result) { - kps::ElementwiseAny(result, args, - func); +template +__device__ void DealSegment( + const framework::Array &in, OutT *out, int num, + Functor func) { + int data_offset = VecSize * blockIdx.x * blockDim.x; + InT args[ET][VecSize]; + OutT result[VecSize]; +// load data +#pragma unroll + for (int i = 0; i < ET; i++) { + kps::Init(args[i], static_cast(1.0f)); + kps::ReadData(args[i], in[i] + data_offset, + num); } -}; -template -struct ElementwisePrimitiveCaller { - __device__ inline OutT operator()(Functor func, InT (*args)[VecSize], - OutT *result) { + // compute + if (ET == kUnary) { kps::ElementwiseUnary(result, args[0], func); - } -}; - -template -struct ElementwisePrimitiveCaller { - __device__ inline OutT operator()(Functor func, InT (*args)[VecSize], - OutT *result) { + } else if (ET == kBinary) { kps::ElementwiseBinary(result, args[0], args[1], func); - } -}; - -template -struct ElementwisePrimitiveCaller { - __device__ inline OutT operator()(Functor func, InT **args, OutT *result) { + } else { kps::ElementwiseTernary( result, args[0], args[1], args[2], func); } -}; - -template -__device__ void DealSegment( - const framework::Array &in, OutT *out, - int num, Functor func) { - InT args[Arity][VecSize]; - OutT result[VecSize]; - int data_offset = VecSize * blockIdx.x * blockDim.x; - -#pragma unroll - for (int i = 0; i < Arity; i++) { - kps::Init(args[i], static_cast(1.0f)); - kps::ReadData(args[i], in[i] + data_offset, - num); - } - - const bool kCallElementwiseAny = - platform::FunctionTraits::has_pointer_args; - ElementwisePrimitiveCaller()(func, args, result); + // store kps::WriteData(out + data_offset, result, num); } -template +template __global__ void ElementVectorizeKernel( - framework::Array ins, OutT *out, int size, + framework::Array in, OutT *out, int size, Functor func) { int data_offset = VecSize * blockIdx.x * blockDim.x; int num = size - data_offset; // the num this time have to deal with if (VecSize * blockDim.x > num) { // reminder segment - DealSegment(ins, out, num, func); + DealSegment(in, out, num, func); } else { // complete segment - 
DealSegment(ins, out, num, func); + DealSegment(in, out, num, func); } } -template +template void ElementwiseCudaKernel(const platform::CUDADeviceContext &ctx, const std::vector &ins, std::vector *outs, @@ -164,14 +129,14 @@ void ElementwiseCudaKernel(const platform::CUDADeviceContext &ctx, ((numel + VecSize - 1) / VecSize + block_size - 1) / block_size; auto stream = ctx.stream(); - OutT *out_data = (*outs)[0]->data(); - framework::Array ins_data; - for (int i = 0; i < Arity; i++) { - ins_data[i] = ins[i]->data(); + OutT *out = (*outs)[0]->data(); + framework::Array in; + for (int i = 0; i < ET; i++) { + in[i] = ins[i]->data(); } - ElementVectorizeKernel<<>>( - ins_data, out_data, numel, func); + ElementVectorizeKernel<<>>( + in, out, numel, func); } template @@ -179,30 +144,17 @@ void LaunchSameDimsElementwiseCudaKernel( const platform::CUDADeviceContext &ctx, const std::vector &ins, std::vector *outs, Functor func) { - using Traits = platform::FunctionTraits; - const int kArity = - Traits::has_pointer_args ? static_cast(ET) : Traits::arity; - PADDLE_ENFORCE_EQ(ins.size(), kArity, - platform::errors::InvalidArgument( - "The number of inputs is expected to be equal to the " - "arity of functor. But recieved: the number of inputs " - "is %d, the arity of functor is %d.", - ins.size(), kArity)); - // calculate the max vec_size for all ins and outs - int vec_size = GetVectorizedSizeForTensors(ins, *outs); + int vec_size = GetVectorizedSizeForIO(ins, *outs); switch (vec_size) { case 4: - ElementwiseCudaKernel(ctx, ins, outs, - func); + ElementwiseCudaKernel(ctx, ins, outs, func); break; case 2: - ElementwiseCudaKernel(ctx, ins, outs, - func); + ElementwiseCudaKernel(ctx, ins, outs, func); break; case 1: - ElementwiseCudaKernel(ctx, ins, outs, - func); + ElementwiseCudaKernel(ctx, ins, outs, func); break; default: { PADDLE_THROW(platform::errors::Unimplemented( diff --git a/paddle/fluid/operators/elementwise/elementwise_sub_op.cu b/paddle/fluid/operators/elementwise/elementwise_sub_op.cu index 2643cc0e7a27547825c56483de3f498d6cf57751..da9610243f7c4df3300b3ea8b9137cea84e5c72b 100644 --- a/paddle/fluid/operators/elementwise/elementwise_sub_op.cu +++ b/paddle/fluid/operators/elementwise/elementwise_sub_op.cu @@ -22,6 +22,13 @@ namespace plat = paddle::platform; namespace paddle { namespace operators { +template +struct CudaSubFunctor { + inline HOSTDEVICE T operator()(const T* args) const { + return args[0] - args[1]; + } +}; + template class ElementwiseSubKernel : public framework::OpKernel { @@ -34,7 +41,7 @@ class ElementwiseSubKernel int axis = PackTensorsIntoVector(ctx, &ins, &outs); LaunchElementwiseCudaKernel( - cuda_ctx, ins, &outs, axis, SubFunctor()); + cuda_ctx, ins, &outs, axis, CudaSubFunctor()); } }; diff --git a/paddle/fluid/operators/fused/attn_bias_add.cu.h b/paddle/fluid/operators/fused/attn_bias_add.cu.h index a8bd35a1b7309a524d1e1ae786c7cbe49fd2eab8..37e7bd9caa67ee35f97be05f1993d98056ca5755 100644 --- a/paddle/fluid/operators/fused/attn_bias_add.cu.h +++ b/paddle/fluid/operators/fused/attn_bias_add.cu.h @@ -52,8 +52,10 @@ template using ReduceParamType = typename CudnnDataType::BatchNormParamType; template -struct AddFunctor { - inline HOSTDEVICE T operator()(const T& a, const T& b) const { return a + b; } +struct CudaAddFunctor { + inline HOSTDEVICE T operator()(const T* args) const { + return args[0] + args[1]; + } }; template out_dims = {n, m}; configlists[1] = kps::details::BroadcastConfig<2>(out_dims, input1_dims, 2); - auto func = AddFunctor(); + auto func = 
CudaAddFunctor(); auto stream = ctx.stream(); switch (vec_size) { case 4: { diff --git a/paddle/fluid/operators/kernel_primitives/compute_primitives.h b/paddle/fluid/operators/kernel_primitives/compute_primitives.h index 2898a11fd7a60165dbf3306045e2a0b304dd0f04..58642ef2631568414701de7d192124d2cdf51668 100644 --- a/paddle/fluid/operators/kernel_primitives/compute_primitives.h +++ b/paddle/fluid/operators/kernel_primitives/compute_primitives.h @@ -21,6 +21,7 @@ #include #endif +// #include #include "paddle/fluid/platform/cuda_device_function.h" #include "paddle/fluid/platform/float16.h" @@ -134,114 +135,53 @@ __device__ __forceinline__ T BlockYReduce(T val, ReduceOp reducer) { } // namespace details -/** - * @brief unary function - * @param - * T: data type of in - * OutT: data type of out - * NX: the cols of in - * NY: the rows of in - * BlockSize: the config of this device - * OpFunc: compute functor which have an operator() as following - * template - * struct XxxFunctor { - * HOSTDEVICE OutT operator()(const T& a) const { - * return ...; - * } - * }; - */ -template -__device__ __forceinline__ void ElementwiseUnary(OutT* out, const T* in, - OpFunc compute) { -#pragma unroll - for (int idx = 0; idx < NX * NY; idx++) { - out[idx] = static_cast(compute(in[idx])); - } -} +/*************************** Compute Function****************************/ /** * @brief binary function, in1 and in2 have same shape - * @param + * @param: * T: data type of in1, in2 * OutT: data type of out * NX: the cols of in1, in2 * NY: the rows of in1, in2 * BlockSize: the config of this device - * OpFunc: compute functor which have an operator() as following - * template - * struct XxxFunctor { - * HOSTDEVICE OutT operator()(const T& a, const T& b) const { - * return ...; - * } - * }; + * OpFunc: compute functor eg: in1 + in2, in1 - in2 */ template __device__ __forceinline__ void ElementwiseBinary(OutT* out, const T* in1, const T* in2, OpFunc compute) { + T args[2]; #pragma unroll for (int idx = 0; idx < NX * NY; ++idx) { - out[idx] = static_cast(compute(in1[idx], in2[idx])); + args[0] = in1[idx]; + args[1] = in2[idx]; + out[idx] = static_cast(compute(args)); } } /** * @brief ternary function, in1, in2 and in3 have same shape - * @param + * @param: * T: data type of in1, in2, in3 * OutT: data type of out * NX: the cols of in1, in2 * NY: the rows of in1, in2 * BlockSize: the config of this device - * OpFunc: compute functor which have an operator() as following - * template - * struct XxxFunctor { - * HOSTDEVICE OutT operator()(const T& a, const T& b, const T& c) const { - * return ...; - * } - * }; + * OpFunc: compute functor eg: out = in1 * in2 + in3 */ template __device__ __forceinline__ void ElementwiseTernary(OutT* out, const T* in1, const T* in2, const T* in3, OpFunc compute) { + T args[3]; #pragma unroll for (int idx = 0; idx < NX * NY; ++idx) { - out[idx] = static_cast(compute(in1[idx], in2[idx], in3[idx])); - } -} - -/** - * @brief a general function for elementwise computation, all inputs have - * the same shape. 
- * @param - * T: data type of in1, in2, in3 - * OutT: data type of out - * NX: the cols of in1, in2 - * NY: the rows of in1, in2 - * BlockSize: the config of this device - * OpFunc: compute functor which have an operator() as following - * template - * struct XxxFunctor { - * HOSTDEVICE OutT operator()(const T* args) const { - * return ...; - * } - * }; - */ -template -__device__ __forceinline__ void ElementwiseAny(OutT* out, T (*ins)[NX * NY], - OpFunc compute) { - T args[Arity]; -#pragma unroll - for (int idx = 0; idx < NX * NY; ++idx) { -#pragma unroll - for (int j = 0; j < Arity; ++j) { - args[j] = ins[j][idx]; - } + args[0] = in1[idx]; + args[1] = in2[idx]; + args[2] = in3[idx]; out[idx] = static_cast(compute(args)); } } @@ -249,7 +189,7 @@ __device__ __forceinline__ void ElementwiseAny(OutT* out, T (*ins)[NX * NY], /** * @brief cycle binary function, in1's shape size is [1, NX], in2's shape size * is [NY, NX], out's shape size is [NY, NX] - * @param + * @param: * T: data type of in1, in2 * OutT: data type of out * NX: the cols of in1, in2 @@ -271,6 +211,26 @@ __device__ __forceinline__ void CycleBinary(OutT* out, const T* in1, } } +/** + * @brief unary function + * @param: + * T: data type of in + * OutT: data type of out + * NX: the cols of in + * NY: the rows of in + * BlockSize: the config of this device + * OpFunc: compute functor eg: relu, exp + */ +template +__device__ __forceinline__ void ElementwiseUnary(OutT* out, const T* in, + OpFunc compute) { +#pragma unroll + for (int idx = 0; idx < NX * NY; idx++) { + out[idx] = static_cast(compute(in + idx)); + } +} + /** * @brief reduce function, in's shape size is [NX, NY]. * If ReduceMode == kLocalMode then reduce NX, the shape of out is [NY, 1], @@ -278,7 +238,7 @@ __device__ __forceinline__ void CycleBinary(OutT* out, const T* in1, * shape of out is [NY, NX]. If reduce_last_dim is false and reduce_num was * split, BlockYReduce will be called. 
If reduce_last_dim is true and * reduce_num was split, BlockXReduce will be called - * @typename + * @typename: * T: data type of in * NX: the cols of in * NY: the rows of in diff --git a/paddle/fluid/operators/lgamma_op.cu b/paddle/fluid/operators/lgamma_op.cu index baf86c99b5678dfb5475c7217f8be17f5bccd505..befd31e3bd8b1898ad6c59dca80dac3ae6de339d 100644 --- a/paddle/fluid/operators/lgamma_op.cu +++ b/paddle/fluid/operators/lgamma_op.cu @@ -15,14 +15,18 @@ #include #include "paddle/fluid/operators/elementwise/elementwise_op_impl.cu.h" #include "paddle/fluid/operators/lgamma_op.h" +#include "paddle/fluid/operators/math/complex_functors.h" namespace paddle { namespace operators { +template +struct CudaLgammaFunctor; + template -struct CudaLgammaFunctor { - __device__ __forceinline__ T operator()(const T& x) const { - return Eigen::numext::lgamma(x); +struct CudaLgammaFunctor>> { + __device__ __forceinline__ T operator()(const T* args) const { + return Eigen::numext::lgamma(args[0]); } }; @@ -33,14 +37,15 @@ class LgammaKernel void Compute(const framework::ExecutionContext& context) const override { const Tensor* x = context.Input("X"); Tensor* out = context.Output("Out"); - out->mutable_data(context.GetPlace()); + out->mutable_data>(context.GetPlace()); auto& dev_ctx = context.device_context(); std::vector ins = {x}; std::vector outs = {out}; auto functor = CudaLgammaFunctor(); - LaunchSameDimsElementwiseCudaKernel( - dev_ctx, ins, &outs, functor); + LaunchSameDimsElementwiseCudaKernel>(dev_ctx, ins, &outs, + functor); } }; diff --git a/paddle/fluid/operators/matrix_rank_op.cu b/paddle/fluid/operators/matrix_rank_op.cu index d85a262b5e910a403620e2ac947e2c930b3fd2b5..c6f85abac97d6fd5336d1a72cce4aab7baa8608d 100644 --- a/paddle/fluid/operators/matrix_rank_op.cu +++ b/paddle/fluid/operators/matrix_rank_op.cu @@ -129,10 +129,17 @@ class MatrixRankGPUKernel : public framework::OpKernel { compare_result.mutable_data(detail::NewAxisDim(dim_out, k), context.GetPlace()); int axis = -1; - ElementwiseComputeEx, platform::CUDADeviceContext, T, - int64_t>(context, &eigenvalue_tensor, &tol_tensor, - axis, GreaterThanFunctor(), - &compare_result); + if (eigenvalue_tensor.dims().size() >= tol_tensor.dims().size()) { + ElementwiseComputeEx, platform::CUDADeviceContext, + T, int64_t>(context, &eigenvalue_tensor, &tol_tensor, + axis, GreaterThanFunctor(), + &compare_result); + } else { + ElementwiseComputeEx, platform::CUDADeviceContext, T, + int64_t>(context, &eigenvalue_tensor, &tol_tensor, + axis, LessThanFunctor(), + &compare_result); + } auto dito_int = math::DeviceIndependenceTensorOperations(context); diff --git a/paddle/fluid/platform/function_traits.h b/paddle/fluid/platform/function_traits.h deleted file mode 100644 index 1847a5e1f872021c590ef7f771f6a69b82a8bd7f..0000000000000000000000000000000000000000 --- a/paddle/fluid/platform/function_traits.h +++ /dev/null @@ -1,42 +0,0 @@ -/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.1 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.1 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
*/ - -#pragma once - -#include - -namespace paddle { -namespace platform { - -// Declare a template class with a single template parameter. -template -struct FunctionTraits; - -// A forwarding trait allowing functors (objects which have an operator()) -// to be used with this traits class. -template -struct FunctionTraits : public FunctionTraits {}; - -// A partial specialization of FunctionTraits for pointers to member functions. -template -struct FunctionTraits { - static const size_t arity = sizeof...(Args); - static const bool has_pointer_args = - (arity == 1) && - (std::is_pointer< - typename std::tuple_element<0, std::tuple>::type>::value); -}; - -} // namespace platform -} // namespace paddle
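For reference, the functor interface this patch standardizes on receives a pointer to its packed operands instead of individual references, as CudaSubFunctor and CudaAddFunctor above show: kps::ElementwiseBinary copies the two inputs into a local args array and hands that pointer to the functor. A minimal sketch of defining and launching a custom binary functor under that convention (CudaMaxFunctor is an illustrative name, not something added by this patch; the launch call in the comment assumes the same pattern as ElementwiseSubKernel):

template <typename T>
struct CudaMaxFunctor {
  // args[0] and args[1] are the two operands packed by kps::ElementwiseBinary
  // before the functor is invoked.
  inline HOSTDEVICE T operator()(const T* args) const {
    return args[0] > args[1] ? args[0] : args[1];
  }
};

// Launch pattern, mirroring ElementwiseSubKernel above:
//   std::vector<const framework::Tensor*> ins = {x, y};
//   std::vector<framework::Tensor*> outs = {z};
//   int axis = PackTensorsIntoVector<T>(ctx, &ins, &outs);
//   LaunchElementwiseCudaKernel<ElementwiseType::kBinary, T, T>(
//       cuda_ctx, ins, &outs, axis, CudaMaxFunctor<T>());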
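The `idx = i / post % n` line in ElementwiseKernel (used by ComputeElementwiseCUDA) selects, for each flattened output element, the matching element of the operand that only spans the middle dimension when the output is viewed as [pre, n, post]. A small worked illustration; BroadcastOperandIndex is a hypothetical helper, not part of the patch:

// For a flat index i = (p * n + j) * post + k with p < pre, j < n, k < post,
// integer division by post drops k and the modulo by n recovers j.
inline int BroadcastOperandIndex(int i, int n, int post) {
  return i / post % n;
}

// Example: pre = 2, n = 3, post = 4. i = 17 decomposes into p = 1, j = 1,
// k = 1, and BroadcastOperandIndex(17, 3, 4) == 1, so y_data[1] is paired
// with x_data[17].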
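CommonForwardBroadcastCUDAKernel handles the fully general case by giving each input a per-dimension stride that is zero wherever that input's extent is 1, then decomposing the flat output index with GetDivMod from the innermost dimension outwards. A host-side sketch of the same index arithmetic, under the assumption stated above (BroadcastIndex is illustrative only):

#include <vector>

// Map a flat output index to an input index; dimensions the input does not
// really have carry stride 0, so the same element is reused along them.
static int BroadcastIndex(int out_index, const std::vector<int>& out_dims,
                          const std::vector<int>& strides) {
  int index = 0;
  int quotient = out_index;
  for (int i = static_cast<int>(out_dims.size()) - 1; i >= 0; --i) {
    int remainder = quotient % out_dims[i];  // GetDivMod yields both at once
    quotient /= out_dims[i];
    index += remainder * strides[i];
  }
  return index;
}

// Example: out_dims = {2, 3}, y_dims = {1, 3} gives y strides {0, 1};
// out_index 4 (row 1, col 1) maps to y index 1, so y's single row is reused
// for every row of the output.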