diff --git a/paddle/fluid/operators/abs_op.cu b/paddle/fluid/operators/abs_op.cu
index 94b0a3ae729380c90f1351df3b2d42d34d88be57..86748d4505d28708ed2d803edc1d52501889e51e 100644
--- a/paddle/fluid/operators/abs_op.cu
+++ b/paddle/fluid/operators/abs_op.cu
@@ -24,14 +24,14 @@ struct CudaAbsFunctor;
 template <typename T>
 struct CudaAbsFunctor<T, math::Complex<T, math::Real<T>>> {
-  __device__ __forceinline__ math::Real<T> operator()(const T& x) const {
+  __device__ __forceinline__ math::Real<T> operator()(const T x) const {
     return abs(x);
   }
 };
 
 template <typename T>
 struct CudaAbsFunctor<T, math::NoComplex<T, math::Real<T>>> {
-  __device__ __forceinline__ T operator()(const T& x) const {
+  __device__ __forceinline__ T operator()(const T x) const {
     return std::abs(x);
   }
 };
 
diff --git a/paddle/fluid/operators/bce_loss_op.cu b/paddle/fluid/operators/bce_loss_op.cu
index 18562b243255be9077cb2310de3d7f1d4857e969..da96aa92cd25a96d250ed5c2b86abf42dd16b927 100644
--- a/paddle/fluid/operators/bce_loss_op.cu
+++ b/paddle/fluid/operators/bce_loss_op.cu
@@ -28,8 +28,8 @@ template <typename T>
 struct BCELossGradFunctor {
   T one = static_cast<T>(1.0f);
   T eps = static_cast<T>(1e-12);
-  __device__ __forceinline__ T operator()(const T& x, const T& label,
-                                          const T& dout) const {
+  __device__ __forceinline__ T operator()(const T x, const T label,
+                                          const T dout) const {
     T term1 = max((one - x) * x, eps);
     return (dout * (x - label) / term1);
   }
diff --git a/paddle/fluid/operators/clip_op.h b/paddle/fluid/operators/clip_op.h
index f08a7b2d573145ecc057f70aa5c8c02465746da8..3672fa983e495cb6a4ec5b5fb488a7a5a40fb9b2 100644
--- a/paddle/fluid/operators/clip_op.h
+++ b/paddle/fluid/operators/clip_op.h
@@ -32,7 +32,7 @@
 template <typename T>
 class ClipFunctor {
  public:
   explicit ClipFunctor(const T min, const T max) : min_(min), max_(max) {}
-  HOSTDEVICE T operator()(const T& x) const {
+  HOSTDEVICE T operator()(const T x) const {
     return x < min_ ? min_ : x > max_ ? max_ : x;
   }
diff --git a/paddle/fluid/operators/p_norm_op.cu b/paddle/fluid/operators/p_norm_op.cu
index 1db6f6e51746282f89e9c160a0d370b72eecd605..b2a9ca6f93742730173b9268abc4ede301d755d4 100644
--- a/paddle/fluid/operators/p_norm_op.cu
+++ b/paddle/fluid/operators/p_norm_op.cu
@@ -63,7 +63,7 @@ __device__ __forceinline__ double inline_pow(double base, double exponent) {
 template <typename T>
 struct NonzeroFunctor {
   HOSTDEVICE explicit inline NonzeroFunctor() {}
-  HOSTDEVICE inline T operator()(const T& x) const {
+  HOSTDEVICE inline T operator()(const T x) const {
     return static_cast<T>(static_cast<double>(x) != 0);
   }
 };
@@ -71,7 +71,7 @@ struct NonzeroFunctor {
 template <typename T>
 struct AbsFunctor {
   HOSTDEVICE explicit inline AbsFunctor() {}
-  HOSTDEVICE inline T operator()(const T& x) const {
+  HOSTDEVICE inline T operator()(const T x) const {
     return static_cast<T>(inline_abs(x));
   }
 };
@@ -81,7 +81,7 @@ struct UnsignedPowFunctor {
   HOSTDEVICE explicit inline UnsignedPowFunctor(float porder) {
     this->porder = porder;
   }
-  HOSTDEVICE inline Ty operator()(const Tx& x) const {
+  HOSTDEVICE inline Ty operator()(const Tx x) const {
     return static_cast<Ty>(inline_pow(inline_abs(x), static_cast<Tx>(porder)));
   }
   float porder;
@@ -90,7 +90,7 @@ struct UnsignedPowFunctor {
 template <typename Tx, typename Ty = Tx>
 struct PowFunctor {
   HOSTDEVICE explicit inline PowFunctor(float porder) { this->porder = porder; }
-  HOSTDEVICE inline Ty operator()(const Tx& x) const {
+  HOSTDEVICE inline Ty operator()(const Tx x) const {
     return static_cast<Ty>(inline_pow(x, static_cast<Tx>(porder)));
   }
   float porder;
diff --git a/paddle/fluid/operators/renorm_op.cu b/paddle/fluid/operators/renorm_op.cu
index 1798faa759bed013c35f93bf0d2623f2a17da49a..b21b9fde56f247b5a26f1b9b600fe5da76111627 100644
--- a/paddle/fluid/operators/renorm_op.cu
+++ b/paddle/fluid/operators/renorm_op.cu
@@ -42,7 +42,7 @@ struct UnsignedPowFunctor {
   HOSTDEVICE explicit inline UnsignedPowFunctor(float porder) {
     this->porder = porder;
   }
-  HOSTDEVICE inline Ty operator()(const Tx& x) const {
+  HOSTDEVICE inline Ty operator()(const Tx x) const {
     return static_cast<Ty>(inline_pow(inline_abs(x), static_cast<Tx>(porder)));
   }
   float porder;
diff --git a/paddle/pten/kernels/gpu/cast_kernel.cu b/paddle/pten/kernels/gpu/cast_kernel.cu
index 9f65400f93b9f0cbac0e8aae41fa4678b52a8bfa..0bbe7a3a132d1c0737d174a9a0c99b22fd48a36a 100644
--- a/paddle/pten/kernels/gpu/cast_kernel.cu
+++ b/paddle/pten/kernels/gpu/cast_kernel.cu
@@ -30,7 +30,7 @@ namespace pten {
 template <typename InT, typename OutT>
 struct CastFuctor {
-  __device__ __forceinline__ OutT operator()(const InT& x) const {
+  __device__ __forceinline__ OutT operator()(const InT x) const {
     return static_cast<OutT>(x);
   }
 };
 
diff --git a/paddle/pten/kernels/gpu/scale_kernel.cu b/paddle/pten/kernels/gpu/scale_kernel.cu
index f4bb5c5dbf75502bfad09c987d3da22864d99403..68574c063e77f34988e468c5f34988e468c5f46fcbfabba22b88 100644
--- a/paddle/pten/kernels/gpu/scale_kernel.cu
+++ b/paddle/pten/kernels/gpu/scale_kernel.cu
@@ -34,7 +34,7 @@ struct ScaleFunctor {
     bias_after_scale = is_bias_after_sacle;
   }
 
-  __device__ __forceinline__ InT operator()(const InT& x) const {
+  __device__ __forceinline__ InT operator()(const InT x) const {
     if (bias_after_scale) {
       return scale * x + bias;
     } else {
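
Note (not part of the patch): every hunk above applies the same mechanical change, the element-wise functor's operator() now takes its argument by value (const T) instead of by const reference (const T&). The sketch below is a minimal, self-contained illustration of that signature in a plain CUDA element-wise kernel; the functor and kernel names, launch configuration, and the scale/bias math are hypothetical stand-ins, not Paddle's actual ElementwiseKernel machinery.

// Minimal sketch, assuming plain CUDA with no Paddle headers.
#include <cstdio>
#include <cuda_runtime.h>

// Hypothetical functor whose operator() takes the element by value,
// matching the by-value signature introduced in the diff above.
template <typename T>
struct ScaleBiasFunctor {
  T scale;
  T bias;
  __device__ __forceinline__ T operator()(const T x) const {  // by value
    return scale * x + bias;
  }
};

// Illustrative element-wise kernel: each thread loads one element into a
// register and hands it to the functor by value.
template <typename T, typename Functor>
__global__ void ElementwiseKernel(const T* in, T* out, int n, Functor func) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) {
    out[i] = func(in[i]);
  }
}

int main() {
  const int n = 8;
  float h_in[n], h_out[n];
  for (int i = 0; i < n; ++i) h_in[i] = static_cast<float>(i);

  float *d_in = nullptr, *d_out = nullptr;
  cudaMalloc(&d_in, n * sizeof(float));
  cudaMalloc(&d_out, n * sizeof(float));
  cudaMemcpy(d_in, h_in, n * sizeof(float), cudaMemcpyHostToDevice);

  ScaleBiasFunctor<float> func{2.0f, 1.0f};      // out = 2 * x + 1
  ElementwiseKernel<<<1, 32>>>(d_in, d_out, n, func);
  cudaMemcpy(h_out, d_out, n * sizeof(float), cudaMemcpyDeviceToHost);

  for (int i = 0; i < n; ++i) printf("%g -> %g\n", h_in[i], h_out[i]);
  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}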