From 10a38b4e5c685066f5a710175983b86e7d5997ab Mon Sep 17 00:00:00 2001
From: Galaxy1458 <55453380+Galaxy1458@users.noreply.github.com>
Date: Tue, 16 May 2023 19:26:50 +0800
Subject: [PATCH] remove some [-Wunused-parameter] warning and fix a file to
 pass cpplint (#53814)

* test,test=develop
* test,test=develop
* test,test=develop
* test,test=develop
* test,test=develop
---
 .../framework/details/nan_inf_utils_detail.h  |   4 +-
 .../ir/fusion_group/code_generator_helper.h   |   2 +-
 paddle/fluid/imperative/data_loader.cc        |   3 +-
 paddle/fluid/imperative/infer_shape_context.h |  28 ++--
 .../fluid/imperative/infer_var_type_context.h |  40 ++---
 .../fluid/operators/collective/c_concat_op.h  |   2 +-
 .../c_softmax_with_cross_entropy_op.h         |   2 +-
 paddle/fluid/operators/controlflow/feed_op.cc |   2 +-
 .../operators/elementwise/elementwise_op.h    |  10 +-
 .../fused/mkldnn/multi_gru_mkldnn_op.cc       |   2 +-
 paddle/fluid/platform/device_context.cc       |   3 +-
 .../cpu/margin_cross_entropy_kernel.cc        |  26 ++--
 paddle/phi/kernels/cpu/rnn_functor.h          |   2 +-
 paddle/phi/kernels/cpu/sgd_kernel.cc          |  20 +--
 paddle/phi/kernels/cpu/uniform_kernel.cc      |   2 +-
 paddle/phi/kernels/funcs/activation_functor.h |  87 +++++------
 .../phi/kernels/funcs/detection/poly_util.cc  | 138 ------------------
 .../phi/kernels/funcs/detection/poly_util.h   | 103 ++++++++++++-
 .../phi/kernels/legacy/cpu/uniform_kernel.cc  |   2 +-
 19 files changed, 218 insertions(+), 260 deletions(-)
 delete mode 100644 paddle/phi/kernels/funcs/detection/poly_util.cc

diff --git a/paddle/fluid/framework/details/nan_inf_utils_detail.h b/paddle/fluid/framework/details/nan_inf_utils_detail.h
index f4ee2c20b9d..ed2fa25a5ae 100644
--- a/paddle/fluid/framework/details/nan_inf_utils_detail.h
+++ b/paddle/fluid/framework/details/nan_inf_utils_detail.h
@@ -61,7 +61,9 @@ HOSTDEVICE bool NeedPrint(MT max_value, MT min_value, int check_nan_inf_level) {
 template ::value, bool> = true>
-HOSTDEVICE bool NeedPrint(MT max_value, MT min_value, int check_nan_inf_level) {
+HOSTDEVICE bool NeedPrint(MT max_value UNUSED,
+                          MT min_value UNUSED,
+                          int check_nan_inf_level) {
   if (check_nan_inf_level >= 3) {
     return true;
   }
diff --git a/paddle/fluid/framework/ir/fusion_group/code_generator_helper.h b/paddle/fluid/framework/ir/fusion_group/code_generator_helper.h
index 0980b17853c..e068403181b 100644
--- a/paddle/fluid/framework/ir/fusion_group/code_generator_helper.h
+++ b/paddle/fluid/framework/ir/fusion_group/code_generator_helper.h
@@ -95,7 +95,7 @@ class TemplateVariable {
     strings_[identifier] = expression;
   }
 
-  void Remove(std::string identifier, std::string expression) {
+  void Remove(std::string identifier, std::string expression UNUSED) {
     for (auto it = strings_.begin(); it != strings_.end();) {
       if (it->first == identifier) {
         it = strings_.erase(it);
diff --git a/paddle/fluid/imperative/data_loader.cc b/paddle/fluid/imperative/data_loader.cc
index 65f6ef4e53c..f225b871b68 100644
--- a/paddle/fluid/imperative/data_loader.cc
+++ b/paddle/fluid/imperative/data_loader.cc
@@ -73,7 +73,8 @@ void EraseLoadProcessPIDs(int64_t key) {
   } while (0)
 
 #define REGISTER_SIGNAL_HANDLER(SIGNAL, HANDLER_NAME, ERROR_MSG)           \
-  static void HANDLER_NAME(int sig, siginfo_t *info, void *ctx) {          \
+  static void HANDLER_NAME(                                                \
+      int sig UNUSED, siginfo_t *info UNUSED, void *ctx UNUSED) {          \
     auto _w =                                                              \
         write(STDERR_FILENO, ERROR_MSG, sizeof(ERROR_MSG) / sizeof(char)); \
     (void)_w;                                                              \
diff --git a/paddle/fluid/imperative/infer_shape_context.h b/paddle/fluid/imperative/infer_shape_context.h
index 6f1f54de8a9..80c52ab4ac1 100644
---
a/paddle/fluid/imperative/infer_shape_context.h +++ b/paddle/fluid/imperative/infer_shape_context.h @@ -237,14 +237,14 @@ class DygraphInferShapeContext : public framework::InferShapeContext { } } - void ShareAllLoD(const std::string& in, - const std::string& out) const override { + void ShareAllLoD(const std::string& in UNUSED, + const std::string& out UNUSED) const override { // do nothing } - void ShareLoD(const std::string& in, - const std::string& out, - size_t i = 0, - size_t j = 0) const override { + void ShareLoD(const std::string& in UNUSED, + const std::string& out UNUSED, + size_t i UNUSED = 0, + size_t j UNUSED = 0) const override { // do nothing } @@ -415,14 +415,15 @@ class DygraphInferShapeContext : public framework::InferShapeContext { } } - int32_t GetLoDLevel(const std::string& in, size_t i = 0) const override { + int32_t GetLoDLevel(const std::string& in UNUSED, + size_t i UNUSED = 0) const override { PADDLE_THROW(platform::errors::PermissionDenied( "GetLoDLevel function not support in dygraph mode")); } - void SetLoDLevel(const std::string& out, - int32_t lod_level, - size_t j = 0) const override { + void SetLoDLevel(const std::string& out UNUSED, + int32_t lod_level UNUSED, + size_t j UNUSED = 0) const override { PADDLE_THROW(platform::errors::PermissionDenied( "SetLoDLevel function not support in dygraph mode")); } @@ -452,7 +453,8 @@ class DygraphInferShapeContext : public framework::InferShapeContext { } } - std::vector GetRepeatedDims(const std::string& name) const override { + std::vector GetRepeatedDims( + const std::string& name UNUSED) const override { PADDLE_THROW(platform::errors::PermissionDenied( "GetRepeatedDims not support in dygraph runtime")); } @@ -486,8 +488,8 @@ class DygraphInferShapeContext : public framework::InferShapeContext { } } - void SetRepeatedDims(const std::string& name, - const std::vector& dims) override { + void SetRepeatedDims(const std::string& name UNUSED, + const std::vector& dims UNUSED) override { PADDLE_THROW(platform::errors::PermissionDenied( "SetRepeatedDims not support in dygraph runtime")); } diff --git a/paddle/fluid/imperative/infer_var_type_context.h b/paddle/fluid/imperative/infer_var_type_context.h index 36558366e1d..fcf9c1d3ab4 100644 --- a/paddle/fluid/imperative/infer_var_type_context.h +++ b/paddle/fluid/imperative/infer_var_type_context.h @@ -145,8 +145,8 @@ class RuntimeInferVarTypeContext : public framework::InferVarTypeContext { } void SetOutputDataType(const std::string& name, - framework::proto::VarType::Type type, - int index = 0) override { + framework::proto::VarType::Type type UNUSED, + int index UNUSED = 0) override { VLOG(10) << "Set data type in infer var type of Eager mode is meaning less " "for var: " << name; @@ -155,77 +155,79 @@ class RuntimeInferVarTypeContext : public framework::InferVarTypeContext { bool IsDygraph() const override { return true; } protected: - bool HasVar(const std::string& name) const override { + bool HasVar(const std::string& name UNUSED) const override { PADDLE_THROW(platform::errors::PermissionDenied( "HasVar is not supported in runtime InferVarType")); } const std::vector& InputVars( - const std::string& name) const override { + const std::string& name UNUSED) const override { PADDLE_THROW(platform::errors::PermissionDenied( "InputVars is not supported in runtime InferVarType")); } const std::vector& OutputVars( - const std::string& name) const override { + const std::string& name UNUSED) const override { PADDLE_THROW(platform::errors::PermissionDenied( "OutputVars is not 
supported in runtime InferVarType")); } framework::proto::VarType::Type GetVarType( - const std::string& name) const override { + const std::string& name UNUSED) const override { PADDLE_THROW(platform::errors::PermissionDenied( "Do not manipulate var in runtime InferVarType")); } - void SetVarType(const std::string& name, - framework::proto::VarType::Type type) override { + void SetVarType(const std::string& name UNUSED, + framework::proto::VarType::Type type UNUSED) override { PADDLE_THROW(platform::errors::PermissionDenied( "Do not manipulate var in runtime InferVarType")); } framework::proto::VarType::Type GetVarDataType( - const std::string& name) const override { + const std::string& name UNUSED) const override { PADDLE_THROW(platform::errors::PermissionDenied( "Do not manipulate var in runtime InferVarType")); } - void SetVarDataType(const std::string& name, - framework::proto::VarType::Type type) override { + void SetVarDataType(const std::string& name UNUSED, + framework::proto::VarType::Type type UNUSED) override { PADDLE_THROW(platform::errors::PermissionDenied( "Do not manipulate var in runtime InferVarType")); } std::vector GetVarDataTypes( - const std::string& name) const override { + const std::string& name UNUSED) const override { PADDLE_THROW(platform::errors::PermissionDenied( "GetVarDataTypes is not supported in runtime InferVarType")); } - void SetVarDataTypes(const std::string& name, + void SetVarDataTypes(const std::string& name UNUSED, const std::vector& - multiple_data_type) override { + multiple_data_type UNUSED) override { PADDLE_THROW(platform::errors::PermissionDenied( "SetVarDataTypes is not supported in runtime InferVarType")); } - std::vector GetVarShape(const std::string& name) const override { + std::vector GetVarShape( + const std::string& name UNUSED) const override { PADDLE_THROW(platform::errors::PermissionDenied( "Do not handle Shape in runtime InferVarType")); } - void SetVarShape(const std::string& name, - const std::vector& dims) override { + void SetVarShape(const std::string& name UNUSED, + const std::vector& dims UNUSED) override { PADDLE_THROW(platform::errors::PermissionDenied( "Do not handle Shape in runtime InferVarType")); } - int32_t GetVarLoDLevel(const std::string& name) const override { + int32_t GetVarLoDLevel(const std::string& name UNUSED) const override { PADDLE_THROW(platform::errors::PermissionDenied( "Do not handle LoDLevel in runtime InferVarType")); } - void SetVarLoDLevel(const std::string& name, int32_t lod_level) override { + void SetVarLoDLevel(const std::string& name UNUSED, + int32_t lod_level UNUSED) override { PADDLE_THROW(platform::errors::PermissionDenied( "Do not handle LoDLevel in runtime InferVarType")); } diff --git a/paddle/fluid/operators/collective/c_concat_op.h b/paddle/fluid/operators/collective/c_concat_op.h index d9fe5c861a4..39bdc4c2740 100644 --- a/paddle/fluid/operators/collective/c_concat_op.h +++ b/paddle/fluid/operators/collective/c_concat_op.h @@ -28,7 +28,7 @@ namespace operators { template class CConcatOpCPUKernel : public framework::OpKernel { public: - void Compute(const framework::ExecutionContext& ctx) const override { + void Compute(const framework::ExecutionContext& ctx UNUSED) const override { PADDLE_THROW(platform::errors::Unavailable( "Do not support c_concat for cpu kernel now.")); } diff --git a/paddle/fluid/operators/collective/c_softmax_with_cross_entropy_op.h b/paddle/fluid/operators/collective/c_softmax_with_cross_entropy_op.h index c8311bdab81..9b6a2c86897 100644 --- 
a/paddle/fluid/operators/collective/c_softmax_with_cross_entropy_op.h +++ b/paddle/fluid/operators/collective/c_softmax_with_cross_entropy_op.h @@ -32,7 +32,7 @@ namespace operators { template class CSoftmaxWithCrossEntropyOpCPUKernel : public framework::OpKernel { public: - void Compute(const framework::ExecutionContext& ctx) const override { + void Compute(const framework::ExecutionContext& ctx UNUSED) const override { PADDLE_THROW(platform::errors::Unavailable( "Do not support c_embedding for cpu kernel now.")); } diff --git a/paddle/fluid/operators/controlflow/feed_op.cc b/paddle/fluid/operators/controlflow/feed_op.cc index 698ca4e02e6..c2deeb41909 100644 --- a/paddle/fluid/operators/controlflow/feed_op.cc +++ b/paddle/fluid/operators/controlflow/feed_op.cc @@ -97,7 +97,7 @@ void FeedSparseCooTensorKernel(const Context& dev_ctx, } template -void FeedStringsKernel(const Context& dev_ctx, +void FeedStringsKernel(const Context& dev_ctx UNUSED, const phi::ExtendedTensor& x, int col, phi::ExtendedTensor* out) { diff --git a/paddle/fluid/operators/elementwise/elementwise_op.h b/paddle/fluid/operators/elementwise/elementwise_op.h index 4ef896ff01b..2f8db0e737a 100644 --- a/paddle/fluid/operators/elementwise/elementwise_op.h +++ b/paddle/fluid/operators/elementwise/elementwise_op.h @@ -159,7 +159,7 @@ class ElementwiseOp : public framework::OperatorWithKernel { } phi::KernelKey GetKernelTypeForVar( - const std::string &var_name, + const std::string &var_name UNUSED, const phi::DenseTensor &tensor, const phi::KernelKey &expected_kernel_type) const override { if (framework::IsComplexType(expected_kernel_type.dtype())) { @@ -305,7 +305,7 @@ class ElementwiseOpGrad : public framework::OperatorWithKernel { } phi::KernelKey GetKernelTypeForVar( - const std::string &var_name, + const std::string &var_name UNUSED, const phi::DenseTensor &tensor, const phi::KernelKey &expected_kernel_type) const override { if (framework::IsComplexType(expected_kernel_type.dtype())) { @@ -346,7 +346,7 @@ class ElementwiseOpDoubleGrad : public framework::OperatorWithKernel { } phi::KernelKey GetKernelTypeForVar( - const std::string &var_name, + const std::string &var_name UNUSED, const phi::DenseTensor &tensor, const phi::KernelKey &expected_kernel_type) const override { if (framework::IsComplexType(expected_kernel_type.dtype())) { @@ -394,7 +394,7 @@ class ElementwiseOpDoubleGradWithoutDXDY } phi::KernelKey GetKernelTypeForVar( - const std::string &var_name, + const std::string &var_name UNUSED, const phi::DenseTensor &tensor, const phi::KernelKey &expected_kernel_type) const override { if (framework::IsComplexType(expected_kernel_type.dtype())) { @@ -442,7 +442,7 @@ class ElementwiseOpTripleGrad : public framework::OperatorWithKernel { } phi::KernelKey GetKernelTypeForVar( - const std::string &var_name, + const std::string &var_name UNUSED, const phi::DenseTensor &tensor, const phi::KernelKey &expected_kernel_type) const override { if (framework::IsComplexType(expected_kernel_type.dtype())) { diff --git a/paddle/fluid/operators/fused/mkldnn/multi_gru_mkldnn_op.cc b/paddle/fluid/operators/fused/mkldnn/multi_gru_mkldnn_op.cc index dba3087f1a0..ff6613d8ae9 100644 --- a/paddle/fluid/operators/fused/mkldnn/multi_gru_mkldnn_op.cc +++ b/paddle/fluid/operators/fused/mkldnn/multi_gru_mkldnn_op.cc @@ -598,7 +598,7 @@ class MultiGRUHandler { } template - void reorderOutput(std::shared_ptr mem, int layer) { + void reorderOutput(std::shared_ptr mem, int layer UNUSED) { auto* data = mem->get_data_handle(); auto* hidden_data = 
phi::funcs::to_void_cast(hidden_->mutable_data(place_)); diff --git a/paddle/fluid/platform/device_context.cc b/paddle/fluid/platform/device_context.cc index e14ba8b1710..456abd55ef6 100644 --- a/paddle/fluid/platform/device_context.cc +++ b/paddle/fluid/platform/device_context.cc @@ -57,7 +57,8 @@ DeviceType Place2DeviceType(const platform::Place& place) { template typename std::enable_if::value, DevCtx*>::type -ConstructDevCtx(const phi::Place& p, /*unused*/ int stream_priority = 0) { +ConstructDevCtx(const phi::Place& p, + /*unused*/ int stream_priority UNUSED = 0) { return new DevCtx(p); } diff --git a/paddle/phi/kernels/cpu/margin_cross_entropy_kernel.cc b/paddle/phi/kernels/cpu/margin_cross_entropy_kernel.cc index 06d74471dd9..b967d723225 100644 --- a/paddle/phi/kernels/cpu/margin_cross_entropy_kernel.cc +++ b/paddle/phi/kernels/cpu/margin_cross_entropy_kernel.cc @@ -21,19 +21,19 @@ namespace phi { template -void MarginCrossEntropyKernel(const Context& dev_ctx, - const DenseTensor& logits, - const DenseTensor& labels, - bool return_softmax, - int ring_id, - int rank, - int nranks, - float margin1, - float margin2, - float margin3, - float scale, - DenseTensor* softmax, - DenseTensor* loss) { +void MarginCrossEntropyKernel(const Context& dev_ctx UNUSED, + const DenseTensor& logits UNUSED, + const DenseTensor& labels UNUSED, + bool return_softmax UNUSED, + int ring_id UNUSED, + int rank UNUSED, + int nranks UNUSED, + float margin1 UNUSED, + float margin2 UNUSED, + float margin3 UNUSED, + float scale UNUSED, + DenseTensor* softmax UNUSED, + DenseTensor* loss UNUSED) { PADDLE_THROW( errors::Unavailable("Do not support margin_cross_entropy for cpu kernel " "now.")); diff --git a/paddle/phi/kernels/cpu/rnn_functor.h b/paddle/phi/kernels/cpu/rnn_functor.h index 49b0c7655c6..511021c0086 100644 --- a/paddle/phi/kernels/cpu/rnn_functor.h +++ b/paddle/phi/kernels/cpu/rnn_functor.h @@ -79,7 +79,7 @@ void CreateMaskMatrix(const CPUContext& dev_ctx, template void ResetParameterVector(const std::vector& raw_params_vec, int num_layers, - int gate_num, + int gate_num UNUSED, bool is_bidirec, std::vector>* params_vec) { // the parameter raw seuquence is [FWhi, FWhh, BWhi, BWhh] * num_layers diff --git a/paddle/phi/kernels/cpu/sgd_kernel.cc b/paddle/phi/kernels/cpu/sgd_kernel.cc index e63e2f2637b..30a3a13a50d 100644 --- a/paddle/phi/kernels/cpu/sgd_kernel.cc +++ b/paddle/phi/kernels/cpu/sgd_kernel.cc @@ -117,10 +117,10 @@ void SGDDenseKernel(const Context& dev_ctx, const DenseTensor& param, const DenseTensor& learning_rate, const DenseTensor& grad, - const paddle::optional& master_param, - bool multi_precision, + const paddle::optional& master_param UNUSED, + bool multi_precision UNUSED, DenseTensor* param_out, - DenseTensor* master_param_out) { + DenseTensor* master_param_out UNUSED) { dev_ctx.template Alloc(param_out); sgd_dense_param_dense_grad_impl(param, learning_rate, grad, param_out); } @@ -131,24 +131,24 @@ void SGDDenseParamSparseGradKernel( const DenseTensor& param, const DenseTensor& learning_rate, const SelectedRows& grad, - const paddle::optional& master_param, - bool multi_precision, + const paddle::optional& master_param UNUSED, + bool multi_precision UNUSED, DenseTensor* param_out, - DenseTensor* master_param_out) { + DenseTensor* master_param_out UNUSED) { dev_ctx.template Alloc(param_out); sgd_dense_param_sparse_grad_impl(param, learning_rate, grad, param_out); } template void SGDSparseParamSparseGradKernel( - const Context& dev_ctx, + const Context& dev_ctx UNUSED, const 
SelectedRows& param, const DenseTensor& learning_rate, const SelectedRows& grad, - const paddle::optional& master_param, - bool multi_precision, + const paddle::optional& master_param UNUSED, + bool multi_precision UNUSED, SelectedRows* param_out, - SelectedRows* master_param_out) { + SelectedRows* master_param_out UNUSED) { // for distributed training, a sparse var may be empty, // just skip updating. if (grad.rows().size() == 0) { diff --git a/paddle/phi/kernels/cpu/uniform_kernel.cc b/paddle/phi/kernels/cpu/uniform_kernel.cc index 17ee5459188..d850dc5074e 100644 --- a/paddle/phi/kernels/cpu/uniform_kernel.cc +++ b/paddle/phi/kernels/cpu/uniform_kernel.cc @@ -22,7 +22,7 @@ namespace phi { template void UniformKernel(const Context &dev_ctx, const IntArray &shape, - DataType dtype, + DataType dtype UNUSED, const Scalar &min, const Scalar &max, int seed, diff --git a/paddle/phi/kernels/funcs/activation_functor.h b/paddle/phi/kernels/funcs/activation_functor.h index df63c22c946..b73ca44f7ca 100644 --- a/paddle/phi/kernels/funcs/activation_functor.h +++ b/paddle/phi/kernels/funcs/activation_functor.h @@ -104,7 +104,7 @@ struct SinGradFunctor : public BaseActivationFunctor { typename Out, typename dOut, typename dX> - void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const { dx.device(d) = dout * x.unaryExpr(Cosine()); } @@ -277,7 +277,7 @@ struct ReciprocalGradFunctor : public BaseActivationFunctor { typename Out, typename dOut, typename dX> - void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + void operator()(Device d, X x UNUSED, Out out, dOut dout, dX dx) const { dx.device(d) = dout * static_cast(-1) * out * out; } @@ -310,7 +310,7 @@ struct CosGradFunctor : public BaseActivationFunctor { typename Out, typename dOut, typename dX> - void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const { dx.device(d) = -dout * x.unaryExpr(Sine()); } @@ -505,7 +505,7 @@ struct MishGradFunctor : public BaseActivationFunctor { typename Out, typename dOut, typename dX> - void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const { auto sp = (x > static_cast(threshold)) .select(x, (static_cast(1) + x.exp()).log()); auto gsp = static_cast(1) - (-sp).exp(); @@ -544,7 +544,7 @@ struct STanhGradFunctor : public BaseActivationFunctor { typename Out, typename dOut, typename dX> - void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const { auto a = static_cast(scale_a); auto b = static_cast(scale_b); auto temp = (a * x).tanh() * (a * x).tanh(); @@ -574,7 +574,7 @@ struct TanGradFunctor : public BaseActivationFunctor { typename Out, typename dOut, typename dX> - void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const { dx.device(d) = dout / x.unaryExpr(Cosine()).square(); } @@ -620,7 +620,7 @@ struct SqrtGradFunctor : public BaseActivationFunctor { typename Out, typename dOut, typename dX> - void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + void operator()(Device d, X x UNUSED, Out out, dOut dout, dX dx) const { dx.device(d) = static_cast(0.5) * dout / out; } @@ -645,7 +645,7 @@ struct RsqrtGradFunctor : public BaseActivationFunctor { typename Out, typename dOut, typename dX> - void 
operator()(Device d, X x, Out out, dOut dout, dX dx) const { + void operator()(Device d, X x UNUSED, Out out, dOut dout, dX dx) const { dx.device(d) = static_cast(-0.5) * dout * out * out * out; } @@ -697,7 +697,7 @@ struct SoftplusGradFunctor : public BaseActivationFunctor { typename Out, typename dOut, typename dX> - void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const { auto x_beta = static_cast(beta) * x; dx.device(d) = (x_beta > static_cast(threshold)) @@ -816,7 +816,7 @@ struct SinhGradFunctor : public BaseActivationFunctor { typename Out, typename dOut, typename dX> - void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const { dx.device(d) = dout * x.unaryExpr(Cosh()); } @@ -831,7 +831,7 @@ struct CoshGradFunctor : public BaseActivationFunctor { typename Out, typename dOut, typename dX> - void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const { dx.device(d) = dout * x.unaryExpr(Sinh()); } @@ -867,7 +867,7 @@ struct AcosGradFunctor : public BaseActivationFunctor { typename Out, typename dOut, typename dX> - void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const { dx.device(d) = -dout * static_cast(1) / (static_cast(1) - x.square()).sqrt(); } @@ -904,7 +904,7 @@ struct AsinGradFunctor : public BaseActivationFunctor { typename Out, typename dOut, typename dX> - void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const { dx.device(d) = dout * static_cast(1) / (static_cast(1) - x.square()).sqrt(); } @@ -941,7 +941,7 @@ struct AtanGradFunctor : public BaseActivationFunctor { typename Out, typename dOut, typename dX> - void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const { dx.device(d) = dout * static_cast(1) / (static_cast(1) + x.square()); } @@ -989,7 +989,7 @@ struct AcoshGradFunctor : public BaseActivationFunctor { typename Out, typename dOut, typename dX> - void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const { dx.device(d) = dout * static_cast(1) / (x * x - static_cast(1)).sqrt(); } @@ -1026,7 +1026,7 @@ struct AsinhGradFunctor : public BaseActivationFunctor { typename Out, typename dOut, typename dX> - void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const { dx.device(d) = dout * static_cast(1) / (x.square() + static_cast(1)).sqrt(); } @@ -1063,7 +1063,7 @@ struct AtanhGradFunctor : public BaseActivationFunctor { typename Out, typename dOut, typename dX> - void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const { dx.device(d) = dout * static_cast(1) / (static_cast(1) - x.square()); } @@ -1160,12 +1160,12 @@ template struct ReluGradGradFunctor : public BaseActivationFunctor { template void operator()(const Device& dev, - const DenseTensor* X, + const DenseTensor* X UNUSED, const DenseTensor* Out, const DenseTensor* ddX, DenseTensor* ddOut, - DenseTensor* dOut, - DenseTensor* dX) const { + DenseTensor* dOut UNUSED, + DenseTensor* dX UNUSED) 
const { auto* d = dev.eigen_device(); auto ddx = EigenVector::Flatten( GET_DATA_SAFELY(ddX, "Input", "DDX", "ReluGradGrad")); @@ -1375,7 +1375,7 @@ struct HardTanhGradFunctor : public BaseActivationFunctor { typename Out, typename dOut, typename dX> - void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const { dx.device(d) = dout * ((x > static_cast(t_min)) * (x < static_cast(t_max))) .template cast(); @@ -1431,11 +1431,11 @@ struct LeakyReluGradGradFunctor : public BaseActivationFunctor { template void operator()(const Device& dev, const DenseTensor* X, - const DenseTensor* Out, + const DenseTensor* Out UNUSED, const DenseTensor* ddX, DenseTensor* ddOut, - DenseTensor* dOut, - DenseTensor* dX) const { + DenseTensor* dOut UNUSED, + DenseTensor* dX UNUSED) const { if (ddOut) { auto* d = dev.eigen_device(); auto ddx = EigenVector::Flatten( @@ -1479,7 +1479,7 @@ struct ThresholdedReluGradFunctor : public BaseActivationFunctor { typename Out, typename dOut, typename dX> - void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const { auto th = static_cast(threshold); dx.device(d) = dout * (x > th).template cast(); } @@ -1511,7 +1511,7 @@ struct Relu6GradFunctor : public BaseActivationFunctor { typename Out, typename dOut, typename dX> - void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + void operator()(Device d, X x UNUSED, Out out, dOut dout, dX dx) const { float threshold = 6; dx.device(d) = dout * ((out > static_cast(0)) * (out < static_cast(threshold))) @@ -1540,7 +1540,7 @@ struct TanhShrinkGradFunctor : public BaseActivationFunctor { typename Out, typename dOut, typename dX> - void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const { dx.device(d) = dout * (x.tanh() * x.tanh()); } @@ -1577,7 +1577,7 @@ struct HardShrinkGradFunctor : public BaseActivationFunctor { typename Out, typename dOut, typename dX> - void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const { auto temp1 = x < static_cast(threshold * -1.f); auto temp2 = x > static_cast(threshold); dx.device(d) = dout * (temp1 || temp2).template cast(); @@ -1615,7 +1615,7 @@ struct SoftShrinkGradFunctor : public BaseActivationFunctor { typename Out, typename dOut, typename dX> - void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const { auto lambdaT = static_cast(lambda); auto temp1 = (x > lambdaT).template cast(); auto temp2 = (x < -lambdaT).template cast(); @@ -1673,7 +1673,7 @@ struct ELUGradNegativeAlphaFunctor : public BaseActivationFunctor { typename Out, typename dOut, typename dX> - void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const { // case 2: alpha < 0 // dx = dout, if x > 0 // dx = dout * (out + alpha), if x <=0 @@ -1742,7 +1742,7 @@ struct SiluGradFunctor : public BaseActivationFunctor { typename Out, typename dOut, typename dX> - void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const { auto temp1 = static_cast(1) + (-x).exp(); // 1+e^(-x) auto temp2 = x * (-x).exp(); // x*e^(-x) dx.device(d) = dout * ((static_cast(1) / temp1) * @@ -1770,7 
+1770,7 @@ struct SoftsignGradFunctor : public BaseActivationFunctor { typename Out, typename dOut, typename dX> - void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const { dx.device(d) = dout * (static_cast(1) / (static_cast(1) + x.abs()).square()); } @@ -1947,7 +1947,7 @@ struct LogSigmoidGradFunctor : public BaseActivationFunctor { typename Out, typename dOut, typename dX> - void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const { auto temp = (-x).cwiseMax(static_cast(0)); // temp = max(-x, 0) dx.device(d) = dout * ((-x - temp).exp() / ((-temp).exp() + (-x - temp).exp())); @@ -1984,7 +1984,7 @@ struct HardSigmoidGradFunctor : public BaseActivationFunctor { typename Out, typename dOut, typename dX> - void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + void operator()(Device d, X x UNUSED, Out out, dOut dout, dX dx) const { dx.device(d) = dout * ((out > static_cast(0)) * (out < static_cast(1))) .template cast() * @@ -2012,7 +2012,7 @@ struct LogGradFunctor : public BaseActivationFunctor { typename Out, typename dOut, typename dX> - void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const { dx.device(d) = dout * (static_cast(1) / x); } @@ -2036,7 +2036,7 @@ struct Log2GradFunctor : public BaseActivationFunctor { typename Out, typename dOut, typename dX> - void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const { dx.device(d) = dout * static_cast(1) / (x * static_cast(log(2))); } @@ -2060,7 +2060,7 @@ struct Log10GradFunctor : public BaseActivationFunctor { typename Out, typename dOut, typename dX> - void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const { dx.device(d) = dout * static_cast(1) / (x * static_cast(log(10))); } @@ -2083,7 +2083,7 @@ struct Log1pGradFunctor : public BaseActivationFunctor { typename Out, typename dOut, typename dX> - void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const { dx.device(d) = dout * (static_cast(1) / (x + static_cast(1))); } @@ -2157,7 +2157,7 @@ struct HardSwishGradFunctor : public BaseActivationFunctor { typename Out, typename dOut, typename dX> - void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const { auto tmp = ((x + static_cast(offset)) < static_cast(threshold)) .template cast(); dx.device(d) = @@ -2193,7 +2193,7 @@ struct SwishGradFunctor : public BaseActivationFunctor { typename Out, typename dOut, typename dX> - void operator()(Device d, X x, Out fake_out, dOut dout, dX dx) const { + void operator()(Device d, X x, Out fake_out UNUSED, dOut dout, dX dx) const { float beta = 1.0; auto temp1 = static_cast(1) / (static_cast(1) + (static_cast(-beta) * x).exp()); @@ -2229,7 +2229,7 @@ struct PowGradFunctor : public BaseActivationFunctor { typename Out, typename dOut, typename dX> - void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const { dx.device(d) = dout * static_cast(factor) * x.pow(static_cast(factor) - static_cast(1)); } @@ -2279,7 +2279,8 @@ struct ZeroGradFunctor : 
public BaseActivationFunctor { typename Out, typename dOut, typename dX> - void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + void operator()( + Device d, X x UNUSED, Out out, dOut dout UNUSED, dX dx) const { dx.device(d) = static_cast(0) * out; } @@ -2384,7 +2385,7 @@ struct CELUGradFunctor : public BaseActivationFunctor { typename Out, typename dOut, typename dX> - void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const { auto temp_a_pos = static_cast(alpha > 0); auto temp_a_neg = static_cast(alpha <= 0); auto temp_x_pos = (x > static_cast(0)).template cast(); diff --git a/paddle/phi/kernels/funcs/detection/poly_util.cc b/paddle/phi/kernels/funcs/detection/poly_util.cc deleted file mode 100644 index fd8037a82ea..00000000000 --- a/paddle/phi/kernels/funcs/detection/poly_util.cc +++ /dev/null @@ -1,138 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ -#ifndef POLY_UTIL_CC_ -#define POLY_UTIL_CC_ - -#include "paddle/phi/kernels/funcs/detection/poly_util.h" - -namespace phi { -namespace funcs { - -using phi::funcs::gpc_free_polygon; -using phi::funcs::gpc_polygon_clip; - -template -void Array2PointVec(const T* box, - const size_t box_size, - std::vector>* vec) { - size_t pts_num = box_size / 2; - (*vec).resize(pts_num); - for (size_t i = 0; i < pts_num; i++) { - (*vec).at(i).x = box[2 * i]; - (*vec).at(i).y = box[2 * i + 1]; - } -} - -template -void Array2Poly(const T* box, - const size_t box_size, - phi::funcs::gpc_polygon* poly) { - size_t pts_num = box_size / 2; - (*poly).num_contours = 1; - (*poly).hole = reinterpret_cast(malloc(sizeof(int))); - (*poly).hole[0] = 0; - (*poly).contour = - (phi::funcs::gpc_vertex_list*)malloc(sizeof(phi::funcs::gpc_vertex_list)); - (*poly).contour->num_vertices = pts_num; - (*poly).contour->vertex = - (phi::funcs::gpc_vertex*)malloc(sizeof(phi::funcs::gpc_vertex) * pts_num); - for (size_t i = 0; i < pts_num; ++i) { - (*poly).contour->vertex[i].x = box[2 * i]; - (*poly).contour->vertex[i].y = box[2 * i + 1]; - } -} - -template -void PointVec2Poly(const std::vector>& vec, - phi::funcs::gpc_polygon* poly) { - int pts_num = vec.size(); - (*poly).num_contours = 1; - (*poly).hole = reinterpret_cast(malloc(sizeof(int))); - (*poly).hole[0] = 0; - (*poly).contour = - (phi::funcs::gpc_vertex_list*)malloc(sizeof(phi::funcs::gpc_vertex_list)); - (*poly).contour->num_vertices = pts_num; - (*poly).contour->vertex = - (phi::funcs::gpc_vertex*)malloc(sizeof(phi::funcs::gpc_vertex) * pts_num); - for (size_t i = 0; i < pts_num; ++i) { - (*poly).contour->vertex[i].x = vec[i].x; - (*poly).contour->vertex[i].y = vec[i].y; - } -} - -template -void Poly2PointVec(const phi::funcs::gpc_vertex_list& contour, - std::vector>* vec) { - int pts_num = contour.num_vertices; - (*vec).resize(pts_num); - for (int i = 0; i < pts_num; i++) { - (*vec).at(i).x = contour.vertex[i].x; - (*vec).at(i).y = contour.vertex[i].y; - } -} - -template -T 
GetContourArea(const std::vector>& vec) { - size_t pts_num = vec.size(); - if (pts_num < 3) return T(0.); - T area = T(0.); - for (size_t i = 0; i < pts_num; ++i) { - area += vec[i].x * vec[(i + 1) % pts_num].y - - vec[i].y * vec[(i + 1) % pts_num].x; - } - return std::fabs(area / 2.0); -} - -template -T PolyArea(const T* box, const size_t box_size, const bool normalized) { - // If coordinate values are is invalid - // if area size <= 0, return 0. - std::vector> vec; - Array2PointVec(box, box_size, &vec); - return GetContourArea(vec); -} - -template -T PolyOverlapArea(const T* box1, - const T* box2, - const size_t box_size, - const bool normalized) { - phi::funcs::gpc_polygon poly1; - phi::funcs::gpc_polygon poly2; - Array2Poly(box1, box_size, &poly1); - Array2Poly(box2, box_size, &poly2); - phi::funcs::gpc_polygon respoly; - phi::funcs::gpc_op op = phi::funcs::GPC_INT; - phi::funcs::gpc_polygon_clip(op, &poly2, &poly1, &respoly); - - T inter_area = T(0.); - int contour_num = respoly.num_contours; - for (int i = 0; i < contour_num; ++i) { - std::vector> resvec; - Poly2PointVec(respoly.contour[i], &resvec); - // inter_area += std::fabs(cv::contourArea(resvec)) + 0.5f * - // (cv::arcLength(resvec, true)); - inter_area += GetContourArea(resvec); - } - - phi::funcs::gpc_free_polygon(&poly1); - phi::funcs::gpc_free_polygon(&poly2); - phi::funcs::gpc_free_polygon(&respoly); - return inter_area; -} - -} // namespace funcs -} // namespace phi - -#endif diff --git a/paddle/phi/kernels/funcs/detection/poly_util.h b/paddle/phi/kernels/funcs/detection/poly_util.h index 6d527d2d95f..608f373f3d6 100644 --- a/paddle/phi/kernels/funcs/detection/poly_util.h +++ b/paddle/phi/kernels/funcs/detection/poly_util.h @@ -13,6 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once +#ifndef POLY_UTIL_H_ +#define POLY_UTIL_H_ + #include #include "paddle/phi/kernels/funcs/gpc.h" @@ -20,6 +23,9 @@ limitations under the License. 
*/ namespace phi { namespace funcs { +using phi::funcs::gpc_free_polygon; +using phi::funcs::gpc_polygon_clip; + template class Point_ { public: @@ -43,33 +49,114 @@ class Point_ { template void Array2PointVec(const T* box, const size_t box_size, - std::vector>* vec); + std::vector>* vec) { + size_t pts_num = box_size / 2; + (*vec).resize(pts_num); + for (size_t i = 0; i < pts_num; i++) { + (*vec).at(i).x = box[2 * i]; + (*vec).at(i).y = box[2 * i + 1]; + } +} template void Array2Poly(const T* box, const size_t box_size, - phi::funcs::gpc_polygon* poly); + phi::funcs::gpc_polygon* poly) { + size_t pts_num = box_size / 2; + (*poly).num_contours = 1; + (*poly).hole = reinterpret_cast(malloc(sizeof(int))); + (*poly).hole[0] = 0; + (*poly).contour = + (phi::funcs::gpc_vertex_list*)malloc(sizeof(phi::funcs::gpc_vertex_list)); + (*poly).contour->num_vertices = pts_num; + (*poly).contour->vertex = + (phi::funcs::gpc_vertex*)malloc(sizeof(phi::funcs::gpc_vertex) * pts_num); + for (size_t i = 0; i < pts_num; ++i) { + (*poly).contour->vertex[i].x = box[2 * i]; + (*poly).contour->vertex[i].y = box[2 * i + 1]; + } +} template void PointVec2Poly(const std::vector>& vec, - phi::funcs::gpc_polygon* poly); + phi::funcs::gpc_polygon* poly) { + int pts_num = vec.size(); + (*poly).num_contours = 1; + (*poly).hole = reinterpret_cast(malloc(sizeof(int))); + (*poly).hole[0] = 0; + (*poly).contour = + (phi::funcs::gpc_vertex_list*)malloc(sizeof(phi::funcs::gpc_vertex_list)); + (*poly).contour->num_vertices = pts_num; + (*poly).contour->vertex = + (phi::funcs::gpc_vertex*)malloc(sizeof(phi::funcs::gpc_vertex) * pts_num); + for (size_t i = 0; i < pts_num; ++i) { + (*poly).contour->vertex[i].x = vec[i].x; + (*poly).contour->vertex[i].y = vec[i].y; + } +} template void Poly2PointVec(const phi::funcs::gpc_vertex_list& contour, - std::vector>* vec); + std::vector>* vec) { + int pts_num = contour.num_vertices; + (*vec).resize(pts_num); + for (int i = 0; i < pts_num; i++) { + (*vec).at(i).x = contour.vertex[i].x; + (*vec).at(i).y = contour.vertex[i].y; + } +} template -T GetContourArea(const std::vector>& vec); +T GetContourArea(const std::vector>& vec) { + size_t pts_num = vec.size(); + if (pts_num < 3) return T(0.); + T area = T(0.); + for (size_t i = 0; i < pts_num; ++i) { + area += vec[i].x * vec[(i + 1) % pts_num].y - + vec[i].y * vec[(i + 1) % pts_num].x; + } + return std::fabs(area / 2.0); +} template -T PolyArea(const T* box, const size_t box_size, const bool normalized); +T PolyArea(const T* box, const size_t box_size, const bool normalized UNUSED) { + // If coordinate values are is invalid + // if area size <= 0, return 0. 
+  std::vector> vec;
+  Array2PointVec(box, box_size, &vec);
+  return GetContourArea(vec);
+}
 
 template 
 T PolyOverlapArea(const T* box1,
                   const T* box2,
                   const size_t box_size,
-                  const bool normalized);
+                  const bool normalized UNUSED) {
+  phi::funcs::gpc_polygon poly1;
+  phi::funcs::gpc_polygon poly2;
+  Array2Poly(box1, box_size, &poly1);
+  Array2Poly(box2, box_size, &poly2);
+  phi::funcs::gpc_polygon respoly;
+  phi::funcs::gpc_op op = phi::funcs::GPC_INT;
+  phi::funcs::gpc_polygon_clip(op, &poly2, &poly1, &respoly);
+
+  T inter_area = T(0.);
+  int contour_num = respoly.num_contours;
+  for (int i = 0; i < contour_num; ++i) {
+    std::vector> resvec;
+    Poly2PointVec(respoly.contour[i], &resvec);
+    // inter_area += std::fabs(cv::contourArea(resvec)) + 0.5f *
+    // (cv::arcLength(resvec, true));
+    inter_area += GetContourArea(resvec);
+  }
+
+  phi::funcs::gpc_free_polygon(&poly1);
+  phi::funcs::gpc_free_polygon(&poly2);
+  phi::funcs::gpc_free_polygon(&respoly);
+  return inter_area;
+}
+
 }  // namespace funcs
 }  // namespace phi
-#include "paddle/phi/kernels/funcs/detection/poly_util.cc"
+#endif
diff --git a/paddle/phi/kernels/legacy/cpu/uniform_kernel.cc b/paddle/phi/kernels/legacy/cpu/uniform_kernel.cc
index ecea86874a7..3aa697b2409 100644
--- a/paddle/phi/kernels/legacy/cpu/uniform_kernel.cc
+++ b/paddle/phi/kernels/legacy/cpu/uniform_kernel.cc
@@ -20,7 +20,7 @@ namespace phi {
 template 
 void UniformRawKernel(const Context &dev_ctx,
                       const IntArray &shape,
-                      DataType dtype,
+                      DataType dtype UNUSED,
                       const Scalar &min,
                       const Scalar &max,
                       int seed,
--
GitLab
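
Background sketch (not part of the patch): the UNUSED marker applied throughout this change is a project-provided macro that expands to a compiler attribute so that -Wunused-parameter stays quiet without altering function signatures. The exact definition lives in Paddle's own headers, not in this diff; a minimal illustrative definition, assuming GCC or Clang, would look like:

    #if defined(__GNUC__) || defined(__clang__)
    // Tell the compiler the parameter is intentionally unused.
    #define UNUSED __attribute__((unused))
    #else
    #define UNUSED
    #endif

With such a macro in place, a declaration like "void Foo(int x UNUSED);" compiles cleanly under -Wunused-parameter while keeping the parameter name visible for readers and overriders.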