From bafc346900904733bf23007339b90ddb3c837558 Mon Sep 17 00:00:00 2001
From: Galaxy1458 <55453380+Galaxy1458@users.noreply.github.com>
Date: Tue, 9 May 2023 17:46:55 +0800
Subject: [PATCH] remove some [-Wunused-parameter] warnings (#53617)

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop
---
 .../collective/process_group_custom.cc        |  2 +-
 .../distributed/ps/service/brpc_ps_server.h   |  6 ++++--
 .../ps/service/communicator/communicator.h    | 20 +++++++++----------
 .../ps/service/coordinator_client.h           |  4 ++--
 paddle/fluid/framework/ir/generate_pass.cc    |  2 +-
 paddle/fluid/framework/trainer.h              |  2 +-
 .../fused/mkldnn/fusion_rnn_mkldnn.h          | 10 +++++-----
 paddle/fluid/pybind/process_group_utils.h     |  4 ++--
 paddle/phi/backends/onednn/onednn_helper.h    |  3 ++-
 paddle/phi/backends/onednn/onednn_reuse.h     |  2 +-
 paddle/phi/kernels/autotune/cache_base.h      |  2 +-
 .../phi/kernels/cpu/viterbi_decode_kernel.cc  |  2 +-
 .../kernels/impl/logsumexp_grad_kernel_impl.h |  2 +-
 paddle/phi/kernels/impl/matmul_kernel_impl.h  |  2 +-
 .../kernels/impl/searchsorted_kernel_impl.h   |  8 ++++----
 paddle/phi/kernels/impl/split_kernel_impl.h   |  2 +-
 paddle/phi/kernels/sparse/cpu/full_kernel.cc  |  2 +-
 .../phi/kernels/sparse/sparse_utils_kernel.h  |  2 +-
 18 files changed, 40 insertions(+), 37 deletions(-)

diff --git a/paddle/fluid/distributed/collective/process_group_custom.cc b/paddle/fluid/distributed/collective/process_group_custom.cc
index f0222b007c5..b6c7063fd6f 100644
--- a/paddle/fluid/distributed/collective/process_group_custom.cc
+++ b/paddle/fluid/distributed/collective/process_group_custom.cc
@@ -189,7 +189,7 @@ std::shared_ptr<ProcessGroupCustom::CustomTask> ProcessGroupCustom::Collective(
     std::vector<phi::DenseTensor>& outputs,
     Fn fn,
     CommType op_type,
-    bool sync_op,
+    bool sync_op UNUSED,
     bool use_calc_stream) {
   const auto places = GetPlaceList(inputs);
   const auto key = GetKeyFromPlaces(places);
diff --git a/paddle/fluid/distributed/ps/service/brpc_ps_server.h b/paddle/fluid/distributed/ps/service/brpc_ps_server.h
index 0343b3f8c58..321adf156c4 100644
--- a/paddle/fluid/distributed/ps/service/brpc_ps_server.h
+++ b/paddle/fluid/distributed/ps/service/brpc_ps_server.h
@@ -227,8 +227,10 @@ class DownpourPServerBrpcClosure : public PServerClosure {
   PsRequestMessage *request(size_t i) { return &_requests[i]; }
   PsResponseMessage *response(size_t i) { return &_responses[i]; }
   brpc::Controller *cntl(size_t i) { return _cntls[i].get(); }
-  int check_response(size_t request_idx, int cmd_id) { return 1; }
-  int check_save_response(size_t request_idx, int cmd_id) { return 1; }
+  int check_response(size_t request_idx UNUSED, int cmd_id UNUSED) { return 1; }
+  int check_save_response(size_t request_idx UNUSED, int cmd_id UNUSED) {
+    return 1;
+  }

  private:
   std::atomic<int32_t> _waiting_num;
diff --git a/paddle/fluid/distributed/ps/service/communicator/communicator.h b/paddle/fluid/distributed/ps/service/communicator/communicator.h
index f3aa23a7782..643c91b5b05 100644
--- a/paddle/fluid/distributed/ps/service/communicator/communicator.h
+++ b/paddle/fluid/distributed/ps/service/communicator/communicator.h
@@ -287,10 +287,10 @@ class Communicator {
     return {};
   }
   virtual void SaveFLStrategy(
-      const std::unordered_map<std::string, std::string> &fl_strategy) {}
+      const std::unordered_map<std::string, std::string> &fl_strategy UNUSED) {}
   virtual void StartCoordinator(
-      const std::string &self_endpoint,
-      const std::vector<std::string> &trainer_endpoints) {}
+      const std::string &self_endpoint UNUSED,
+      const std::vector<std::string> &trainer_endpoints UNUSED) {}

   virtual ~Communicator() {}
   virtual void RpcProfilerControl();
@@ -337,13 +337,13 @@ class Communicator {
   virtual void BarrierTriggerDecrement() {}

-  virtual void BarrierTriggerReset(int init_counter) {}
+  virtual void BarrierTriggerReset(int init_counter UNUSED) {}

   virtual void InitEnvs() = 0;

-  virtual void InitImpl(const RpcCtxMap &send_varname_to_ctx,
-                        const RecvCtxMap &recv_varname_to_ctx,
-                        Scope *recv_scope) {}
+  virtual void InitImpl(const RpcCtxMap &send_varname_to_ctx UNUSED,
+                        const RecvCtxMap &recv_varname_to_ctx UNUSED,
+                        Scope *recv_scope UNUSED) {}

   static Communicator *GetInstance() { return communicator_.get(); }
@@ -682,9 +682,9 @@ class FLCommunicator : public GeoCommunicator {
   virtual void InitBrpcClient(const std::string &dist_desc,
                               const std::vector<std::string> &host_sign_list);

-  void InitImpl(const RpcCtxMap &send_varname_to_ctx,
-                const RecvCtxMap &recv_varname_to_ctx,
-                Scope *recv_scope) {}
+  void InitImpl(const RpcCtxMap &send_varname_to_ctx UNUSED,
+                const RecvCtxMap &recv_varname_to_ctx UNUSED,
+                Scope *recv_scope UNUSED) {}

   void StartCoordinatorClient(
       const std::vector<std::string> &trainer_endpoints);
diff --git a/paddle/fluid/distributed/ps/service/coordinator_client.h b/paddle/fluid/distributed/ps/service/coordinator_client.h
index 883799fe500..bd1f0f7754d 100644
--- a/paddle/fluid/distributed/ps/service/coordinator_client.h
+++ b/paddle/fluid/distributed/ps/service/coordinator_client.h
@@ -151,8 +151,8 @@ class CoordinatorService : public PsService {
                    ::google::protobuf::Closure* done);

   int32_t SaveFLClientInfo(const CoordinatorReqMessage& request,
-                           CoordinatorResMessage* response,
-                           brpc::Controller* cntl) {
+                           CoordinatorResMessage* response UNUSED,
+                           brpc::Controller* cntl UNUSED) {
     _coordinator_service_handle->SaveFLClientInfo(request);
     return 0;
   }
diff --git a/paddle/fluid/framework/ir/generate_pass.cc b/paddle/fluid/framework/ir/generate_pass.cc
index dd58b090764..61c6ce5757a 100644
--- a/paddle/fluid/framework/ir/generate_pass.cc
+++ b/paddle/fluid/framework/ir/generate_pass.cc
@@ -26,7 +26,7 @@ class element_visitor {
   explicit element_visitor(int index) : index_(index) {}

   template <typename T>
-  Attribute operator()(const T& attr) const {
+  Attribute operator()(const T& attr UNUSED) const {
     PADDLE_THROW(platform::errors::Unimplemented("Unimplemented operand."));
   }
diff --git a/paddle/fluid/framework/trainer.h b/paddle/fluid/framework/trainer.h
index 455487541ab..bf69bed9d48 100644
--- a/paddle/fluid/framework/trainer.h
+++ b/paddle/fluid/framework/trainer.h
@@ -70,7 +70,7 @@ class TrainerBase {
   virtual Scope* GetWorkerScope(int thread_id) = 0;
   virtual void InitDumpEnv() = 0;
   virtual void DumpWork(int tid);
-  virtual void ResetDataset(Dataset* dataset_ptr) {}
+  virtual void ResetDataset(Dataset* dataset_ptr UNUSED) {}

 protected:
   virtual std::string GetDumpPath(int tid) = 0;
diff --git a/paddle/fluid/operators/fused/mkldnn/fusion_rnn_mkldnn.h b/paddle/fluid/operators/fused/mkldnn/fusion_rnn_mkldnn.h
index f8854d3d7b4..db3967ceddf 100644
--- a/paddle/fluid/operators/fused/mkldnn/fusion_rnn_mkldnn.h
+++ b/paddle/fluid/operators/fused/mkldnn/fusion_rnn_mkldnn.h
@@ -30,12 +30,12 @@ class RNNMKLDNNHandler : public phi::funcs::OneDNNHandlerT<T, T_alg> {
  public:
   RNNMKLDNNHandler(const paddle::framework::ExecutionContext& ctx,
                    const phi::OneDNNContext& dev_ctx,
-                   const dnnl::engine onednn_engine,
+                   const dnnl::engine onednn_engine UNUSED,
                    platform::Place cpu_place,
-                   const phi::DenseTensor* input,
-                   const phi::DenseTensor* weight_h,
-                   const phi::DenseTensor* h0,
-                   const bool is_reverse,
+                   const phi::DenseTensor* input UNUSED,
+                   const phi::DenseTensor* weight_h UNUSED,
+                   const phi::DenseTensor* h0 UNUSED,
+                   const bool is_reverse UNUSED,
                    const int64_t N,
                    const int64_t Ti,
                    const int64_t IC,
diff --git a/paddle/fluid/pybind/process_group_utils.h b/paddle/fluid/pybind/process_group_utils.h
index a35962ce841..1a6b640b3a3 100644
--- a/paddle/fluid/pybind/process_group_utils.h
+++ b/paddle/fluid/pybind/process_group_utils.h
@@ -56,7 +56,7 @@ struct ConcatDenseTensor<platform::CustomDeviceContext, T> {
   void operator()(const platform::CustomDeviceContext &context,
                   const std::vector<phi::DenseTensor> &in,
                   phi::DenseTensor *out,
-                  int axis = 0) {
+                  int axis UNUSED = 0) {
     auto *out_data = out->data<T>();
     auto *device = phi::DeviceManager::GetDeviceWithPlace(context.GetPlace());
     size_t offset = 0;
@@ -80,7 +80,7 @@ struct SplitDenseTensor<platform::CustomDeviceContext, T> {
   void operator()(const platform::CustomDeviceContext &context,
                   const phi::DenseTensor &in,
                   std::vector<phi::DenseTensor *> *out,
-                  int axis = 0) {
+                  int axis UNUSED = 0) {
     auto *in_data = in.data<T>();
     auto *device = phi::DeviceManager::GetDeviceWithPlace(context.GetPlace());
     size_t offset = 0;
diff --git a/paddle/phi/backends/onednn/onednn_helper.h b/paddle/phi/backends/onednn/onednn_helper.h
index 9b5aa167a62..84e36a26ca4 100644
--- a/paddle/phi/backends/onednn/onednn_helper.h
+++ b/paddle/phi/backends/onednn/onednn_helper.h
@@ -191,7 +191,8 @@ inline void AppendKey(std::string* key, const std::vector<T>& dims) {
 }

 template <typename... ArgTypes>
-inline std::string CreateKey(const OneDNNContext& dev_ctx, ArgTypes&&... args) {
+inline std::string CreateKey(const OneDNNContext& dev_ctx UNUSED,
+                             ArgTypes&&... args) {
   std::string key;
   key.reserve(64);
   using expand_type = int[];
diff --git a/paddle/phi/backends/onednn/onednn_reuse.h b/paddle/phi/backends/onednn/onednn_reuse.h
index 463c55a7c53..330f0318a6e 100644
--- a/paddle/phi/backends/onednn/onednn_reuse.h
+++ b/paddle/phi/backends/onednn/onednn_reuse.h
@@ -676,7 +676,7 @@ class OneDNNHandlerNoCachingT {
       const dnnl::memory::desc& user_md,
       const dnnl::memory::desc& target_md,
       void* ptr,
-      bool is_persistent = false,
+      bool is_persistent UNUSED = false,
       std::function<std::shared_ptr<F>(const F*)> custom_reorder_func = {}) {
     std::shared_ptr<dnnl::memory> target_memory_p;
     if (custom_reorder_func) {
diff --git a/paddle/phi/kernels/autotune/cache_base.h b/paddle/phi/kernels/autotune/cache_base.h
index 267c8ef3f68..798898f4dd7 100644
--- a/paddle/phi/kernels/autotune/cache_base.h
+++ b/paddle/phi/kernels/autotune/cache_base.h
@@ -24,7 +24,7 @@
 DECLARE_int32(search_cache_max_number);

-inline void HashCombine(std::size_t* seed) {}
+inline void HashCombine(std::size_t* seed UNUSED) {}

 // combine hash value
 // https://stackoverflow.com/questions/2590677/how-do-i-combine-hash-values-in-c0x
diff --git a/paddle/phi/kernels/cpu/viterbi_decode_kernel.cc b/paddle/phi/kernels/cpu/viterbi_decode_kernel.cc
index 6342bfee4df..42fdd78c620 100644
--- a/paddle/phi/kernels/cpu/viterbi_decode_kernel.cc
+++ b/paddle/phi/kernels/cpu/viterbi_decode_kernel.cc
@@ -127,7 +127,7 @@ template <typename Context,
 struct BinaryOperation {
-  void operator()(const Context& dev_ctx,
+  void operator()(const Context& dev_ctx UNUSED,
                   const DenseTensor& lhs,
                   const DenseTensor& rhs,
                   DenseTensor* output) {
diff --git a/paddle/phi/kernels/impl/logsumexp_grad_kernel_impl.h b/paddle/phi/kernels/impl/logsumexp_grad_kernel_impl.h
index 0db6c12d4a0..ab65769ee69 100644
--- a/paddle/phi/kernels/impl/logsumexp_grad_kernel_impl.h
+++ b/paddle/phi/kernels/impl/logsumexp_grad_kernel_impl.h
@@ -38,7 +38,7 @@ struct LogsumexpGradFunctor {
                   DX* dx,
                   DY* dy,
                   const Dim& dim,
-                  int size) {
+                  int size UNUSED) {
     using MT = typename phi::dtype::MPTypeTrait<T>::Type;
     auto x_mt = (*x).template cast<MT>();
     auto y_mt = (*y).template cast<MT>();
diff --git a/paddle/phi/kernels/impl/matmul_kernel_impl.h b/paddle/phi/kernels/impl/matmul_kernel_impl.h
index acc7affc00e..14f50786d0c 100644
--- a/paddle/phi/kernels/impl/matmul_kernel_impl.h
+++ b/paddle/phi/kernels/impl/matmul_kernel_impl.h
@@ -103,7 +103,7 @@ void MatMulFunctionImplWithBlas(
     bool trans_x,
     bool trans_y,
     bool flag = false,
-    phi::funcs::MatmulPlanner* matmul_planner = nullptr) {
+    phi::funcs::MatmulPlanner* matmul_planner UNUSED = nullptr) {
   const int x_ndim = x_dims.size();
   const int y_ndim = y_dims.size();
diff --git a/paddle/phi/kernels/impl/searchsorted_kernel_impl.h b/paddle/phi/kernels/impl/searchsorted_kernel_impl.h
index 6c0891e59bc..2b28e3fed49 100644
--- a/paddle/phi/kernels/impl/searchsorted_kernel_impl.h
+++ b/paddle/phi/kernels/impl/searchsorted_kernel_impl.h
@@ -39,8 +39,8 @@ class GpuAndCpuSearchSortedCompute {
     return std::isnan(x);
 #endif
   }
-  static HOSTDEVICE bool IsNan(int x) { return false; }
-  static HOSTDEVICE bool IsNan(int64_t x) { return false; }
+  static HOSTDEVICE bool IsNan(int x UNUSED) { return false; }
+  static HOSTDEVICE bool IsNan(int64_t x UNUSED) { return false; }

   static HOSTDEVICE bool IsInf(float x) {
 #ifdef __NVCC__
@@ -56,8 +56,8 @@ class GpuAndCpuSearchSortedCompute {
     return std::isinf(x);
 #endif
   }
-  static HOSTDEVICE bool IsInf(int x) { return false; }
-  static HOSTDEVICE bool IsInf(int64_t x) { return false; }
+  static HOSTDEVICE bool IsInf(int x UNUSED) { return false; }
+  static HOSTDEVICE bool IsInf(int64_t x UNUSED) { return false; }

   HOSTDEVICE GpuAndCpuSearchSortedCompute(const T1* sequence_data,
                                           const T2* value_data,
diff --git a/paddle/phi/kernels/impl/split_kernel_impl.h b/paddle/phi/kernels/impl/split_kernel_impl.h
index 83968d913fe..2df379fcdb3 100644
--- a/paddle/phi/kernels/impl/split_kernel_impl.h
+++ b/paddle/phi/kernels/impl/split_kernel_impl.h
@@ -25,7 +25,7 @@ namespace phi {
 template <typename T, typename Context>
 void SplitKernel(const Context& dev_ctx,
                  const DenseTensor& x,
-                 const IntArray& sections,
+                 const IntArray& sections UNUSED,
                  const Scalar& axis_scalar,
                  std::vector<DenseTensor*> outs) {
   std::vector<const DenseTensor*> shape_refer;
diff --git a/paddle/phi/kernels/sparse/cpu/full_kernel.cc b/paddle/phi/kernels/sparse/cpu/full_kernel.cc
index 5659bcf2159..d9209544ec7 100644
--- a/paddle/phi/kernels/sparse/cpu/full_kernel.cc
+++ b/paddle/phi/kernels/sparse/cpu/full_kernel.cc
@@ -54,7 +54,7 @@ template <typename T, typename Context>
 void FullLikeCsrKernel(const Context& dev_ctx,
                        const SparseCsrTensor& x,
                        const Scalar& val,
-                       DataType dtype,
+                       DataType dtype UNUSED,
                        SparseCsrTensor* out) {
   phi::Copy(dev_ctx,
             x.non_zero_crows(),
diff --git a/paddle/phi/kernels/sparse/sparse_utils_kernel.h b/paddle/phi/kernels/sparse/sparse_utils_kernel.h
index 241f3d8b067..e06391d9404 100644
--- a/paddle/phi/kernels/sparse/sparse_utils_kernel.h
+++ b/paddle/phi/kernels/sparse/sparse_utils_kernel.h
@@ -158,7 +158,7 @@ void ValuesCsrKernel(const Context& dev_ctx UNUSED,
 }

 template <typename T, typename Context>
-void IndicesCooKernel(const Context& dev_ctx,
+void IndicesCooKernel(const Context& dev_ctx UNUSED,
                       const SparseCooTensor& x,
                       DenseTensor* out) {
   *out = x.indices();
--
GitLab
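
Note on the technique used throughout this patch: UNUSED marks a parameter
that has to stay in the signature (to match a virtual interface, a template
contract, or a default argument) but is never read in that particular
definition, so the compiler should not emit -Wunused-parameter for it. The
sketch below is a minimal, hypothetical definition of such a macro for
illustration only; Paddle's actual definition lives in its own headers and
may differ.

    // Hypothetical minimal UNUSED macro (an assumption, not Paddle's header):
    // GCC and Clang honor __attribute__((unused)); other compilers get a
    // no-op expansion.
    #if defined(__GNUC__) || defined(__clang__)
    #define UNUSED __attribute__((unused))
    #else
    #define UNUSED
    #endif

    // Example: an override must accept the parameters to match its base
    // class, but this stub never reads them; UNUSED silences the warning
    // without renaming or removing the parameters. The types below are
    // illustrative stand-ins, not Paddle classes.
    struct PServerClosureBase {
      virtual int check_response(size_t request_idx, int cmd_id) = 0;
      virtual ~PServerClosureBase() = default;
    };

    struct AlwaysOkClosure : PServerClosureBase {
      int check_response(size_t request_idx UNUSED, int cmd_id UNUSED) override {
        return 1;  // fixed status; parameters intentionally ignored
      }
    };

An alternative with the same effect is to drop the parameter name entirely,
e.g. int check_response(size_t, int), but keeping the name plus UNUSED
preserves self-documenting signatures, which is presumably why the patch
takes this route.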