From 0fac328106f3d7062eceafc48bcc1e8ae92c464b Mon Sep 17 00:00:00 2001
From: Galaxy1458 <55453380+Galaxy1458@users.noreply.github.com>
Date: Thu, 27 Apr 2023 10:45:15 +0800
Subject: [PATCH] remove some [-Wunused-parameter] warning (#53365)

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop
---
 paddle/fluid/distributed/collective/process_group.h    | 2 +-
 paddle/fluid/distributed/ps/service/brpc_ps_client.h   | 4 ++--
 paddle/fluid/eager/to_static/run_program_op_node.h     | 8 ++++----
 paddle/fluid/framework/data_set.h                      | 2 +-
 paddle/fluid/operators/mkldnn/fc_mkldnn_op.cc          | 2 +-
 paddle/phi/kernels/empty_kernel.cc                     | 2 +-
 paddle/phi/kernels/flatten_kernel.cc                   | 6 +++---
 paddle/phi/kernels/funcs/activation_functor.h          | 4 ++--
 paddle/phi/kernels/funcs/compound_functors.h           | 8 +++++---
 paddle/phi/kernels/funcs/detail/activation_functions.h | 4 ++--
 paddle/phi/kernels/funcs/gather.h                      | 6 +++---
 paddle/phi/kernels/funcs/jit/helper.h                  | 2 +-
 paddle/phi/kernels/funcs/reduce_functor.h              | 2 +-
 paddle/phi/kernels/funcs/scatter.h                     | 2 +-
 paddle/phi/kernels/funcs/strided_memcpy.h              | 6 +++---
 paddle/phi/kernels/squeeze_grad_kernel.cc              | 2 +-
 paddle/phi/kernels/squeeze_kernel.cc                   | 4 ++--
 paddle/phi/kernels/unsqueeze_kernel.cc                 | 2 +-
 18 files changed, 35 insertions(+), 33 deletions(-)

diff --git a/paddle/fluid/distributed/collective/process_group.h b/paddle/fluid/distributed/collective/process_group.h
index 447fc5d1b3c..eff17c9d4e0 100644
--- a/paddle/fluid/distributed/collective/process_group.h
+++ b/paddle/fluid/distributed/collective/process_group.h
@@ -486,7 +486,7 @@ class ProcessGroup {
   virtual std::shared_ptr<ProcessGroup::Task> Reduce(
       std::vector<phi::DenseTensor>&,  // NOLINT
       std::vector<phi::DenseTensor>&,  // NOLINT
-      const ReduceOptions& opts) {
+      const ReduceOptions& opts UNUSED) {
     PADDLE_THROW(phi::errors::InvalidArgument(
         "ProcessGroup%s does not support reduce", GetBackendName()));
   }
diff --git a/paddle/fluid/distributed/ps/service/brpc_ps_client.h b/paddle/fluid/distributed/ps/service/brpc_ps_client.h
index bbaecc498a8..d902824bfd6 100755
--- a/paddle/fluid/distributed/ps/service/brpc_ps_client.h
+++ b/paddle/fluid/distributed/ps/service/brpc_ps_client.h
@@ -30,7 +30,7 @@
 #include "paddle/fluid/framework/lod_tensor.h"
 #include "paddle/fluid/framework/scope.h"
 #include "paddle/fluid/framework/tensor_util.h"
-
+#include "paddle/phi/core/macros.h"
 namespace brpc {
 class Channel;
 class Controller;
@@ -63,7 +63,7 @@ class DownpourPsClientService : public PsService {
                        PsResponseMessage *response,
                        ::google::protobuf::Closure *done);

-  virtual void FLService(::google::protobuf::RpcController *controller,
+  virtual void FLService(::google::protobuf::RpcController *controller UNUSED,
                          const CoordinatorReqMessage *request,
                          CoordinatorResMessage *response,
                          ::google::protobuf::Closure *done) {
diff --git a/paddle/fluid/eager/to_static/run_program_op_node.h b/paddle/fluid/eager/to_static/run_program_op_node.h
index 6b3c73dbdb3..57defbaee4d 100644
--- a/paddle/fluid/eager/to_static/run_program_op_node.h
+++ b/paddle/fluid/eager/to_static/run_program_op_node.h
@@ -457,8 +457,8 @@ inline void RunProgramAPI(
 }

 inline void RunProgramGradAPI(
-    const std::vector<paddle::Tensor> &x,
-    const std::vector<paddle::Tensor> &params,
+    const std::vector<paddle::Tensor> &x UNUSED,
+    const std::vector<paddle::Tensor> &params UNUSED,
     const std::vector<paddle::Tensor> &out_grad,
     const std::vector<paddle::framework::Scope *> &step_scope,  // NOLINT
     const paddle::framework::AttributeMap &attrs,
@@ -610,8 +610,8 @@ class GradNodeRunProgram : public egr::GradNodeBase {
             egr::kSlotSmallVectorSize>
   operator()(paddle::small_vector<std::vector<paddle::Tensor>,
                                   egr::kSlotSmallVectorSize> &grads,  // NOLINT
-             bool create_graph,
-             bool is_new_grad) override {
+             bool create_graph UNUSED,
+             bool is_new_grad UNUSED) override {
     VLOG(3) << "Running Eager Backward Node: GradNodeRunProgram";
     paddle::small_vector<std::vector<paddle::Tensor>, egr::kSlotSmallVectorSize>
         hooked_grads = GradNodeRunProgram::ApplyGradientHooks(grads);
diff --git a/paddle/fluid/framework/data_set.h b/paddle/fluid/framework/data_set.h
index 1bc60993e36..9af5fbfc6b4 100644
--- a/paddle/fluid/framework/data_set.h
+++ b/paddle/fluid/framework/data_set.h
@@ -58,7 +58,7 @@ class Dataset {
                             const uint16_t start_sample_layer UNUSED,
                             const bool with_hierachy UNUSED,
                             const uint16_t seed_ UNUSED,
-                            const uint16_t sample_slot) {}
+                            const uint16_t sample_slot UNUSED) {}
   // set file list
   virtual void SetFileList(const std::vector<std::string>& filelist) = 0;
   // set readers' num
diff --git a/paddle/fluid/operators/mkldnn/fc_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/fc_mkldnn_op.cc
index 2ba103ce0fc..a5274c5f7ae 100644
--- a/paddle/fluid/operators/mkldnn/fc_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/fc_mkldnn_op.cc
@@ -44,7 +44,7 @@ class FCMKLDNNHandler
                  const phi::DenseTensor* x,
                  const phi::DenseTensor* weights,
                  const phi::DenseTensor* bias,
-                 phi::DenseTensor* out,
+                 phi::DenseTensor* out UNUSED,
                  const int in_num_col_dims,
                  dnnl::engine onednn_engine,
                  platform::Place cpu_place)
diff --git a/paddle/phi/kernels/empty_kernel.cc b/paddle/phi/kernels/empty_kernel.cc
index 0877a8e2446..8df5e9a543e 100644
--- a/paddle/phi/kernels/empty_kernel.cc
+++ b/paddle/phi/kernels/empty_kernel.cc
@@ -30,7 +30,7 @@ void EmptyKernel(const Context& dev_ctx,

 template <typename T, typename Context>
 void EmptyLikeKernel(const Context& dev_ctx,
-                     const DenseTensor& x,
+                     const DenseTensor& x UNUSED,
                      DataType dtype UNUSED,
                      DenseTensor* out) {
   dev_ctx.template Alloc<T>(out);
diff --git a/paddle/phi/kernels/flatten_kernel.cc b/paddle/phi/kernels/flatten_kernel.cc
index 939e2706136..67d56f1c46a 100644
--- a/paddle/phi/kernels/flatten_kernel.cc
+++ b/paddle/phi/kernels/flatten_kernel.cc
@@ -25,8 +25,8 @@ namespace phi {
 template <typename T, typename Context>
 void FlattenInferKernel(const Context& dev_ctx,
                         const DenseTensor& x,
-                        int start_axis,
-                        int stop_axis,
+                        int start_axis UNUSED,
+                        int stop_axis UNUSED,
                         DenseTensor* out) {
   dev_ctx.Alloc(out, x.dtype());
   auto out_dims = out->dims();
@@ -43,7 +43,7 @@ void FlattenKernel(const Context& dev_ctx,
                    int start_axis,
                    int stop_axis,
                    DenseTensor* out,
-                   DenseTensor* xshape) {
+                   DenseTensor* xshape UNUSED) {
   FlattenInferKernel<T, Context>(dev_ctx, x, start_axis, stop_axis, out);
 }

diff --git a/paddle/phi/kernels/funcs/activation_functor.h b/paddle/phi/kernels/funcs/activation_functor.h
index 6a3554318e5..aefa6539286 100644
--- a/paddle/phi/kernels/funcs/activation_functor.h
+++ b/paddle/phi/kernels/funcs/activation_functor.h
@@ -1198,7 +1198,7 @@ struct TanhGradFunctor : public BaseActivationFunctor<T> {
             typename Out,
             typename dOut,
             typename dX>
-  void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
+  void operator()(Device d, X x UNUSED, Out out, dOut dout, dX dx) const {
     dx.device(d) = dout * (static_cast<T>(1) - out * out);
   }

@@ -1794,7 +1794,7 @@ struct SigmoidGradFunctor : public BaseActivationFunctor<T> {
             typename Out,
             typename dOut,
             typename dX>
-  void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
+  void operator()(Device d, X x UNUSED, Out out, dOut dout, dX dx) const {
     dx.device(d) = dout * out * (static_cast<T>(1) - out);
   }

diff --git a/paddle/phi/kernels/funcs/compound_functors.h b/paddle/phi/kernels/funcs/compound_functors.h
index 0fd3fd0e932..020c6dcd87f 100644
--- a/paddle/phi/kernels/funcs/compound_functors.h
+++ b/paddle/phi/kernels/funcs/compound_functors.h
@@ -96,12 +96,12 @@ struct BinaryCompoundGradDyFunctor {
         unary_fun_(unary_fun),
         d_unary_fun_(d_unary_fun) {}

-  inline HOSTDEVICE T Recompute(T x, T y, T out, T dout) {
+  inline HOSTDEVICE T Recompute(T x, T y, T out UNUSED, T dout) {
     return dout * d_binary_fun_.Dy(x, unary_fun_(y)) * d_unary_fun_.UseX(y);
   }

   inline HOSTDEVICE T
-  UseIntermediateOut(T x, T y, T intermediate_out, T out, T dout) {
+  UseIntermediateOut(T x, T y, T intermediate_out, T out UNUSED, T dout) {
     if (InPlace) {
       return dout * d_binary_fun_.Dy(x, intermediate_out) *
              d_unary_fun_.UseOut(intermediate_out);
@@ -111,7 +111,9 @@ struct BinaryCompoundGradDyFunctor {
     }
   }

-  inline HOSTDEVICE T GetIntermediateOut(T x, T y) { return unary_fun_(y); }
+  inline HOSTDEVICE T GetIntermediateOut(T x UNUSED, T y) {
+    return unary_fun_(y);
+  }

  private:
   DBinaryFun d_binary_fun_;
diff --git a/paddle/phi/kernels/funcs/detail/activation_functions.h b/paddle/phi/kernels/funcs/detail/activation_functions.h
index 26be2a83280..f1352df2260 100644
--- a/paddle/phi/kernels/funcs/detail/activation_functions.h
+++ b/paddle/phi/kernels/funcs/detail/activation_functions.h
@@ -20,7 +20,7 @@ limitations under the License. */

 #include "paddle/phi/backends/cpu/cpu_info.h"
 #include "paddle/phi/core/hostdevice.h"
-
+#include "paddle/phi/core/macros.h"
 namespace phi {
 namespace funcs {
 namespace detail {
@@ -104,7 +104,7 @@ DEVICE T TanhV2(const T a) {
 namespace backward {

 template <typename T>
-DEVICE T Identity(const T a, const T b) {
+DEVICE T Identity(const T a, const T b UNUSED) {
   return a;
 }
diff --git a/paddle/phi/kernels/funcs/gather.h b/paddle/phi/kernels/funcs/gather.h
index f1ab1a16f12..50f7f4fa032 100644
--- a/paddle/phi/kernels/funcs/gather.h
+++ b/paddle/phi/kernels/funcs/gather.h
@@ -21,8 +21,8 @@ limitations under the License. */
 #include "paddle/phi/common/place.h"
 #include "paddle/phi/core/ddim.h"
 #include "paddle/phi/core/dense_tensor.h"
+#include "paddle/phi/core/macros.h"
 #include "paddle/phi/kernels/funcs/math_function.h"
-
 namespace phi {
 namespace funcs {

@@ -34,7 +34,7 @@ namespace funcs {
  * return: output tensor
 */
 template <typename T, typename IndexT = int>
-void CPUGather(const phi::CPUContext& ctx,
+void CPUGather(const phi::CPUContext& ctx UNUSED,
               const DenseTensor& src,
               const DenseTensor& index,
               DenseTensor* output) {
@@ -95,7 +95,7 @@ void CPUGather(const phi::CPUContext& ctx,
 }

 template <typename T, typename IndexT = int>
-void CPUGatherNd(const phi::CPUContext& ctx,
+void CPUGatherNd(const phi::CPUContext& ctx UNUSED,
                 const DenseTensor& input,
                 const DenseTensor& index,
                 DenseTensor* output) {
diff --git a/paddle/phi/kernels/funcs/jit/helper.h b/paddle/phi/kernels/funcs/jit/helper.h
index ff233710a5c..7e3394dffd4 100644
--- a/paddle/phi/kernels/funcs/jit/helper.h
+++ b/paddle/phi/kernels/funcs/jit/helper.h
@@ -74,7 +74,7 @@ inline typename std::enable_if<
     !std::is_same<typename KernelTuple::data_type, float>::value ||
         !std::is_same<PlaceType, phi::CPUPlace>::value,
     const Kernel*>::type
-GetJitCode(const typename KernelTuple::attr_type& attr) {
+GetJitCode(const typename KernelTuple::attr_type& attr UNUSED) {
   return nullptr;
 }

diff --git a/paddle/phi/kernels/funcs/reduce_functor.h b/paddle/phi/kernels/funcs/reduce_functor.h
index f1d3772cc25..596be9bc330 100644
--- a/paddle/phi/kernels/funcs/reduce_functor.h
+++ b/paddle/phi/kernels/funcs/reduce_functor.h
@@ -153,7 +153,7 @@ struct ProdGradFunctor {
                   DX* dx,
                   DY* dy,
                   const Dim& dim,
-                  int size) {
+                  int size UNUSED) {
     dx->device(place) = dy->broadcast(dim) * y->broadcast(dim) * x->inverse();
   }
 };
diff --git a/paddle/phi/kernels/funcs/scatter.h b/paddle/phi/kernels/funcs/scatter.h
index 7c23a35072c..d4305885418 100644
--- a/paddle/phi/kernels/funcs/scatter.h
+++ b/paddle/phi/kernels/funcs/scatter.h
@@ -47,7 +47,7 @@ elementwise_inner_add(const phi::CPUContext& ctx,

 template <typename T, typename IndexT = int>
 typename std::enable_if<std::is_floating_point<T>::value>::type
-elementwise_inner_add(const phi::CPUContext& ctx,
+elementwise_inner_add(const phi::CPUContext& ctx UNUSED,
                       const T* src_pointer,
                       T* dst_pointer,
                       size_t src_index,
diff --git a/paddle/phi/kernels/funcs/strided_memcpy.h b/paddle/phi/kernels/funcs/strided_memcpy.h
index d80cf7b2e24..19a542bd2b5 100644
--- a/paddle/phi/kernels/funcs/strided_memcpy.h
+++ b/paddle/phi/kernels/funcs/strided_memcpy.h
@@ -12,9 +12,9 @@ limitations under the License. */
 #pragma once
 #include <vector>

-#include "paddle/phi/kernels/funcs/detail/strided_memcpy.h"
-
 #include "paddle/phi/core/dense_tensor.h"
+#include "paddle/phi/core/macros.h"
+#include "paddle/phi/kernels/funcs/detail/strided_memcpy.h"

 namespace phi {
 class CPUContext;
@@ -65,7 +65,7 @@ inline void CopyWithContext(const Context& ctx,
 }

 template <>
-inline void CopyWithContext<phi::CPUContext>(const phi::CPUContext& ctx,
+inline void CopyWithContext<phi::CPUContext>(const phi::CPUContext& ctx UNUSED,
                                              const Place& dst_place,
                                              void* dst,
                                              const Place& src_place,
diff --git a/paddle/phi/kernels/squeeze_grad_kernel.cc b/paddle/phi/kernels/squeeze_grad_kernel.cc
index 0fe2f790734..473acf9d7a1 100644
--- a/paddle/phi/kernels/squeeze_grad_kernel.cc
+++ b/paddle/phi/kernels/squeeze_grad_kernel.cc
@@ -23,7 +23,7 @@ template <typename T, typename Context>
 void SqueezeGradKernel(const Context& dev_ctx,
                        const DenseTensor& xshape,
                        const DenseTensor& dout,
-                       const IntArray& axes,
+                       const IntArray& axes UNUSED,
                        DenseTensor* dx) {
   auto xshape_dims = xshape.dims();
   auto x_dims = phi::slice_ddim(xshape_dims, 1, xshape_dims.size());
diff --git a/paddle/phi/kernels/squeeze_kernel.cc b/paddle/phi/kernels/squeeze_kernel.cc
index a0b72381601..d495b040921 100644
--- a/paddle/phi/kernels/squeeze_kernel.cc
+++ b/paddle/phi/kernels/squeeze_kernel.cc
@@ -23,7 +23,7 @@ namespace phi {
 template <typename T, typename Context>
 void SqueezeInferKernel(const Context& dev_ctx,
                         const DenseTensor& x,
-                        const IntArray& axes,
+                        const IntArray& axes UNUSED,
                         DenseTensor* out) {
   auto out_dims = out->dims();
   dev_ctx.template Alloc<T>(out);
@@ -39,7 +39,7 @@ void SqueezeKernel(const Context& dev_ctx,
                    const DenseTensor& x,
                    const IntArray& axes,
                    DenseTensor* out,
-                   DenseTensor* xshape) {
+                   DenseTensor* xshape UNUSED) {
   SqueezeInferKernel<T, Context>(dev_ctx, x, axes, out);
 }

diff --git a/paddle/phi/kernels/unsqueeze_kernel.cc b/paddle/phi/kernels/unsqueeze_kernel.cc
index 4354b09c753..c08c31da4ef 100644
--- a/paddle/phi/kernels/unsqueeze_kernel.cc
+++ b/paddle/phi/kernels/unsqueeze_kernel.cc
@@ -44,7 +44,7 @@ void UnsqueezeKernel(const Context& dev_ctx,
                      const DenseTensor& x,
                      const IntArray& axes,
                      DenseTensor* out,
-                     DenseTensor* xshape) {
+                     DenseTensor* xshape UNUSED) {
   UnsqueezeInferKernel<T, Context>(dev_ctx, x, axes, out);
 }
 }  // namespace phi
--
GitLab
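
Note on the mechanism: every hunk above relies on the UNUSED macro from
paddle/phi/core/macros.h, which is why several files gain that include. The
sketch below illustrates how such a parameter annotation is commonly defined
and applied on GCC/Clang-style compilers; it is an assumption modeled on
common practice, not a copy of Paddle's actual header, and the Identity
function merely mirrors the one touched in detail/activation_functions.h.

    // Sketch only: assumes a GCC/Clang-compatible compiler; on other
    // compilers the macro falls back to an empty expansion, so the
    // warning simply remains.
    #if defined(__GNUC__) || defined(__clang__)
    #define UNUSED __attribute__((unused))
    #else
    #define UNUSED
    #endif

    // A parameter kept only for interface compatibility no longer triggers
    // -Wunused-parameter once annotated, and the function's signature is
    // unchanged for callers and overriders.
    template <typename T>
    T Identity(const T a, const T b UNUSED) {
      return a;
    }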