diff --git a/paddle/fluid/distributed/collective/process_group.h b/paddle/fluid/distributed/collective/process_group.h
index 447fc5d1b3c7bf33c26312de72b03bfe73e6b3bc..eff17c9d4e061a1c360ee58d14765ebf6e203a03 100644
--- a/paddle/fluid/distributed/collective/process_group.h
+++ b/paddle/fluid/distributed/collective/process_group.h
@@ -486,7 +486,7 @@ class ProcessGroup {
   virtual std::shared_ptr<ProcessGroup::Task> Reduce(
       std::vector<phi::DenseTensor>&,  // NOLINT
       std::vector<phi::DenseTensor>&,  // NOLINT
-      const ReduceOptions& opts) {
+      const ReduceOptions& opts UNUSED) {
     PADDLE_THROW(phi::errors::InvalidArgument(
         "ProcessGroup%s does not support reduce", GetBackendName()));
   }
diff --git a/paddle/fluid/distributed/ps/service/brpc_ps_client.h b/paddle/fluid/distributed/ps/service/brpc_ps_client.h
index bbaecc498a80ae172046a390a5613a82752d9e5d..d902824bfd60c993a813877e2cd8221fe13c0079 100755
--- a/paddle/fluid/distributed/ps/service/brpc_ps_client.h
+++ b/paddle/fluid/distributed/ps/service/brpc_ps_client.h
@@ -30,7 +30,7 @@
 #include "paddle/fluid/framework/lod_tensor.h"
 #include "paddle/fluid/framework/scope.h"
 #include "paddle/fluid/framework/tensor_util.h"
-
+#include "paddle/phi/core/macros.h"
 namespace brpc {
 class Channel;
 class Controller;
@@ -63,7 +63,7 @@ class DownpourPsClientService : public PsService {
                        PsResponseMessage *response,
                        ::google::protobuf::Closure *done);
 
-  virtual void FLService(::google::protobuf::RpcController *controller,
+  virtual void FLService(::google::protobuf::RpcController *controller UNUSED,
                          const CoordinatorReqMessage *request,
                          CoordinatorResMessage *response,
                          ::google::protobuf::Closure *done) {
diff --git a/paddle/fluid/eager/to_static/run_program_op_node.h b/paddle/fluid/eager/to_static/run_program_op_node.h
index 6b3c73dbdb39bd5a89de9b293940f1839a9040ba..57defbaee4dca730b1363747259567b1271d3c6b 100644
--- a/paddle/fluid/eager/to_static/run_program_op_node.h
+++ b/paddle/fluid/eager/to_static/run_program_op_node.h
@@ -457,8 +457,8 @@ inline void RunProgramAPI(
 }
 
 inline void RunProgramGradAPI(
-    const std::vector<paddle::Tensor> &x,
-    const std::vector<paddle::Tensor> &params,
+    const std::vector<paddle::Tensor> &x UNUSED,
+    const std::vector<paddle::Tensor> &params UNUSED,
     const std::vector<paddle::Tensor> &out_grad,
     const std::vector<paddle::framework::Scope *> &step_scope,  // NOLINT
     const paddle::framework::AttributeMap &attrs,
@@ -610,8 +610,8 @@ class GradNodeRunProgram : public egr::GradNodeBase {
              egr::kSlotSmallVectorSize>
   operator()(paddle::small_vector<std::vector<paddle::Tensor>,
                                   egr::kSlotSmallVectorSize> &grads,  // NOLINT
-             bool create_graph,
-             bool is_new_grad) override {
+             bool create_graph UNUSED,
+             bool is_new_grad UNUSED) override {
     VLOG(3) << "Running Eager Backward Node: GradNodeRunProgram";
     paddle::small_vector<std::vector<paddle::Tensor>, egr::kSlotSmallVectorSize>
         hooked_grads = GradNodeRunProgram::ApplyGradientHooks(grads);
diff --git a/paddle/fluid/framework/data_set.h b/paddle/fluid/framework/data_set.h
index 1bc60993e36a0b9c0481b3b7b01e0652d970fdc2..9af5fbfc6b4a5905e1186df60e197b8525494281 100644
--- a/paddle/fluid/framework/data_set.h
+++ b/paddle/fluid/framework/data_set.h
@@ -58,7 +58,7 @@ class Dataset {
                              const uint16_t start_sample_layer UNUSED,
                              const bool with_hierachy UNUSED,
                              const uint16_t seed_ UNUSED,
-                             const uint16_t sample_slot) {}
+                             const uint16_t sample_slot UNUSED) {}
   // set file list
   virtual void SetFileList(const std::vector<std::string>& filelist) = 0;
   // set readers' num
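Reviewer note: every hunk in this series leans on the `UNUSED` macro from `paddle/phi/core/macros.h` to keep parameters that exist only for interface compatibility (virtual defaults, overrides, registered kernel signatures) from tripping `-Wunused-parameter`. A minimal sketch of the mechanism, assuming the usual GCC/Clang attribute form; the exact guards in `macros.h` may differ:

```cpp
// Sketch only: the real definition lives in paddle/phi/core/macros.h.
#if defined(__GNUC__) || defined(__clang__)
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif

// Mirrors the ProcessGroup::Reduce pattern above: a default implementation
// keeps its parameters for overriders but never reads them itself.
struct Backend {
  virtual int Reduce(int value UNUSED) { return -1; }  // "not supported"
  virtual ~Backend() = default;
};

int main() { return Backend{}.Reduce(42) == -1 ? 0 : 1; }
```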
diff --git a/paddle/fluid/operators/mkldnn/fc_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/fc_mkldnn_op.cc
index 2ba103ce0fcae243e8166b575d2f8c7f825b5c2c..a5274c5f7ae7c8fdbb092563ec290415a93b8791 100644
--- a/paddle/fluid/operators/mkldnn/fc_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/fc_mkldnn_op.cc
@@ -44,7 +44,7 @@ class FCMKLDNNHandler
                  const phi::DenseTensor* x,
                  const phi::DenseTensor* weights,
                  const phi::DenseTensor* bias,
-                 phi::DenseTensor* out,
+                 phi::DenseTensor* out UNUSED,
                  const int in_num_col_dims,
                  dnnl::engine onednn_engine,
                  platform::Place cpu_place)
diff --git a/paddle/phi/kernels/empty_kernel.cc b/paddle/phi/kernels/empty_kernel.cc
index 0877a8e24468c13e111295161aca325956eb54e1..8df5e9a543eb2554539fc1bc8c0d66eb5e968991 100644
--- a/paddle/phi/kernels/empty_kernel.cc
+++ b/paddle/phi/kernels/empty_kernel.cc
@@ -30,7 +30,7 @@ void EmptyKernel(const Context& dev_ctx,
 
 template <typename T, typename Context>
 void EmptyLikeKernel(const Context& dev_ctx,
-                     const DenseTensor& x,
+                     const DenseTensor& x UNUSED,
                      DataType dtype UNUSED,
                      DenseTensor* out) {
   dev_ctx.template Alloc<T>(out);
diff --git a/paddle/phi/kernels/flatten_kernel.cc b/paddle/phi/kernels/flatten_kernel.cc
index 939e270613650871776c9565077eb7c65213b80c..67d56f1c46aa3440abb41c4aee89b81fea4bd722 100644
--- a/paddle/phi/kernels/flatten_kernel.cc
+++ b/paddle/phi/kernels/flatten_kernel.cc
@@ -25,8 +25,8 @@ namespace phi {
 template <typename T, typename Context>
 void FlattenInferKernel(const Context& dev_ctx,
                         const DenseTensor& x,
-                        int start_axis,
-                        int stop_axis,
+                        int start_axis UNUSED,
+                        int stop_axis UNUSED,
                         DenseTensor* out) {
   dev_ctx.Alloc(out, x.dtype());
   auto out_dims = out->dims();
@@ -43,7 +43,7 @@ void FlattenKernel(const Context& dev_ctx,
                    int start_axis,
                    int stop_axis,
                    DenseTensor* out,
-                   DenseTensor* xshape) {
+                   DenseTensor* xshape UNUSED) {
   FlattenInferKernel<T, Context>(dev_ctx, x, start_axis, stop_axis, out);
 }
 
diff --git a/paddle/phi/kernels/funcs/activation_functor.h b/paddle/phi/kernels/funcs/activation_functor.h
index 6a3554318e5e6668314257adb3cf1129aa6857e1..aefa6539286339259e7ff5c776f50d620c164cf4 100644
--- a/paddle/phi/kernels/funcs/activation_functor.h
+++ b/paddle/phi/kernels/funcs/activation_functor.h
@@ -1198,7 +1198,7 @@ struct TanhGradFunctor : public BaseActivationFunctor<T> {
             typename Out,
             typename dOut,
             typename dX>
-  void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
+  void operator()(Device d, X x UNUSED, Out out, dOut dout, dX dx) const {
     dx.device(d) = dout * (static_cast<T>(1) - out * out);
   }
 
@@ -1794,7 +1794,7 @@ struct SigmoidGradFunctor : public BaseActivationFunctor<T> {
             typename Out,
             typename dOut,
             typename dX>
-  void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
+  void operator()(Device d, X x UNUSED, Out out, dOut dout, dX dx) const {
     dx.device(d) = dout * out * (static_cast<T>(1) - out);
   }
 
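The two activation hunks illustrate the common case: all grad functors share one `operator()` shape, but for tanh and sigmoid the derivative is expressible purely in the forward output (`tanh'(x) = 1 - tanh(x)^2`), so `x` goes unread. A self-contained sketch of that pattern with a finite-difference check; the functor here is illustrative, not Paddle's Eigen-based one:

```cpp
// Standalone sketch: backward tanh needs only the forward output `out`;
// `x` stays in the signature for interface uniformity across functors.
#include <cmath>
#include <cstdio>

#define UNUSED __attribute__((unused))  // assumed GCC/Clang; see macros.h

template <typename T>
struct TanhGrad {
  T operator()(T x UNUSED, T out, T dout) const {
    return dout * (static_cast<T>(1) - out * out);
  }
};

int main() {
  const double x = 0.5, out = std::tanh(x), eps = 1e-6;
  // Central-difference check of the analytic gradient.
  const double numeric = (std::tanh(x + eps) - std::tanh(x - eps)) / (2 * eps);
  const double analytic = TanhGrad<double>{}(x, out, 1.0);
  std::printf("numeric=%.8f analytic=%.8f\n", numeric, analytic);
  return std::fabs(numeric - analytic) < 1e-8 ? 0 : 1;
}
```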
diff --git a/paddle/phi/kernels/funcs/compound_functors.h b/paddle/phi/kernels/funcs/compound_functors.h
index 0fd3fd0e932fc7e0cfbccd1bb45f389335515ed0..020c6dcd87f30d46c3413a9b4b3e9ddff52e1d38 100644
--- a/paddle/phi/kernels/funcs/compound_functors.h
+++ b/paddle/phi/kernels/funcs/compound_functors.h
@@ -96,12 +96,12 @@ struct BinaryCompoundGradDyFunctor {
         unary_fun_(unary_fun),
         d_unary_fun_(d_unary_fun) {}
 
-  inline HOSTDEVICE T Recompute(T x, T y, T out, T dout) {
+  inline HOSTDEVICE T Recompute(T x, T y, T out UNUSED, T dout) {
     return dout * d_binary_fun_.Dy(x, unary_fun_(y)) * d_unary_fun_.UseX(y);
   }
 
   inline HOSTDEVICE T
-  UseIntermediateOut(T x, T y, T intermediate_out, T out, T dout) {
+  UseIntermediateOut(T x, T y, T intermediate_out, T out UNUSED, T dout) {
     if (InPlace) {
       return dout * d_binary_fun_.Dy(x, intermediate_out) *
              d_unary_fun_.UseOut(intermediate_out);
@@ -111,7 +111,9 @@ struct BinaryCompoundGradDyFunctor {
     }
   }
 
-  inline HOSTDEVICE T GetIntermediateOut(T x, T y) { return unary_fun_(y); }
+  inline HOSTDEVICE T GetIntermediateOut(T x UNUSED, T y) {
+    return unary_fun_(y);
+  }
 
  private:
   DBinaryFun d_binary_fun_;
diff --git a/paddle/phi/kernels/funcs/detail/activation_functions.h b/paddle/phi/kernels/funcs/detail/activation_functions.h
index 26be2a83280c323a8aa196f93f642cb38a0b6596..f1352df226094b9d13247d5626258dbde20a6f1c 100644
--- a/paddle/phi/kernels/funcs/detail/activation_functions.h
+++ b/paddle/phi/kernels/funcs/detail/activation_functions.h
@@ -20,7 +20,7 @@ limitations under the License. */
 
 #include "paddle/phi/backends/cpu/cpu_info.h"
 #include "paddle/phi/core/hostdevice.h"
-
+#include "paddle/phi/core/macros.h"
 namespace phi {
 namespace funcs {
 namespace detail {
@@ -104,7 +104,7 @@ DEVICE T TanhV2(const T a) {
 namespace backward {
 
 template <typename T>
-DEVICE T Identity(const T a, const T b) {
+DEVICE T Identity(const T a, const T b UNUSED) {
   return a;
 }
 
diff --git a/paddle/phi/kernels/funcs/gather.h b/paddle/phi/kernels/funcs/gather.h
index f1ab1a16f1224bab005024b8ee77ed01ab88b590..50f7f4fa0322cb89b4b65e552f1e5e2f8aac858f 100644
--- a/paddle/phi/kernels/funcs/gather.h
+++ b/paddle/phi/kernels/funcs/gather.h
@@ -21,8 +21,8 @@ limitations under the License. */
 #include "paddle/phi/common/place.h"
 #include "paddle/phi/core/ddim.h"
 #include "paddle/phi/core/dense_tensor.h"
+#include "paddle/phi/core/macros.h"
 #include "paddle/phi/kernels/funcs/math_function.h"
-
 namespace phi {
 namespace funcs {
 
@@ -34,7 +34,7 @@ namespace funcs {
  * return: output tensor
 */
 template <typename T, typename IndexT = int>
-void CPUGather(const phi::CPUContext& ctx,
+void CPUGather(const phi::CPUContext& ctx UNUSED,
               const DenseTensor& src,
               const DenseTensor& index,
               DenseTensor* output) {
@@ -95,7 +95,7 @@ void CPUGather(const phi::CPUContext& ctx,
 }
 
 template <typename T, typename IndexT = int>
-void CPUGatherNd(const phi::CPUContext& ctx,
+void CPUGatherNd(const phi::CPUContext& ctx UNUSED,
                  const DenseTensor& input,
                  const DenseTensor& index,
                  DenseTensor* output) {
diff --git a/paddle/phi/kernels/funcs/jit/helper.h b/paddle/phi/kernels/funcs/jit/helper.h
index ff233710a5cf375cad96450771c64065590114ef..7e3394dffd4a2a617e627a7475be40f7b5f18d13 100644
--- a/paddle/phi/kernels/funcs/jit/helper.h
+++ b/paddle/phi/kernels/funcs/jit/helper.h
@@ -74,7 +74,7 @@ inline typename std::enable_if<
     !std::is_same<typename KernelTuple::data_type, float>::value ||
         !std::is_same<PlaceType, phi::CPUPlace>::value,
     const Kernel*>::type
-GetJitCode(const typename KernelTuple::attr_type& attr) {
+GetJitCode(const typename KernelTuple::attr_type& attr UNUSED) {
   return nullptr;
 }
 
diff --git a/paddle/phi/kernels/funcs/reduce_functor.h b/paddle/phi/kernels/funcs/reduce_functor.h
index f1d3772cc25a52b522fb5c46bdc305109a042cf2..596be9bc33058558472d7e9d3dc9cedf979db8b4 100644
--- a/paddle/phi/kernels/funcs/reduce_functor.h
+++ b/paddle/phi/kernels/funcs/reduce_functor.h
@@ -153,7 +153,7 @@ struct ProdGradFunctor {
                   DX* dx,
                   DY* dy,
                   const Dim& dim,
-                  int size) {
+                  int size UNUSED) {
    dx->device(place) = dy->broadcast(dim) * y->broadcast(dim) * x->inverse();
  }
 };
diff --git a/paddle/phi/kernels/funcs/scatter.h b/paddle/phi/kernels/funcs/scatter.h
index 7c23a35072cb851828e8501379ddcee75d81bb03..d4305885418374d41e7f39989038da200bf944a9 100644
--- a/paddle/phi/kernels/funcs/scatter.h
+++ b/paddle/phi/kernels/funcs/scatter.h
@@ -47,7 +47,7 @@ elementwise_inner_add(const phi::CPUContext& ctx,
 
 template <typename T, typename IndexT = int>
 typename std::enable_if<!std::is_floating_point<T>::value>::type
-elementwise_inner_add(const phi::CPUContext& ctx,
+elementwise_inner_add(const phi::CPUContext& ctx UNUSED,
                       const T* src_pointer,
                       T* dst_pointer,
                       size_t src_index,
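The scatter.h hunk is the SFINAE variant of the same story: paired `std::enable_if` overloads must keep identical signatures so call sites don't care which one resolves, which forces the overload that never touches the CPU context to keep accepting it. A hedged sketch of that shape (`Ctx` and `inner_add` are made-up stand-ins, not Paddle's names):

```cpp
#include <cstddef>
#include <type_traits>

#define UNUSED __attribute__((unused))  // assumed GCC/Clang; see macros.h

struct Ctx {};  // stand-in for phi::CPUContext

// Floating-point overload; presumably the real one routes work through
// the context (e.g. a BLAS handle), here it just loops.
template <typename T>
typename std::enable_if<std::is_floating_point<T>::value>::type
inner_add(const Ctx& ctx, const T* src, T* dst, std::size_t n) {
  (void)ctx;
  for (std::size_t i = 0; i < n; ++i) dst[i] += src[i];
}

// Non-floating-point overload: same signature, but `ctx` is never needed.
template <typename T>
typename std::enable_if<!std::is_floating_point<T>::value>::type
inner_add(const Ctx& ctx UNUSED, const T* src, T* dst, std::size_t n) {
  for (std::size_t i = 0; i < n; ++i) dst[i] += src[i];
}

int main() {
  Ctx ctx;
  double a[2] = {1.0, 2.0}, b[2] = {0.5, 0.5};
  int c[2] = {1, 2}, d[2] = {3, 4};
  inner_add(ctx, b, a, 2);  // floating-point overload
  inner_add(ctx, d, c, 2);  // integral overload, `ctx` unused
  return (a[1] == 2.5 && c[1] == 6) ? 0 : 1;
}
```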
diff --git a/paddle/phi/kernels/funcs/strided_memcpy.h b/paddle/phi/kernels/funcs/strided_memcpy.h
index d80cf7b2e242122474e53fba76c8137d4d2ae375..19a542bd2b5e9bdfc4b8e9f866a00cc9e2427ebf 100644
--- a/paddle/phi/kernels/funcs/strided_memcpy.h
+++ b/paddle/phi/kernels/funcs/strided_memcpy.h
@@ -12,9 +12,9 @@ limitations under the License. */
 #pragma once
 #include <vector>
 
-#include "paddle/phi/kernels/funcs/detail/strided_memcpy.h"
-
 #include "paddle/phi/core/dense_tensor.h"
+#include "paddle/phi/core/macros.h"
+#include "paddle/phi/kernels/funcs/detail/strided_memcpy.h"
 
 namespace phi {
 class CPUContext;
@@ -65,7 +65,7 @@ inline void CopyWithContext(const Context& ctx,
 }
 
 template <>
-inline void CopyWithContext(const phi::CPUContext& ctx,
+inline void CopyWithContext(const phi::CPUContext& ctx UNUSED,
                             const Place& dst_place,
                             void* dst,
                             const Place& src_place,
diff --git a/paddle/phi/kernels/squeeze_grad_kernel.cc b/paddle/phi/kernels/squeeze_grad_kernel.cc
index 0fe2f79073430d6f1cabff002c9c448d565413e9..473acf9d7a1d1516069cbd556118cadbf24544e5 100644
--- a/paddle/phi/kernels/squeeze_grad_kernel.cc
+++ b/paddle/phi/kernels/squeeze_grad_kernel.cc
@@ -23,7 +23,7 @@ template <typename T, typename Context>
 void SqueezeGradKernel(const Context& dev_ctx,
                        const DenseTensor& xshape,
                        const DenseTensor& dout,
-                       const IntArray& axes,
+                       const IntArray& axes UNUSED,
                        DenseTensor* dx) {
   auto xshape_dims = xshape.dims();
   auto x_dims = phi::slice_ddim(xshape_dims, 1, xshape_dims.size());
diff --git a/paddle/phi/kernels/squeeze_kernel.cc b/paddle/phi/kernels/squeeze_kernel.cc
index a0b72381601d60098caf422a00b0998759754b3d..d495b040921b59942b22ccda3b8a184efb78d56d 100644
--- a/paddle/phi/kernels/squeeze_kernel.cc
+++ b/paddle/phi/kernels/squeeze_kernel.cc
@@ -23,7 +23,7 @@ namespace phi {
 template <typename T, typename Context>
 void SqueezeInferKernel(const Context& dev_ctx,
                         const DenseTensor& x,
-                        const IntArray& axes,
+                        const IntArray& axes UNUSED,
                         DenseTensor* out) {
   auto out_dims = out->dims();
   dev_ctx.template Alloc<T>(out);
@@ -39,7 +39,7 @@ void SqueezeKernel(const Context& dev_ctx,
                    const DenseTensor& x,
                    const IntArray& axes,
                    DenseTensor* out,
-                   DenseTensor* xshape) {
+                   DenseTensor* xshape UNUSED) {
   SqueezeInferKernel<T, Context>(dev_ctx, x, axes, out);
 }
 
diff --git a/paddle/phi/kernels/unsqueeze_kernel.cc b/paddle/phi/kernels/unsqueeze_kernel.cc
index 4354b09c753b103308e19e5f41473abb33ad11c0..c08c31da4ef0ce3c1717df79d979a0bde3078726 100644
--- a/paddle/phi/kernels/unsqueeze_kernel.cc
+++ b/paddle/phi/kernels/unsqueeze_kernel.cc
@@ -44,7 +44,7 @@ void UnsqueezeKernel(const Context& dev_ctx,
                      const DenseTensor& x,
                      const IntArray& axes,
                      DenseTensor* out,
-                     DenseTensor* xshape) {
+                     DenseTensor* xshape UNUSED) {
   UnsqueezeInferKernel<T, Context>(dev_ctx, x, axes, out);
 }
 }  // namespace phi
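Finally, a quick local check that these annotations matter: build with unused-parameter warnings promoted to errors, for example `g++ -std=c++17 -Wall -Wextra -Werror demo.cc`. Without `UNUSED` the `xshape`-style parameter fails `-Wunused-parameter`; with it the build is clean. Sketch (`SqueezeLike` is a made-up stand-in for the `SqueezeKernel` pattern above):

```cpp
#define UNUSED __attribute__((unused))  // assumed GCC/Clang; see macros.h

// Mirrors SqueezeKernel: `xshape` exists only so the signature matches the
// registered op interface; the body delegates without reading it.
void SqueezeLike(const float* x, float* out, float* xshape UNUSED) {
  *out = *x;
}

int main() {
  float x = 3.f, out = 0.f, shape = 0.f;
  SqueezeLike(&x, &out, &shape);
  return out == 3.f ? 0 : 1;
}
```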