From 727fa27d42d7b2f228ce30593b540472bbff4da0 Mon Sep 17 00:00:00 2001
From: Galaxy1458 <55453380+Galaxy1458@users.noreply.github.com>
Date: Tue, 9 May 2023 10:20:52 +0800
Subject: [PATCH] remove some [-Wunused-parameter] warnings and WITH_DISTRIBUTE
 flag (#53532)

* test,test=develop

* test,test=develop
---
 .../fluid/distributed/ps/table/common_graph_table.h  | 11 ++++++-----
 paddle/fluid/framework/CMakeLists.txt                |  4 +---
 paddle/phi/backends/onednn/onednn_reuse.h            |  2 +-
 paddle/phi/core/utils/unroll_array_ops.h             |  5 +++--
 paddle/phi/kernels/complex_kernel.h                  |  2 +-
 paddle/phi/kernels/cpu/cross_entropy_grad_kernel.cc  |  2 +-
 paddle/phi/kernels/cpu/full_kernel.cc                |  6 +++---
 paddle/phi/kernels/cpu/graph_send_recv_funcs.h       |  2 +-
 paddle/phi/kernels/full_kernel.cc                    |  2 +-
 paddle/phi/kernels/funcs/gather_scatter_functor.cc   |  4 ++--
 paddle/phi/kernels/funcs/math_function.cc            |  2 +-
 paddle/phi/kernels/funcs/reduce_functor.h            |  2 +-
 paddle/phi/kernels/funcs/segment_pooling.cc          |  4 ++--
 paddle/phi/kernels/funcs/unique_functor.h            |  2 +-
 paddle/phi/kernels/impl/gumbel_softmax_kernel_impl.h |  2 +-
 paddle/phi/kernels/impl/lerp_grad_kernel_impl.h      |  4 ++--
 paddle/phi/kernels/reverse_kernel.cc                 |  2 +-
 paddle/phi/kernels/sparse/cpu/full_kernel.cc         |  2 +-
 paddle/phi/kernels/sparse/sparse_utils_kernel.h      |  4 ++--
 19 files changed, 32 insertions(+), 32 deletions(-)

diff --git a/paddle/fluid/distributed/ps/table/common_graph_table.h b/paddle/fluid/distributed/ps/table/common_graph_table.h
index f5288a3f90b..14232736828 100644
--- a/paddle/fluid/distributed/ps/table/common_graph_table.h
+++ b/paddle/fluid/distributed/ps/table/common_graph_table.h
@@ -511,7 +511,7 @@ class GraphTable : public Table {
   }
   virtual ~GraphTable();
 
-  virtual void *GetShard(size_t shard_idx) { return 0; }
+  virtual void *GetShard(size_t shard_idx UNUSED) { return 0; }
 
   static int32_t sparse_local_shard_num(uint32_t shard_num,
                                         uint32_t server_num) {
@@ -624,15 +624,16 @@ class GraphTable : public Table {
   Node *find_node(GraphTableType table_type, int idx, uint64_t id);
   Node *find_node(GraphTableType table_type, uint64_t id);
 
-  virtual int32_t Pull(TableContext &context) { return 0; }  // NOLINT
-  virtual int32_t Push(TableContext &context) { return 0; }  // NOLINT
+  virtual int32_t Pull(TableContext &context UNUSED) { return 0; }  // NOLINT
+  virtual int32_t Push(TableContext &context UNUSED) { return 0; }  // NOLINT
 
   virtual int32_t clear_nodes(GraphTableType table_type, int idx);
   virtual void Clear() {}
   virtual int32_t Flush() { return 0; }
-  virtual int32_t Shrink(const std::string &param) { return 0; }
+  virtual int32_t Shrink(const std::string &param UNUSED) { return 0; }
   // Specify the save path
-  virtual int32_t Save(const std::string &path, const std::string &converter) {
+  virtual int32_t Save(const std::string &path UNUSED,
+                       const std::string &converter UNUSED) {
     return 0;
   }
   virtual int32_t InitializeShard() { return 0; }
diff --git a/paddle/fluid/framework/CMakeLists.txt b/paddle/fluid/framework/CMakeLists.txt
index a96cb146f68..b95d996b88a 100755
--- a/paddle/fluid/framework/CMakeLists.txt
+++ b/paddle/fluid/framework/CMakeLists.txt
@@ -928,9 +928,7 @@ if(WITH_DISTRIBUTE)
       fleet_executor)
   endif()
 elseif(WITH_PSLIB)
-  set(DISTRIBUTE_COMPILE_FLAGS
-      "-Wno-non-virtual-dtor -Wno-error=non-virtual-dtor -Wno-error=delete-non-virtual-dtor"
-  )
+  set(DISTRIBUTE_COMPILE_FLAGS "")
   if(CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 7.0)
     set(DISTRIBUTE_COMPILE_FLAGS "${DISTRIBUTE_COMPILE_FLAGS} -faligned-new")
   endif()
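Note on the UNUSED annotation used throughout this patch: it expands to a
compiler attribute marking a parameter as intentionally unused, which is what
silences -Wunused-parameter. A minimal sketch of such a macro, assuming GCC or
Clang (illustrative only; Paddle defines its own macro in its headers, not
reproduced here):

    // unused_macro.h - hypothetical standalone header for illustration.
    #pragma once

    #if defined(__GNUC__) || defined(__clang__)
    // Tells the compiler the parameter is deliberately ignored, so
    // -Wunused-parameter (and -Werror builds) stay quiet.
    #define UNUSED __attribute__((unused))
    #else
    #define UNUSED
    #endif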
diff --git a/paddle/phi/backends/onednn/onednn_reuse.h b/paddle/phi/backends/onednn/onednn_reuse.h
index c7dcdea0e48..463c55a7c53 100644
--- a/paddle/phi/backends/onednn/onednn_reuse.h
+++ b/paddle/phi/backends/onednn/onednn_reuse.h
@@ -1178,7 +1178,7 @@ class ReductionOneDNNHandler
                          const dnnl::engine engine,
                          Place cpu_place,
                          const DenseTensor* x,
-                         const DenseTensor* out,
+                         const DenseTensor* out UNUSED,
                          std::vector<int64_t> out_tz,
                          const dnnl::primitive_attr& attrs = NULL)
       : OneDNNHandlerNoCachingT<T, dnnl::reduction>(engine, cpu_place) {
diff --git a/paddle/phi/core/utils/unroll_array_ops.h b/paddle/phi/core/utils/unroll_array_ops.h
index 2e1d84080af..665c2ae5128 100644
--- a/paddle/phi/core/utils/unroll_array_ops.h
+++ b/paddle/phi/core/utils/unroll_array_ops.h
@@ -87,7 +87,8 @@ struct UnrollCompare {
 template <size_t kStart, size_t kEnd>
 struct UnrollCompare<kStart, kEnd, true> {
   template <typename T>
-  HOSTDEVICE inline constexpr static bool Run(const T *d1, const T *d2) {
+  HOSTDEVICE inline constexpr static bool Run(const T *d1 UNUSED,
+                                              const T *d2 UNUSED) {
     return true;
   }
 };
@@ -104,7 +105,7 @@ struct UnrollProduct {
 template <size_t kStart, size_t kEnd>
 struct UnrollProduct<kStart, kEnd, true> {
   template <typename T>
-  HOSTDEVICE inline constexpr static T Run(const T *d) {
+  HOSTDEVICE inline constexpr static T Run(const T *d UNUSED) {
     return 1;
   }
 };
diff --git a/paddle/phi/kernels/complex_kernel.h b/paddle/phi/kernels/complex_kernel.h
index ad66b890b3d..4f1a1c40402 100644
--- a/paddle/phi/kernels/complex_kernel.h
+++ b/paddle/phi/kernels/complex_kernel.h
@@ -58,7 +58,7 @@ template <
     std::enable_if_t<!std::is_same<T, phi::dtype::complex<float>>::value &&
                          !std::is_same<T, phi::dtype::complex<double>>::value,
                      bool> = true>
-DenseTensor Conj(const Context& dev_ctx, const DenseTensor& x) {
+DenseTensor Conj(const Context& dev_ctx UNUSED, const DenseTensor& x) {
   return x;
 }
 
diff --git a/paddle/phi/kernels/cpu/cross_entropy_grad_kernel.cc b/paddle/phi/kernels/cpu/cross_entropy_grad_kernel.cc
index 1499f01002b..eacf1ad0cf3 100644
--- a/paddle/phi/kernels/cpu/cross_entropy_grad_kernel.cc
+++ b/paddle/phi/kernels/cpu/cross_entropy_grad_kernel.cc
@@ -30,7 +30,7 @@ void CrossEntropyWithSoftmaxGradCPUKernel(const CPUContext& dev_ctx,
                                           const DenseTensor& loss_grad,
                                           bool soft_label,
                                           bool use_softmax,
-                                          bool numeric_stable_mode,
+                                          bool numeric_stable_mode UNUSED,
                                           int ignore_index,
                                           int axis,
                                           DenseTensor* logits_grad) {
diff --git a/paddle/phi/kernels/cpu/full_kernel.cc b/paddle/phi/kernels/cpu/full_kernel.cc
index d9ab771664a..a295bfcc20c 100644
--- a/paddle/phi/kernels/cpu/full_kernel.cc
+++ b/paddle/phi/kernels/cpu/full_kernel.cc
@@ -32,7 +32,7 @@ template <typename T, typename Context>
 void FullKernel(const Context& dev_ctx,
                 const IntArray& shape,
                 const Scalar& val,
-                DataType dtype,
+                DataType dtype UNUSED,
                 DenseTensor* out) {
   out->Resize(phi::make_ddim(shape.GetData()));
   FullValue<T>(dev_ctx, out, val.to<T>());
@@ -40,9 +40,9 @@ void FullKernel(const Context& dev_ctx,
 
 template <typename T, typename Context>
 void FullLikeKernel(const Context& dev_ctx,
-                    const DenseTensor& x,
+                    const DenseTensor& x UNUSED,
                     const Scalar& val,
-                    DataType dtype,
+                    DataType dtype UNUSED,
                     DenseTensor* out) {
   auto value = val.to<double>();
   using CommonType = typename std::common_type<
diff --git a/paddle/phi/kernels/cpu/graph_send_recv_funcs.h b/paddle/phi/kernels/cpu/graph_send_recv_funcs.h
index df6d9c87be0..c67480cc9e3 100644
--- a/paddle/phi/kernels/cpu/graph_send_recv_funcs.h
+++ b/paddle/phi/kernels/cpu/graph_send_recv_funcs.h
@@ -25,7 +25,7 @@ namespace phi {
 
 template <typename T>
 struct GraphSendRecvSumFunctor {
-  void operator()(const bool& first_flag,
+  void operator()(const bool& first_flag UNUSED,
                   const DenseTensor& src_slice,
                   DenseTensor* dst_slice) {
     auto eigen_src = phi::EigenVector<T>::Flatten(src_slice);
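The same warning can also be silenced without a macro; two standard idioms,
shown as a short sketch (the function names here are made up for illustration
and are not part of the patch):

    #include <cstddef>

    // Idiom 1: leave the parameter unnamed; a comment keeps the name readable.
    void* GetShardA(std::size_t /*shard_idx*/) { return nullptr; }

    // Idiom 2: C++17 [[maybe_unused]], equivalent in effect to the UNUSED macro.
    void* GetShardB([[maybe_unused]] std::size_t shard_idx) { return nullptr; }

A single project-wide macro keeps one spelling across the codebase and still
compiles on toolchains where the attribute is unavailable.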
diff --git a/paddle/phi/kernels/full_kernel.cc b/paddle/phi/kernels/full_kernel.cc
index ce898210633..38beafbfa51 100644
--- a/paddle/phi/kernels/full_kernel.cc
+++ b/paddle/phi/kernels/full_kernel.cc
@@ -21,7 +21,7 @@ namespace phi {
 template <typename T, typename Context>
 void FullBatchSizeLikeKernel(const Context& dev_ctx,
                              const DenseTensor& x,
-                             const std::vector<int>& shape,
+                             const std::vector<int>& shape UNUSED,
                              const Scalar& val,
                              DataType dtype,
                              int x_batch_size_dim,
diff --git a/paddle/phi/kernels/funcs/gather_scatter_functor.cc b/paddle/phi/kernels/funcs/gather_scatter_functor.cc
index 57a8d679f34..e88dbf0f7cc 100644
--- a/paddle/phi/kernels/funcs/gather_scatter_functor.cc
+++ b/paddle/phi/kernels/funcs/gather_scatter_functor.cc
@@ -181,11 +181,11 @@ void cpu_scatter_mul_kernel(phi::DenseTensor self,
 }
 
 template <typename tensor_t, typename index_t>
-void cpu_scatter_input_grad_kernel(phi::DenseTensor self,
+void cpu_scatter_input_grad_kernel(phi::DenseTensor self UNUSED,
                                    int dim,
                                    const phi::DenseTensor& index,
                                    phi::DenseTensor output,
-                                   const phi::DeviceContext& ctx) {
+                                   const phi::DeviceContext& ctx UNUSED) {
   auto* index_data = index.data<index_t>();
   auto* output_data = output.data<tensor_t>();
 
diff --git a/paddle/phi/kernels/funcs/math_function.cc b/paddle/phi/kernels/funcs/math_function.cc
index d13ce8a2a90..1a61f485791 100644
--- a/paddle/phi/kernels/funcs/math_function.cc
+++ b/paddle/phi/kernels/funcs/math_function.cc
@@ -92,7 +92,7 @@ DEFINE_CPU_TRANS(6);
 
 template <typename DeviceContext, typename T>
 void TransposeNormal<DeviceContext, T>::operator()(
-    const DeviceContext& context,
+    const DeviceContext& context UNUSED,
     const phi::DenseTensor& in,
     phi::DenseTensor* out,
     const std::vector<int>& axis) {
diff --git a/paddle/phi/kernels/funcs/reduce_functor.h b/paddle/phi/kernels/funcs/reduce_functor.h
index 80217520b12..e5e440d64d5 100644
--- a/paddle/phi/kernels/funcs/reduce_functor.h
+++ b/paddle/phi/kernels/funcs/reduce_functor.h
@@ -41,7 +41,7 @@ struct FrobeniusNormGradFunctor {
                   DX* dx,
                   DY* dy,
                   const Dim& dim,
-                  int size) {
+                  int size UNUSED) {
     dx->device(place) = y->broadcast(dim);
     dx->device(place) = *dx + dx->constant(1e-12f);
     dx->device(place) = (*x / *dx) * (dy->broadcast(dim));
diff --git a/paddle/phi/kernels/funcs/segment_pooling.cc b/paddle/phi/kernels/funcs/segment_pooling.cc
index c5d0944d8c1..9dd800aba1a 100644
--- a/paddle/phi/kernels/funcs/segment_pooling.cc
+++ b/paddle/phi/kernels/funcs/segment_pooling.cc
@@ -31,7 +31,7 @@ class SegmentPoolFunctor {
                   const DenseTensor& input,
                   const DenseTensor& segments,
                   DenseTensor* output,
-                  DenseTensor* index,
+                  DenseTensor* index UNUSED,
                   const std::string pooltype = "SUM") {
     const IndexT* segment_ids = segments.data<IndexT>();
     auto curent_id = segment_ids[0];
@@ -90,7 +90,7 @@ class SegmentPoolGradFunctor {
                   const DenseTensor& out_grad,
                   const DenseTensor& segments,
                   DenseTensor* in_grad,
-                  const paddle::optional<DenseTensor>& index,
+                  const paddle::optional<DenseTensor>& index UNUSED,
                   const std::string pooltype = "SUM") {
     const IndexT* segment_ids = segments.data<IndexT>();
     auto& place = *dev_ctx.eigen_device();
diff --git a/paddle/phi/kernels/funcs/unique_functor.h b/paddle/phi/kernels/funcs/unique_functor.h
index d704d2d60fa..806d7cca848 100644
--- a/paddle/phi/kernels/funcs/unique_functor.h
+++ b/paddle/phi/kernels/funcs/unique_functor.h
@@ -190,7 +190,7 @@ static void UniqueFlattendTensor(const Context& context,
 }
 
 template <typename Context, typename ForwardIt, typename InT, typename IndexT>
-static ForwardIt UniqueDimImpl(const Context& context,
+static ForwardIt UniqueDimImpl(const Context& context UNUSED,
                                ForwardIt first,
                                ForwardIt last,
                                const std::vector<IndexT>& sorted_indices_vec,
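Several of the parameters annotated above sit in terminating template
specializations (UnrollCompare, UnrollProduct), where the argument is required
by the interface but never read, so the warning is unavoidable without an
annotation. A self-contained sketch of that pattern (names are illustrative,
not Paddle's):

    #include <cstddef>
    #include <cstdio>

    // Compile-time unrolled product over a fixed-size array.
    template <std::size_t kStart, std::size_t kEnd, bool kStop>
    struct Product {
      template <typename T>
      static constexpr T Run(const T* d) {
        return d[kStart] * Product<kStart + 1, kEnd, kStart + 1 == kEnd>::Run(d);
      }
    };

    // Terminating specialization: `d` must stay in the signature but is never
    // read, which is exactly what trips -Wunused-parameter.
    template <std::size_t kStart, std::size_t kEnd>
    struct Product<kStart, kEnd, true> {
      template <typename T>
      static constexpr T Run(const T* /*d*/) {
        return T{1};
      }
    };

    int main() {
      constexpr int a[3] = {2, 3, 4};
      std::printf("%d\n", Product<0, 3, false>::Run(a));  // prints 24
      return 0;
    }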
diff --git a/paddle/phi/kernels/impl/gumbel_softmax_kernel_impl.h b/paddle/phi/kernels/impl/gumbel_softmax_kernel_impl.h
index c2229b50dee..184d149110e 100644
--- a/paddle/phi/kernels/impl/gumbel_softmax_kernel_impl.h
+++ b/paddle/phi/kernels/impl/gumbel_softmax_kernel_impl.h
@@ -27,7 +27,7 @@ namespace phi {
 
 template <typename Context, typename T, int64_t Rank>
 struct ArgMaxFunctor {
-  void operator()(const Context& ctx,
+  void operator()(const Context& ctx UNUSED,
                   const DenseTensor& in,
                   DenseTensor* index_tensor,
                   const int64_t& axis) {
diff --git a/paddle/phi/kernels/impl/lerp_grad_kernel_impl.h b/paddle/phi/kernels/impl/lerp_grad_kernel_impl.h
index 541de0cc162..e9c54c7aca8 100644
--- a/paddle/phi/kernels/impl/lerp_grad_kernel_impl.h
+++ b/paddle/phi/kernels/impl/lerp_grad_kernel_impl.h
@@ -21,8 +21,8 @@ namespace phi {
 
 template <typename Context, typename T, size_t D>
 static void LerpGradFunction(const Context& ctx,
-                             const DenseTensor& x,
-                             const DenseTensor& y,
+                             const DenseTensor& x UNUSED,
+                             const DenseTensor& y UNUSED,
                              const DenseTensor& weight,
                              const DenseTensor& out,
                              const DenseTensor& out_grad,
diff --git a/paddle/phi/kernels/reverse_kernel.cc b/paddle/phi/kernels/reverse_kernel.cc
index b2fe61ad41f..771acacedf0 100644
--- a/paddle/phi/kernels/reverse_kernel.cc
+++ b/paddle/phi/kernels/reverse_kernel.cc
@@ -23,7 +23,7 @@ namespace phi {
 template <typename T, typename Context>
 void ReverseArrayKernel(const Context& dev_ctx,
                         const TensorArray& x,
-                        const IntArray& axis,
+                        const IntArray& axis UNUSED,
                         TensorArray* out) {
   PADDLE_ENFORCE_EQ(
       x.size(),
diff --git a/paddle/phi/kernels/sparse/cpu/full_kernel.cc b/paddle/phi/kernels/sparse/cpu/full_kernel.cc
index ac13327caee..5659bcf2159 100644
--- a/paddle/phi/kernels/sparse/cpu/full_kernel.cc
+++ b/paddle/phi/kernels/sparse/cpu/full_kernel.cc
@@ -34,7 +34,7 @@ template <typename T, typename Context>
 void FullLikeCooKernel(const Context& dev_ctx,
                        const SparseCooTensor& x,
                        const Scalar& val,
-                       DataType dtype,
+                       DataType dtype UNUSED,
                        SparseCooTensor* out) {
   phi::Copy(dev_ctx,
             x.non_zero_indices(),
diff --git a/paddle/phi/kernels/sparse/sparse_utils_kernel.h b/paddle/phi/kernels/sparse/sparse_utils_kernel.h
index 8639f914694..241f3d8b067 100644
--- a/paddle/phi/kernels/sparse/sparse_utils_kernel.h
+++ b/paddle/phi/kernels/sparse/sparse_utils_kernel.h
@@ -144,14 +144,14 @@ DenseTensor CsrToDense(const Context& dev_ctx, const SparseCsrTensor& x) {
 }
 
 template <typename T, typename Context>
-void ValuesCooKernel(const Context& dev_ctx,
+void ValuesCooKernel(const Context& dev_ctx UNUSED,
                      const SparseCooTensor& x,
                      DenseTensor* out) {
   *out = x.non_zero_elements();
 }
 
 template <typename T, typename Context>
-void ValuesCsrKernel(const Context& dev_ctx,
+void ValuesCsrKernel(const Context& dev_ctx UNUSED,
                      const SparseCsrTensor& x,
                      DenseTensor* out) {
   *out = x.non_zero_elements();
-- 
GitLab
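To confirm a translation unit is clean after annotating, the warning can be
exercised directly; a minimal reproduction, assuming g++ or clang++ (this file
is not part of the patch):

    // repro.cc - build with: g++ -c -Wextra -Werror repro.cc
    // Without the annotation, -Wunused-parameter (enabled by -Wextra)
    // fails the build under -Werror.
    #define UNUSED __attribute__((unused))

    int Scaled(int used, int scratch UNUSED) { return used * 2; }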