diff --git a/paddle/fluid/distributed/ps/table/common_graph_table.h b/paddle/fluid/distributed/ps/table/common_graph_table.h
index f5288a3f90b121c2ef47b83da5f63229848ed1d6..142327368281adcae2f1c7aa71dee5ae5056d678 100644
--- a/paddle/fluid/distributed/ps/table/common_graph_table.h
+++ b/paddle/fluid/distributed/ps/table/common_graph_table.h
@@ -511,7 +511,7 @@ class GraphTable : public Table {
   }
   virtual ~GraphTable();
 
-  virtual void *GetShard(size_t shard_idx) { return 0; }
+  virtual void *GetShard(size_t shard_idx UNUSED) { return 0; }
 
   static int32_t sparse_local_shard_num(uint32_t shard_num,
                                         uint32_t server_num) {
@@ -624,15 +624,16 @@ class GraphTable : public Table {
   Node *find_node(GraphTableType table_type, int idx, uint64_t id);
   Node *find_node(GraphTableType table_type, uint64_t id);
 
-  virtual int32_t Pull(TableContext &context) { return 0; }  // NOLINT
-  virtual int32_t Push(TableContext &context) { return 0; }  // NOLINT
+  virtual int32_t Pull(TableContext &context UNUSED) { return 0; }  // NOLINT
+  virtual int32_t Push(TableContext &context UNUSED) { return 0; }  // NOLINT
 
   virtual int32_t clear_nodes(GraphTableType table_type, int idx);
   virtual void Clear() {}
   virtual int32_t Flush() { return 0; }
-  virtual int32_t Shrink(const std::string &param) { return 0; }
+  virtual int32_t Shrink(const std::string &param UNUSED) { return 0; }
   // Specify the save path
-  virtual int32_t Save(const std::string &path, const std::string &converter) {
+  virtual int32_t Save(const std::string &path UNUSED,
+                       const std::string &converter UNUSED) {
     return 0;
   }
   virtual int32_t InitializeShard() { return 0; }
diff --git a/paddle/fluid/framework/CMakeLists.txt b/paddle/fluid/framework/CMakeLists.txt
index a96cb146f6833eb3362b60b0196a91168d6c9d0c..b95d996b88a94ec6718bd79b8c1fbf28d35301fa 100755
--- a/paddle/fluid/framework/CMakeLists.txt
+++ b/paddle/fluid/framework/CMakeLists.txt
@@ -928,9 +928,7 @@ if(WITH_DISTRIBUTE)
       fleet_executor)
   endif()
 elseif(WITH_PSLIB)
-  set(DISTRIBUTE_COMPILE_FLAGS
-      "-Wno-non-virtual-dtor -Wno-error=non-virtual-dtor -Wno-error=delete-non-virtual-dtor"
-  )
+  set(DISTRIBUTE_COMPILE_FLAGS "")
   if(CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 7.0)
     set(DISTRIBUTE_COMPILE_FLAGS "${DISTRIBUTE_COMPILE_FLAGS} -faligned-new")
   endif()
diff --git a/paddle/phi/backends/onednn/onednn_reuse.h b/paddle/phi/backends/onednn/onednn_reuse.h
index c7dcdea0e487cc1271f8ba45ac79ca20ae2c578c..463c55a7c531748c3921c47926b73c79a94b1394 100644
--- a/paddle/phi/backends/onednn/onednn_reuse.h
+++ b/paddle/phi/backends/onednn/onednn_reuse.h
@@ -1178,7 +1178,7 @@ class ReductionOneDNNHandler
                          const dnnl::engine engine,
                          Place cpu_place,
                          const DenseTensor* x,
-                         const DenseTensor* out,
+                         const DenseTensor* out UNUSED,
                          std::vector<int64_t> out_tz,
                          const dnnl::primitive_attr& attrs = NULL)
       : OneDNNHandlerNoCachingT<T, dnnl::reduction>(engine, cpu_place) {
diff --git a/paddle/phi/core/utils/unroll_array_ops.h b/paddle/phi/core/utils/unroll_array_ops.h
index 2e1d84080af485b787371bfc5b7c68c17b0e5820..665c2ae5128294f5d2ed724fefac9de5cbfa684b 100644
--- a/paddle/phi/core/utils/unroll_array_ops.h
+++ b/paddle/phi/core/utils/unroll_array_ops.h
@@ -87,7 +87,8 @@ struct UnrollCompare {
 template <size_t kStart, size_t kEnd>
 struct UnrollCompare<kStart, kEnd, true> {
   template <typename T>
-  HOSTDEVICE inline constexpr static bool Run(const T *d1, const T *d2) {
+  HOSTDEVICE inline constexpr static bool Run(const T *d1 UNUSED,
+                                              const T *d2 UNUSED) {
     return true;
   }
 };
@@ -104,7 +105,7 @@ struct UnrollProduct {
 template <size_t kStart, size_t kEnd>
 struct UnrollProduct<kStart, kEnd, true> {
   template <typename T>
-  HOSTDEVICE inline constexpr static T Run(const T *d) {
+  HOSTDEVICE inline constexpr static T Run(const T *d UNUSED) {
     return 1;
   }
 };
diff --git a/paddle/phi/kernels/complex_kernel.h b/paddle/phi/kernels/complex_kernel.h
index ad66b890b3d5ab70aabaf9911b726a4b9d2261ef..4f1a1c40402dea53cc133ac4de0eca7b85381589 100644
--- a/paddle/phi/kernels/complex_kernel.h
+++ b/paddle/phi/kernels/complex_kernel.h
@@ -58,7 +58,7 @@ template <
     std::enable_if_t<!std::is_same<T, phi::dtype::complex<float>>::value &&
                          !std::is_same<T, phi::dtype::complex<double>>::value,
                      bool> = true>
-DenseTensor Conj(const Context& dev_ctx, const DenseTensor& x) {
+DenseTensor Conj(const Context& dev_ctx UNUSED, const DenseTensor& x) {
   return x;
 }
 
diff --git a/paddle/phi/kernels/cpu/cross_entropy_grad_kernel.cc b/paddle/phi/kernels/cpu/cross_entropy_grad_kernel.cc
index 1499f01002b6bca967e07a4ec22a5f46c8db5525..eacf1ad0cf3ad9555a125c2de3db1b6d171eff88 100644
--- a/paddle/phi/kernels/cpu/cross_entropy_grad_kernel.cc
+++ b/paddle/phi/kernels/cpu/cross_entropy_grad_kernel.cc
@@ -30,7 +30,7 @@ void CrossEntropyWithSoftmaxGradCPUKernel(const CPUContext& dev_ctx,
                                           const DenseTensor& loss_grad,
                                           bool soft_label,
                                           bool use_softmax,
-                                          bool numeric_stable_mode,
+                                          bool numeric_stable_mode UNUSED,
                                           int ignore_index,
                                           int axis,
                                           DenseTensor* logits_grad) {
diff --git a/paddle/phi/kernels/cpu/full_kernel.cc b/paddle/phi/kernels/cpu/full_kernel.cc
index d9ab771664a8f4fb8156346a1e1985977e1728f0..a295bfcc20c3a5b151ac6b8af39d33f48a847992 100644
--- a/paddle/phi/kernels/cpu/full_kernel.cc
+++ b/paddle/phi/kernels/cpu/full_kernel.cc
@@ -32,7 +32,7 @@ template <typename T, typename Context>
 void FullKernel(const Context& dev_ctx,
                 const IntArray& shape,
                 const Scalar& val,
-                DataType dtype,
+                DataType dtype UNUSED,
                 DenseTensor* out) {
   out->Resize(phi::make_ddim(shape.GetData()));
   FullValue<T>(dev_ctx, out, val.to<T>());
@@ -40,9 +40,9 @@ void FullKernel(const Context& dev_ctx,
 
 template <typename T, typename Context>
 void FullLikeKernel(const Context& dev_ctx,
-                    const DenseTensor& x,
+                    const DenseTensor& x UNUSED,
                     const Scalar& val,
-                    DataType dtype,
+                    DataType dtype UNUSED,
                     DenseTensor* out) {
   auto value = val.to<double>();
   using CommonType = typename std::common_type<
diff --git a/paddle/phi/kernels/cpu/graph_send_recv_funcs.h b/paddle/phi/kernels/cpu/graph_send_recv_funcs.h
index df6d9c87be0ed582568684aaa0869342f10e47c7..c67480cc9e33e220e8a45ad0d9752051966a3f0d 100644
--- a/paddle/phi/kernels/cpu/graph_send_recv_funcs.h
+++ b/paddle/phi/kernels/cpu/graph_send_recv_funcs.h
@@ -25,7 +25,7 @@ namespace phi {
 
 template <typename T>
 struct GraphSendRecvSumFunctor {
-  void operator()(const bool& first_flag,
+  void operator()(const bool& first_flag UNUSED,
                   const DenseTensor& src_slice,
                   DenseTensor* dst_slice) {
     auto eigen_src = phi::EigenVector<T>::Flatten(src_slice);
diff --git a/paddle/phi/kernels/full_kernel.cc b/paddle/phi/kernels/full_kernel.cc
index ce898210633b7d02c75bafa04c958f7f93a11e02..38beafbfa51b94e31d761f7c1c6652a75ed572e7 100644
--- a/paddle/phi/kernels/full_kernel.cc
+++ b/paddle/phi/kernels/full_kernel.cc
@@ -21,7 +21,7 @@ namespace phi {
 template <typename T, typename Context>
 void FullBatchSizeLikeKernel(const Context& dev_ctx,
                              const DenseTensor& x,
-                             const std::vector<int>& shape,
+                             const std::vector<int>& shape UNUSED,
                              const Scalar& val,
                              DataType dtype,
                              int x_batch_size_dim,
diff --git a/paddle/phi/kernels/funcs/gather_scatter_functor.cc b/paddle/phi/kernels/funcs/gather_scatter_functor.cc
index 57a8d679f346fd4c7be384bd82fd82e02a8620aa..e88dbf0f7ccdb39f8396970bf2d1eddff8be269f 100644
--- a/paddle/phi/kernels/funcs/gather_scatter_functor.cc
+++ b/paddle/phi/kernels/funcs/gather_scatter_functor.cc
@@ -181,11 +181,11 @@ void cpu_scatter_mul_kernel(phi::DenseTensor self,
 }
 
 template <typename tensor_t, typename index_t>
-void cpu_scatter_input_grad_kernel(phi::DenseTensor self,
+void cpu_scatter_input_grad_kernel(phi::DenseTensor self UNUSED,
                                    int dim,
                                    const phi::DenseTensor& index,
                                    phi::DenseTensor output,
-                                   const phi::DeviceContext& ctx) {
+                                   const phi::DeviceContext& ctx UNUSED) {
   auto* index_data = index.data<index_t>();
   auto* output_data = output.data<tensor_t>();
 
diff --git a/paddle/phi/kernels/funcs/math_function.cc b/paddle/phi/kernels/funcs/math_function.cc
index d13ce8a2a90b52e76207ef07ef0c90b0c28f571b..1a61f48579188794d0e686a0db0ab06c78efe684 100644
--- a/paddle/phi/kernels/funcs/math_function.cc
+++ b/paddle/phi/kernels/funcs/math_function.cc
@@ -92,7 +92,7 @@ DEFINE_CPU_TRANS(6);
 
 template <typename DeviceContext, typename T>
 void TransposeNormal<DeviceContext, T>::operator()(
-    const DeviceContext& context,
+    const DeviceContext& context UNUSED,
     const phi::DenseTensor& in,
     phi::DenseTensor* out,
     const std::vector<int>& axis) {
diff --git a/paddle/phi/kernels/funcs/reduce_functor.h b/paddle/phi/kernels/funcs/reduce_functor.h
index 80217520b126f52c2cdd27c9c438d03d09cc1475..e5e440d64d5419fbbe61252ade094cfd99634bdc 100644
--- a/paddle/phi/kernels/funcs/reduce_functor.h
+++ b/paddle/phi/kernels/funcs/reduce_functor.h
@@ -41,7 +41,7 @@ struct FrobeniusNormGradFunctor {
                   DX* dx,
                   DY* dy,
                   const Dim& dim,
-                  int size) {
+                  int size UNUSED) {
     dx->device(place) = y->broadcast(dim);
     dx->device(place) = *dx + dx->constant(1e-12f);
     dx->device(place) = (*x / *dx) * (dy->broadcast(dim));
diff --git a/paddle/phi/kernels/funcs/segment_pooling.cc b/paddle/phi/kernels/funcs/segment_pooling.cc
index c5d0944d8c1904509aad52c252eafb8882e18769..9dd800aba1a7bcb40350c3246300b7a483d81f88 100644
--- a/paddle/phi/kernels/funcs/segment_pooling.cc
+++ b/paddle/phi/kernels/funcs/segment_pooling.cc
@@ -31,7 +31,7 @@ class SegmentPoolFunctor {
                   const DenseTensor& input,
                   const DenseTensor& segments,
                   DenseTensor* output,
-                  DenseTensor* index,
+                  DenseTensor* index UNUSED,
                   const std::string pooltype = "SUM") {
     const IndexT* segment_ids = segments.data<IndexT>();
     auto curent_id = segment_ids[0];
@@ -90,7 +90,7 @@ class SegmentPoolGradFunctor {
                   const DenseTensor& out_grad,
                   const DenseTensor& segments,
                   DenseTensor* in_grad,
-                  const paddle::optional<DenseTensor>& index,
+                  const paddle::optional<DenseTensor>& index UNUSED,
                   const std::string pooltype = "SUM") {
     const IndexT* segment_ids = segments.data<IndexT>();
     auto& place = *dev_ctx.eigen_device();
diff --git a/paddle/phi/kernels/funcs/unique_functor.h b/paddle/phi/kernels/funcs/unique_functor.h
index d704d2d60fa8d3745afeb242d26f8ffed491c827..806d7cca84851d8235eb3d2f17ec91a4172e28f5 100644
--- a/paddle/phi/kernels/funcs/unique_functor.h
+++ b/paddle/phi/kernels/funcs/unique_functor.h
@@ -190,7 +190,7 @@ static void UniqueFlattendTensor(const Context& context,
 }
 
 template <typename Context, typename ForwardIt, typename InT, typename IndexT>
-static ForwardIt UniqueDimImpl(const Context& context,
+static ForwardIt UniqueDimImpl(const Context& context UNUSED,
                                ForwardIt first,
                                ForwardIt last,
                                const std::vector<IndexT>& sorted_indices_vec,
diff --git a/paddle/phi/kernels/impl/gumbel_softmax_kernel_impl.h b/paddle/phi/kernels/impl/gumbel_softmax_kernel_impl.h
index c2229b50deee1a32abc9590e831002c7815f2ef3..184d149110e9bd242a2bb98af2ed6ff860422fd6 100644
--- a/paddle/phi/kernels/impl/gumbel_softmax_kernel_impl.h
+++ b/paddle/phi/kernels/impl/gumbel_softmax_kernel_impl.h
@@ -27,7 +27,7 @@ namespace phi {
 
 template <typename Context, typename T, int64_t Rank>
 struct ArgMaxFunctor {
-  void operator()(const Context& ctx,
+  void operator()(const Context& ctx UNUSED,
                   const DenseTensor& in,
                   DenseTensor* index_tensor,
                   const int64_t& axis) {
diff --git a/paddle/phi/kernels/impl/lerp_grad_kernel_impl.h b/paddle/phi/kernels/impl/lerp_grad_kernel_impl.h
index 541de0cc162ccd7bc48e296db97819176644f14f..e9c54c7aca81aa859f3aeba4cfb430f089eae4e1 100644
--- a/paddle/phi/kernels/impl/lerp_grad_kernel_impl.h
+++ b/paddle/phi/kernels/impl/lerp_grad_kernel_impl.h
@@ -21,8 +21,8 @@ namespace phi {
 
 template <typename Context, typename T, size_t D>
 static void LerpGradFunction(const Context& ctx,
-                             const DenseTensor& x,
-                             const DenseTensor& y,
+                             const DenseTensor& x UNUSED,
+                             const DenseTensor& y UNUSED,
                              const DenseTensor& weight,
                              const DenseTensor& out,
                              const DenseTensor& out_grad,
diff --git a/paddle/phi/kernels/reverse_kernel.cc b/paddle/phi/kernels/reverse_kernel.cc
index b2fe61ad41fc6db84fdf755d7d57a1f4f76cf5c7..771acacedf02439a24394eb814ad461e21ac8dc7 100644
--- a/paddle/phi/kernels/reverse_kernel.cc
+++ b/paddle/phi/kernels/reverse_kernel.cc
@@ -23,7 +23,7 @@ namespace phi {
 template <typename T, typename Context>
 void ReverseArrayKernel(const Context& dev_ctx,
                         const TensorArray& x,
-                        const IntArray& axis,
+                        const IntArray& axis UNUSED,
                         TensorArray* out) {
   PADDLE_ENFORCE_EQ(
       x.size(),
diff --git a/paddle/phi/kernels/sparse/cpu/full_kernel.cc b/paddle/phi/kernels/sparse/cpu/full_kernel.cc
index ac13327caeeaa4eee930b8f11dabbb6c16d4fcd2..5659bcf2159daea8cf4ff4264608f7a7626b6572 100644
--- a/paddle/phi/kernels/sparse/cpu/full_kernel.cc
+++ b/paddle/phi/kernels/sparse/cpu/full_kernel.cc
@@ -34,7 +34,7 @@ template <typename T, typename Context>
 void FullLikeCooKernel(const Context& dev_ctx,
                        const SparseCooTensor& x,
                        const Scalar& val,
-                       DataType dtype,
+                       DataType dtype UNUSED,
                        SparseCooTensor* out) {
   phi::Copy(dev_ctx,
             x.non_zero_indices(),
diff --git a/paddle/phi/kernels/sparse/sparse_utils_kernel.h b/paddle/phi/kernels/sparse/sparse_utils_kernel.h
index 8639f91469454d1625773b287f4a17a87f6e7ff8..241f3d8b0670a4539d9adc811dd43ad1e6224a13 100644
--- a/paddle/phi/kernels/sparse/sparse_utils_kernel.h
+++ b/paddle/phi/kernels/sparse/sparse_utils_kernel.h
@@ -144,14 +144,14 @@ DenseTensor CsrToDense(const Context& dev_ctx, const SparseCsrTensor& x) {
 }
 
 template <typename T, typename Context>
-void ValuesCooKernel(const Context& dev_ctx,
+void ValuesCooKernel(const Context& dev_ctx UNUSED,
                      const SparseCooTensor& x,
                      DenseTensor* out) {
   *out = x.non_zero_elements();
 }
 
 template <typename T, typename Context>
-void ValuesCsrKernel(const Context& dev_ctx,
+void ValuesCsrKernel(const Context& dev_ctx UNUSED,
                      const SparseCsrTensor& x,
                      DenseTensor* out) {
   *out = x.non_zero_elements();
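
Note on the pattern above: every hunk in this patch marks a deliberately unused
parameter with Paddle's UNUSED macro instead of deleting the parameter name, so
virtual overrides, template specializations, and registered kernel signatures
stay aligned with their interfaces while -Wunused-parameter is satisfied. The
CMakeLists.txt hunk complements this cleanup by dropping the
-Wno-non-virtual-dtor suppressions from the WITH_PSLIB compile flags.

As a minimal sketch of how such a macro typically works (this is not Paddle's
actual header; the real definition lives in Paddle's macro headers and may
differ), the attribute is attached after the parameter name:

    // unused_macro_demo.cc -- standalone sketch of an UNUSED-style macro.
    // Assumption: a GCC/Clang-compatible compiler; elsewhere the macro
    // expands to nothing and the warning may still fire.
    #include <cstddef>

    #if defined(__GNUC__) || defined(__clang__)
    #define UNUSED __attribute__((unused))
    #else
    #define UNUSED
    #endif

    // A stub that ignores its argument, as in GraphTable::GetShard above.
    // Without the annotation, compiling with -Wall -Wextra (which enable
    // -Wunused-parameter) warns that shard_idx is unused.
    void* GetShard(std::size_t shard_idx UNUSED) { return nullptr; }

    int main() { return GetShard(0) == nullptr ? 0 : 1; }

Keeping the named parameter plus the annotation, rather than commenting the
name out as /*shard_idx*/, preserves self-documenting signatures and makes the
intent (parameter deliberately ignored) explicit at every call boundary.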