From c174aa220f13f88946a6397bcd9591fed4fbc96d Mon Sep 17 00:00:00 2001
From: Galaxy1458 <55453380+Galaxy1458@users.noreply.github.com>
Date: Fri, 19 May 2023 13:46:16 +0800
Subject: [PATCH] test,test=develop (#53839)

---
 .../phi/kernels/impl/lu_unpack_grad_kernel_impl.h  | 14 +++++++-------
 .../phi/kernels/impl/poisson_grad_kernel_impl.h    |  2 +-
 paddle/phi/kernels/impl/pool_grad_kernel_impl.h    |  6 +++---
 paddle/phi/kernels/impl/pool_kernel_impl.h         |  4 ++--
 paddle/phi/kernels/impl/qr_grad_kernel_impl.h      |  2 +-
 paddle/phi/kernels/impl/renorm_impl.h              | 15 ++++++++-------
 paddle/phi/kernels/impl/rmsprop_kernel_impl.h      |  3 ++-
 .../kernels/impl/sequence_pool_grad_kernel_impl.h  |  6 +++---
 paddle/phi/kernels/impl/svd_grad_kernel_impl.h     |  2 +-
 paddle/phi/kernels/impl/unfold_grad_kernel_impl.h  |  2 +-
 .../phi/kernels/impl/warpctc_grad_kernel_impl.h    |  4 ++--
 paddle/phi/kernels/impl/warpctc_kernel_impl.h      |  2 +-
 .../phi/kernels/impl/warprnnt_grad_kernel_impl.h   |  8 ++++----
 paddle/phi/kernels/legacy/cpu/one_hot_kernel.cc    |  2 +-
 paddle/phi/kernels/legacy/cpu/randint_kernel.cc    |  2 +-
 .../phi/kernels/onednn/activation_grad_kernel.cc   |  2 +-
 .../phi/kernels/onednn/conv_transpose_kernel.cc    |  6 +++---
 paddle/phi/kernels/onednn/expand_grad_kernel.cc    |  4 ++--
 paddle/phi/kernels/onednn/matmul_grad_kernel.cc    |  2 +-
 19 files changed, 45 insertions(+), 43 deletions(-)

diff --git a/paddle/phi/kernels/impl/lu_unpack_grad_kernel_impl.h b/paddle/phi/kernels/impl/lu_unpack_grad_kernel_impl.h
index 648e12bb26a..7098b745e6d 100644
--- a/paddle/phi/kernels/impl/lu_unpack_grad_kernel_impl.h
+++ b/paddle/phi/kernels/impl/lu_unpack_grad_kernel_impl.h
@@ -20,15 +20,15 @@ namespace phi {
 
 template <typename T, typename Context>
 void LUUnpackGradKernel(const Context& dev_ctx,
-                        const DenseTensor& x,
-                        const DenseTensor& pivots,
-                        const DenseTensor& l,
-                        const DenseTensor& u,
-                        const DenseTensor& pmat,
+                        const DenseTensor& x UNUSED,
+                        const DenseTensor& pivots UNUSED,
+                        const DenseTensor& l UNUSED,
+                        const DenseTensor& u UNUSED,
+                        const DenseTensor& pmat UNUSED,
                         const DenseTensor& l_grad,
                         const DenseTensor& u_grad,
-                        bool unpack_ludata,
-                        bool unpack_pivots,
+                        bool unpack_ludata UNUSED,
+                        bool unpack_pivots UNUSED,
                         DenseTensor* x_grad) {
   dev_ctx.template Alloc<T>(x_grad);
 
diff --git a/paddle/phi/kernels/impl/poisson_grad_kernel_impl.h b/paddle/phi/kernels/impl/poisson_grad_kernel_impl.h
index 17b6d7516e0..797039cf0b3 100644
--- a/paddle/phi/kernels/impl/poisson_grad_kernel_impl.h
+++ b/paddle/phi/kernels/impl/poisson_grad_kernel_impl.h
@@ -21,7 +21,7 @@ namespace phi {
 
 template <typename T, typename Context>
 void PoissonGradKernel(const Context& ctx,
-                       const DenseTensor& out_grad,
+                       const DenseTensor& out_grad UNUSED,
                        DenseTensor* x_grad) {
   ctx.template Alloc<T>(x_grad);
   phi::funcs::SetConstant<Context, T> functor;
diff --git a/paddle/phi/kernels/impl/pool_grad_kernel_impl.h b/paddle/phi/kernels/impl/pool_grad_kernel_impl.h
index e53018f2291..e3e19370c86 100644
--- a/paddle/phi/kernels/impl/pool_grad_kernel_impl.h
+++ b/paddle/phi/kernels/impl/pool_grad_kernel_impl.h
@@ -142,7 +142,7 @@ void PoolGradRawKernel(const Context& ctx,
 
 template <typename T, typename Context>
 void MaxPoolWithIndexGradRawKernel(const Context& ctx,
-                                   const DenseTensor& x,
+                                   const DenseTensor& x UNUSED,
                                    const DenseTensor& mask,
                                    const DenseTensor& dout,
                                    const std::vector<int>& kernel_size,
@@ -192,7 +192,7 @@ void Pool2dGradKernel(const Context& ctx,
                       const IntArray& kernel_size,
                       const std::vector<int>& strides,
                       const std::vector<int>& paddings,
-                      bool ceil_mode,
+                      bool ceil_mode UNUSED,
                       bool exclusive,
                       const std::string& data_format,
                       const std::string& pooling_type,
@@ -283,7 +283,7 @@ void Pool3dGradKernel(const Context& ctx,
                       const std::vector<int>& kernel_size,
                       const std::vector<int>& strides,
                       const std::vector<int>& paddings,
-                      bool ceil_mode,
+                      bool ceil_mode UNUSED,
                       bool exclusive,
                       const std::string& data_format,
                       const std::string& pooling_type,
diff --git a/paddle/phi/kernels/impl/pool_kernel_impl.h b/paddle/phi/kernels/impl/pool_kernel_impl.h
index 7f08f0bd793..a2a6705a683 100644
--- a/paddle/phi/kernels/impl/pool_kernel_impl.h
+++ b/paddle/phi/kernels/impl/pool_kernel_impl.h
@@ -228,7 +228,7 @@ void Pool2dKernel(const Context& ctx,
                   const IntArray& kernel_size,
                   const std::vector<int>& strides,
                   const std::vector<int>& paddings,
-                  bool ceil_mode,
+                  bool ceil_mode UNUSED,
                   bool exclusive,
                   const std::string& data_format,
                   const std::string& pooling_type,
@@ -279,7 +279,7 @@ void Pool3dKernel(const Context& ctx,
                   const std::vector<int>& kernel_size,
                   const std::vector<int>& strides,
                   const std::vector<int>& paddings,
-                  bool ceil_mode,
+                  bool ceil_mode UNUSED,
                   bool exclusive,
                   const std::string& data_format,
                   const std::string& pooling_type,
diff --git a/paddle/phi/kernels/impl/qr_grad_kernel_impl.h b/paddle/phi/kernels/impl/qr_grad_kernel_impl.h
index ae0609766eb..d22eca3c733 100644
--- a/paddle/phi/kernels/impl/qr_grad_kernel_impl.h
+++ b/paddle/phi/kernels/impl/qr_grad_kernel_impl.h
@@ -88,7 +88,7 @@ void QrGradKernel(const Context& ctx,
   auto m_gt_n_case = [](const Context& ctx,
                         const DenseTensor& dQ,
                         const DenseTensor& dR,
-                        const DenseTensor& A,
+                        const DenseTensor& A UNUSED,
                         const DenseTensor& Q,
                         const DenseTensor& R) -> DenseTensor {
     // Hai-Jun Liao, Jin-Guo Liu, Lei Wang, Tao Xiang (2019). Differentiable
diff --git a/paddle/phi/kernels/impl/renorm_impl.h b/paddle/phi/kernels/impl/renorm_impl.h
index edd32473085..d206e29a69a 100644
--- a/paddle/phi/kernels/impl/renorm_impl.h
+++ b/paddle/phi/kernels/impl/renorm_impl.h
@@ -32,14 +32,14 @@ namespace phi {
 namespace funcs {
 
 template <typename T>
-void RenormFunc(const phi::CPUContext& ctx,
+void RenormFunc(const phi::CPUContext& ctx UNUSED,
                 const T* x_data,
                 T* out_data,
                 float p,
                 int dim,
                 float max_norm,
                 int64_t dimension_each,
-                phi::DDim& input_dims,
+                const phi::DDim& input_dims,
                 int64_t numel) {
   auto dim_size = input_dims.size();
   int64_t dim_divisor = 1;
@@ -83,7 +83,7 @@ void RenormFunc(const phi::CPUContext& ctx,
 }
 
 template <typename T>
-void RenormGradFunc(const phi::CPUContext& ctx,
+void RenormGradFunc(const phi::CPUContext& ctx UNUSED,
                     const T* x_data,
                     const T* dout_data,
                     T* dx_data,
@@ -91,7 +91,7 @@ void RenormGradFunc(const phi::CPUContext& ctx,
                     int dim,
                     float max_norm,
                     int64_t dimension_each,
-                    phi::DDim& input_dims,
+                    const phi::DDim& input_dims,
                     int64_t numel) {
   auto dim_size = input_dims.size();
   int64_t dim_divisor = 1;
@@ -116,8 +116,9 @@ void RenormGradFunc(const phi::CPUContext& ctx,
       dim_power_sum[i] =
           std::pow(dim_value[i], (T)(-1.0 - 1.0 / p)) * -1 * max_norm;
       dim_value[i] = max_norm / temp;
-    } else
+    } else {
       dim_value[i] = 1.0;
+    }
   }
   index = dim_index = 0;
   for (int64_t i = 0; i < numel; i++) {
@@ -271,7 +272,7 @@ void RenormFunc(const phi::GPUContext& ctx,
                 int dim,
                 float max_norm,
                 int64_t dimension_each,
-                phi::DDim& input_dims,
+                const phi::DDim& input_dims,
                 int64_t numel) {
   auto dim_size = input_dims.size();
   DenseTensor pow_value, dim_value;
@@ -307,7 +308,7 @@ void RenormGradFunc(const phi::GPUContext& ctx,
                     int dim,
                     float max_norm,
                     int64_t dimension_each,
-                    phi::DDim& input_dims,
+                    const phi::DDim& input_dims,
                     int64_t numel) {
   auto dim_size = input_dims.size();
   int64_t dim_divisor = 1, pre_mul = 1;
diff --git a/paddle/phi/kernels/impl/rmsprop_kernel_impl.h b/paddle/phi/kernels/impl/rmsprop_kernel_impl.h
index c01fbaf4191..881931fce53 100644
--- a/paddle/phi/kernels/impl/rmsprop_kernel_impl.h
+++ b/paddle/phi/kernels/impl/rmsprop_kernel_impl.h
@@ -225,7 +225,8 @@ void RmspropSparseKernel(const Context &ctx,
                          const DenseTensor &moment,
                          const DenseTensor &learning_rate,
                          const paddle::optional<DenseTensor> &mean_grad_opt,
-                         const paddle::optional<DenseTensor> &master_param,
+                         const paddle::optional<DenseTensor> &master_param
+                             UNUSED,
                          float epsilon_t,
                          float decay_t,
                          float momentum_t,
diff --git a/paddle/phi/kernels/impl/sequence_pool_grad_kernel_impl.h b/paddle/phi/kernels/impl/sequence_pool_grad_kernel_impl.h
index da9bdc1f1fd..4b4827ff9ee 100644
--- a/paddle/phi/kernels/impl/sequence_pool_grad_kernel_impl.h
+++ b/paddle/phi/kernels/impl/sequence_pool_grad_kernel_impl.h
@@ -20,12 +20,12 @@ namespace phi {
 
 template <typename T, typename Context>
 void SequencePoolGradKernel(const Context& dev_ctx,
-                            const DenseTensor& x,
+                            const DenseTensor& x UNUSED,
                             const paddle::optional<DenseTensor>& max_index,
                             const DenseTensor& out_grad,
-                            bool is_test,
+                            bool is_test UNUSED,
                             const std::string& pooltype,
-                            float pad_value,
+                            float pad_value UNUSED,
                             DenseTensor* x_grad) {
   const phi::DenseTensor* index = nullptr;
   if (pooltype == "MAX") {
diff --git a/paddle/phi/kernels/impl/svd_grad_kernel_impl.h b/paddle/phi/kernels/impl/svd_grad_kernel_impl.h
index f96073a21bb..13c86aa5761 100644
--- a/paddle/phi/kernels/impl/svd_grad_kernel_impl.h
+++ b/paddle/phi/kernels/impl/svd_grad_kernel_impl.h
@@ -67,7 +67,7 @@ static DenseTensor Unsqueeze(const DenseTensor& x, int axis = 0) {
 
 template <typename T, typename Context>
 void SvdGradKernel(const Context& dev_ctx,
-                   const DenseTensor& x,
+                   const DenseTensor& x UNUSED,
                    const DenseTensor& u,
                    const DenseTensor& vh,
                    const DenseTensor& s,
diff --git a/paddle/phi/kernels/impl/unfold_grad_kernel_impl.h b/paddle/phi/kernels/impl/unfold_grad_kernel_impl.h
index 78bd068041d..28f03420918 100644
--- a/paddle/phi/kernels/impl/unfold_grad_kernel_impl.h
+++ b/paddle/phi/kernels/impl/unfold_grad_kernel_impl.h
@@ -25,7 +25,7 @@ namespace phi {
 
 template <typename T, typename Context>
 void UnfoldGradKernel(const Context& ctx,
-                      const DenseTensor& x,
+                      const DenseTensor& x UNUSED,
                       const DenseTensor& out_grad,
                       const std::vector<int>& kernel_sizes,
                       const std::vector<int>& strides,
diff --git a/paddle/phi/kernels/impl/warpctc_grad_kernel_impl.h b/paddle/phi/kernels/impl/warpctc_grad_kernel_impl.h
index 24b4f31a623..0c4d731b263 100644
--- a/paddle/phi/kernels/impl/warpctc_grad_kernel_impl.h
+++ b/paddle/phi/kernels/impl/warpctc_grad_kernel_impl.h
@@ -29,11 +29,11 @@ namespace phi {
 
 template <typename T, typename Context>
 void WarpctcGradKernel(const Context& dev_ctx,
-                       const DenseTensor& logits,
+                       const DenseTensor& logits UNUSED,
                        const paddle::optional<DenseTensor>& logits_length,
                        const DenseTensor& warpctcgrad,
                        const DenseTensor& loss_grad,
-                       int blank,
+                       int blank UNUSED,
                        bool norm_by_times,
                        DenseTensor* logits_grad) {
   dev_ctx.template Alloc<T>(logits_grad);
diff --git a/paddle/phi/kernels/impl/warpctc_kernel_impl.h b/paddle/phi/kernels/impl/warpctc_kernel_impl.h
index b15380886f6..4b4bd6f5143 100644
--- a/paddle/phi/kernels/impl/warpctc_kernel_impl.h
+++ b/paddle/phi/kernels/impl/warpctc_kernel_impl.h
@@ -233,7 +233,7 @@ void WarpctcKernel(const Context& dev_ctx,
                    const paddle::optional<DenseTensor>& logits_length,
                    const paddle::optional<DenseTensor>& labels_length,
                    int blank,
-                   bool norm_by_times,
+                   bool norm_by_times UNUSED,
                    DenseTensor* loss,
                    DenseTensor* warpctcgrad) {
   size_t num_sequences, sequence_width, max_sequence_length;
diff --git a/paddle/phi/kernels/impl/warprnnt_grad_kernel_impl.h b/paddle/phi/kernels/impl/warprnnt_grad_kernel_impl.h
index 62123a8e98c..4ca08a71956 100644
--- a/paddle/phi/kernels/impl/warprnnt_grad_kernel_impl.h
+++ b/paddle/phi/kernels/impl/warprnnt_grad_kernel_impl.h
@@ -25,12 +25,12 @@ namespace phi {
 
 template <typename T, typename Context>
 void WarprnntGradKernel(const Context& dev_ctx,
-                        const DenseTensor& input,
-                        const DenseTensor& input_lengths,
+                        const DenseTensor& input UNUSED,
+                        const DenseTensor& input_lengths UNUSED,
                         const DenseTensor& warprnntgrad,
                         const DenseTensor& loss_grad,
-                        int blank,
-                        float fastemit_lambda,
+                        int blank UNUSED,
+                        float fastemit_lambda UNUSED,
                         DenseTensor* input_grad) {
   dev_ctx.template Alloc<T>(input_grad);
 
diff --git a/paddle/phi/kernels/legacy/cpu/one_hot_kernel.cc b/paddle/phi/kernels/legacy/cpu/one_hot_kernel.cc
index 040a8559914..c31589bce90 100644
--- a/paddle/phi/kernels/legacy/cpu/one_hot_kernel.cc
+++ b/paddle/phi/kernels/legacy/cpu/one_hot_kernel.cc
@@ -67,7 +67,7 @@ void OneHotRawKernel(const Context& dev_ctx,
                      const DenseTensor& x,
                      const Scalar& depth,
                      DataType dtype,
-                     bool allow_out_of_range,
+                     bool allow_out_of_range UNUSED,
                      DenseTensor* out) {
   auto depth_v = depth.to<int>();
   auto out_dims = out->dims();
diff --git a/paddle/phi/kernels/legacy/cpu/randint_kernel.cc b/paddle/phi/kernels/legacy/cpu/randint_kernel.cc
index f246ef8faf7..6b988f6294a 100644
--- a/paddle/phi/kernels/legacy/cpu/randint_kernel.cc
+++ b/paddle/phi/kernels/legacy/cpu/randint_kernel.cc
@@ -26,7 +26,7 @@ void RandintWithSeedKernel(const Context& dev_ctx,
                            int low,
                            int high,
                            const IntArray& shape,
-                           DataType dtype,
+                           DataType dtype UNUSED,
                            int seed,
                            DenseTensor* out) {
   out->Resize(phi::make_ddim(shape.GetData()));
diff --git a/paddle/phi/kernels/onednn/activation_grad_kernel.cc b/paddle/phi/kernels/onednn/activation_grad_kernel.cc
index 9b8626254c7..7239d6dadf8 100644
--- a/paddle/phi/kernels/onednn/activation_grad_kernel.cc
+++ b/paddle/phi/kernels/onednn/activation_grad_kernel.cc
@@ -217,7 +217,7 @@ void SwishGradKernel(const Context& dev_ctx,
 
 template <typename T>
 void EluGradKernel(const Context& dev_ctx,
-                   const DenseTensor& x,
+                   const DenseTensor& x UNUSED,
                    const DenseTensor& out,
                    const DenseTensor& dout,
                    float alpha,
diff --git a/paddle/phi/kernels/onednn/conv_transpose_kernel.cc b/paddle/phi/kernels/onednn/conv_transpose_kernel.cc
index bb114f18abb..6314eaa8902 100644
--- a/paddle/phi/kernels/onednn/conv_transpose_kernel.cc
+++ b/paddle/phi/kernels/onednn/conv_transpose_kernel.cc
@@ -372,12 +372,12 @@ void Conv2dTransposeKernel(const Context& dev_ctx,
                            const DenseTensor& filter,
                            const std::vector<int>& strides,
                            const std::vector<int>& paddings,
-                           const std::vector<int>& output_padding,
-                           const IntArray& output_size,
+                           const std::vector<int>& output_padding UNUSED,
+                           const IntArray& output_size UNUSED,
                            const std::string& padding_algorithm,
                            int groups,
                            const std::vector<int>& dilations,
-                           const std::string& data_format,
+                           const std::string& data_format UNUSED,
                            DenseTensor* out) {
   PADDLE_ENFORCE_EQ(dev_ctx.GetPlace().GetType(),
                     AllocationType::CPU,
diff --git a/paddle/phi/kernels/onednn/expand_grad_kernel.cc b/paddle/phi/kernels/onednn/expand_grad_kernel.cc
index 8650a90f064..4f4ef1fd544 100644
--- a/paddle/phi/kernels/onednn/expand_grad_kernel.cc
+++ b/paddle/phi/kernels/onednn/expand_grad_kernel.cc
@@ -20,9 +20,9 @@ namespace phi {
 
 template <typename T, typename Context>
 void ExpandGradKernel(const Context& dev_ctx,
-                      const DenseTensor& x,
+                      const DenseTensor& x UNUSED,
                       const DenseTensor& out_grad,
-                      const IntArray& shape,
+                      const IntArray& shape UNUSED,
                       DenseTensor* in_grad) {
   const auto& onednn_engine = dev_ctx.GetEngine();
 
diff --git a/paddle/phi/kernels/onednn/matmul_grad_kernel.cc b/paddle/phi/kernels/onednn/matmul_grad_kernel.cc
index e1605ac3f2a..0dcc7195800 100644
--- a/paddle/phi/kernels/onednn/matmul_grad_kernel.cc
+++ b/paddle/phi/kernels/onednn/matmul_grad_kernel.cc
@@ -84,7 +84,7 @@ template <typename T>
 void ReduceSumForMatmulGradOutput(const OneDNNContext &dev_ctx,
                                   const DenseTensor *dx_tmp,
                                   DenseTensor *dx,
-                                  const std::vector<int64_t> &dx_dims,
+                                  const std::vector<int64_t> &dx_dims UNUSED,
                                   const std::vector<int64_t> &x_dims) {
   funcs::ReductionOneDNNHandler<T> handler(dnnl::algorithm::reduction_sum,
                                            0.0f,
-- 
GitLab