diff --git a/cmake/flags.cmake b/cmake/flags.cmake
index 0267f251e490f9a7970c3a4230e9c39cf462e5f8..6a0e1704bfa5dfe62346b3063ab9a169b3487ab9 100644
--- a/cmake/flags.cmake
+++ b/cmake/flags.cmake
@@ -170,13 +170,7 @@ if(NOT WIN32)
   if(NOT APPLE)
     if((${CMAKE_CXX_COMPILER_VERSION} VERSION_GREATER 8.0) OR (WITH_ROCM))
       set(COMMON_FLAGS
-          ${COMMON_FLAGS}
-          -Wno-format-truncation # Warning in boost gcc 8.2
-          -Wno-error=parentheses # Warning in boost gcc 8.2
-          -Wno-error=catch-value # Warning in boost gcc 8.2
-          -Wno-error=nonnull-compare # Warning in boost gcc 8.2
-          -Wno-error=address # Warning in boost gcc 8.2
-          -Wno-ignored-qualifiers # Warning in boost gcc 8.2
+          ${COMMON_FLAGS} -Wno-ignored-qualifiers # Warning in Paddle-Lite
           -Wno-ignored-attributes # Warning in Eigen gcc 8.3
           -Wno-parentheses # Warning in Eigen gcc 8.3
       )
diff --git a/paddle/fluid/operators/dgc_op.h b/paddle/fluid/operators/dgc_op.h
index 44121a9434c729eeafa4e9fd549b0df7839c48ab..2757b41dd7c5c2c6ac15fce50789b27ca34753b0 100644
--- a/paddle/fluid/operators/dgc_op.h
+++ b/paddle/fluid/operators/dgc_op.h
@@ -68,7 +68,7 @@ class DGCOpKernel : public framework::OpKernel<T> {
     // nranks
     auto nranks_tensor = ctx.Input<framework::Tensor>("nranks");
-    const int nranks = static_cast<const int>(*nranks_tensor->data<float>());
+    const int nranks = static_cast<int>(*nranks_tensor->data<float>());
 
     PADDLE_ENFORCE_GT(nranks,
                       1,
                       platform::errors::PreconditionNotMet(
diff --git a/paddle/fluid/operators/mlu/mlu_baseop.cc b/paddle/fluid/operators/mlu/mlu_baseop.cc
index 04e3063dd70878b2146bbe809c9504efb7d013c7..d205bc2b2554d5649b92a2941cf37b3ca195edb8 100644
--- a/paddle/fluid/operators/mlu/mlu_baseop.cc
+++ b/paddle/fluid/operators/mlu/mlu_baseop.cc
@@ -1610,9 +1610,9 @@ MLURNNDesc::~MLURNNDesc() {
                                     const float alpha1_float,
                                     const float alpha2_float,
                                     const float beta_float) {
-  const int alpha1_int = static_cast<const int>(alpha1_float);
-  const int alpha2_int = static_cast<const int>(alpha2_float);
-  const int beta_int = static_cast<const int>(beta_float);
+  const int alpha1_int = static_cast<int>(alpha1_float);
+  const int alpha2_int = static_cast<int>(alpha2_float);
+  const int beta_int = static_cast<int>(beta_float);
 
   const void* alpha1_ptr = static_cast<const void*>(&alpha1_float);
   const void* alpha2_ptr = static_cast<const void*>(&alpha2_float);
diff --git a/paddle/fluid/operators/optimizers/dgc_momentum_op.h b/paddle/fluid/operators/optimizers/dgc_momentum_op.h
index 7cb3ed8e80efa4332e93e1a613862117c5f6884c..bf9c9ff1e96ba8f4e4214104db6d355c56788c7f 100644
--- a/paddle/fluid/operators/optimizers/dgc_momentum_op.h
+++ b/paddle/fluid/operators/optimizers/dgc_momentum_op.h
@@ -39,7 +39,7 @@ class DGCMomentumKernel : public framework::OpKernel<T> {
     // nranks
     auto nranks_tensor = context.Input<framework::Tensor>("nranks");
-    const int nranks = static_cast<const int>(*nranks_tensor->data<float>());
+    const int nranks = static_cast<int>(*nranks_tensor->data<float>());
 
     PADDLE_ENFORCE_GT(
         nranks,
         1,
diff --git a/paddle/fluid/operators/reduce_ops/logsumexp_op_xpu.cc b/paddle/fluid/operators/reduce_ops/logsumexp_op_xpu.cc
index 68615a44e97c8cfb2e476c1995d93cc13845526f..e250b5585da06a50603d32baec2dd30ba9f51aa9 100644
--- a/paddle/fluid/operators/reduce_ops/logsumexp_op_xpu.cc
+++ b/paddle/fluid/operators/reduce_ops/logsumexp_op_xpu.cc
@@ -33,7 +33,7 @@ class XPULogsumexpKernel : public framework::OpKernel<T> {
 
     const auto& input_dim_size = input->dims().size();
     // The dims has full dim, set the reduce_all is True
-    reduce_all |= (static_cast<const int>(axis.size()) == input_dim_size);
+    reduce_all |= (static_cast<int>(axis.size()) == input_dim_size);
 
     const T* input_data = input->data<T>();
     T* output_data = output->mutable_data<T>(context.GetPlace());
diff --git a/paddle/phi/kernels/cpu/layer_norm_kernel.cc b/paddle/phi/kernels/cpu/layer_norm_kernel.cc
index dbc3da0ca15acae080ab5b2983abbb48f9db1d92..7061d4f0ad7303f17e69ebf5808d1cf9cd4b7a7d 100644
--- a/paddle/phi/kernels/cpu/layer_norm_kernel.cc
+++ b/paddle/phi/kernels/cpu/layer_norm_kernel.cc
@@ -135,7 +135,7 @@ void LayerNormKernel(const Context& dev_ctx,
         scale ? scale->data<T>() : nullptr,
         bias ? bias->data<T>() : nullptr,
         static_cast<int>(left),
-        static_cast<const float>(epsilon),
+        static_cast<float>(epsilon),
         right);
 #endif
 }
diff --git a/paddle/phi/kernels/impl/logsumexp_grad_kernel_impl.h b/paddle/phi/kernels/impl/logsumexp_grad_kernel_impl.h
index 7e5b5ca4f8d4e9a7c2da2fde0b367134d820aaa8..b7c1b2f9969a0881dcd3e1c9d7177296a9190df4 100644
--- a/paddle/phi/kernels/impl/logsumexp_grad_kernel_impl.h
+++ b/paddle/phi/kernels/impl/logsumexp_grad_kernel_impl.h
@@ -61,7 +61,7 @@ void LogsumexpGradKernel(const Context& dev_ctx,
   dev_ctx.template Alloc<T>(in_grad);
 
   const auto input_dim_size = in.dims().size();
-  reduce_all |= (static_cast<const int>(axis.size()) == input_dim_size);
+  reduce_all |= (static_cast<int>(axis.size()) == input_dim_size);
 
   if (reduce_all) {
     auto x = phi::EigenVector<T>::Flatten(in);
diff --git a/paddle/phi/kernels/impl/logsumexp_kernel_impl.h b/paddle/phi/kernels/impl/logsumexp_kernel_impl.h
index 30a118a1317baebb7616cfbb5085978c185d6df6..7f61accaafd031c6d2158765b58ada76387eecf6 100644
--- a/paddle/phi/kernels/impl/logsumexp_kernel_impl.h
+++ b/paddle/phi/kernels/impl/logsumexp_kernel_impl.h
@@ -71,7 +71,7 @@ void LogsumexpKernel(const Context& dev_ctx,
 
   const auto& input_dim_size = x.dims().size();
   // The dims has full dim, set the reduce_all is True
-  reduce_all |= (static_cast<const int>(axis.size()) == input_dim_size);
+  reduce_all |= (static_cast<int>(axis.size()) == input_dim_size);
 
   if (reduce_all) {
     // Flatten and reduce 1-D tensor
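Note: every C++ hunk above makes the same one-token change, dropping the `const` qualifier from the result type of a `static_cast`. A cast to a scalar type yields a prvalue, so the qualifier has no effect, and GCC flags it under `-Wignored-qualifiers` (enabled by `-Wextra`), the warning the old `-Wno-ignored-qualifiers # Warning in boost gcc 8.2` flag suppressed. A minimal standalone sketch of the warning and the fix, for reference only (file name and values are hypothetical, not taken from this PR):

```cpp
// ignored_qualifiers_demo.cc -- hypothetical demo, not part of this PR.
// Build: g++ -Wignored-qualifiers -c ignored_qualifiers_demo.cc

int main() {
  const float nranks_f = 8.0f;  // stands in for *nranks_tensor->data<float>()

  // Old pattern (removed by this diff): the cast result is a prvalue, so
  // GCC warns "type qualifiers ignored on cast result type"
  // [-Wignored-qualifiers].
  const int a = static_cast<const int>(nranks_f);

  // New pattern: identical behavior, no warning.
  const int b = static_cast<int>(nranks_f);

  // Unchanged context lines like static_cast<const void*>(&alpha1_float)
  // are fine: there the const qualifies the pointee, not the cast result.
  const void* p = static_cast<const void*>(&nranks_f);

  return a - b + (p != nullptr ? 0 : 1);  // always returns 0
}
```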