From 7e56147de9a570355d4f83315a0a707a6d064678 Mon Sep 17 00:00:00 2001
From: Wang Xin
Date: Thu, 9 Mar 2023 10:54:49 +0800
Subject: [PATCH] fix maybe-uninitialized compiler warning in Linux (#51336)

---
 cmake/flags.cmake                                          | 1 -
 .../framework/ir/delete_weight_dequant_linear_op_pass.cc  | 4 ++--
 paddle/fluid/inference/tensorrt/convert/op_converter.h    | 3 +++
 paddle/fluid/inference/tensorrt/convert/unary_op.cc       | 2 +-
 paddle/fluid/operators/fused/fused_gemm_epilogue_op_xpu.cc | 2 +-
 paddle/phi/kernels/xpu/pad3d_grad_kernel.cc               | 2 +-
 6 files changed, 8 insertions(+), 6 deletions(-)

diff --git a/cmake/flags.cmake b/cmake/flags.cmake
index 765bfefa427..7dbad9621f9 100644
--- a/cmake/flags.cmake
+++ b/cmake/flags.cmake
@@ -154,7 +154,6 @@ if(NOT WIN32)
       -Wno-error=terminate # Warning in PADDLE_ENFORCE
       -Wno-error=int-in-bool-context # Warning in Eigen gcc 7.2
       -Wimplicit-fallthrough=0 # Warning in tinyformat.h
-      -Wno-error=maybe-uninitialized # Warning in Paddle-Lite
       ${fsanitize})
 
   if(WITH_IPU)
diff --git a/paddle/fluid/framework/ir/delete_weight_dequant_linear_op_pass.cc b/paddle/fluid/framework/ir/delete_weight_dequant_linear_op_pass.cc
index 5a679655061..40453313d3c 100644
--- a/paddle/fluid/framework/ir/delete_weight_dequant_linear_op_pass.cc
+++ b/paddle/fluid/framework/ir/delete_weight_dequant_linear_op_pass.cc
@@ -45,8 +45,8 @@ void DeleteWeightDequantLinearOpPass::ApplyImpl(ir::Graph* graph) const {
     if (n->IsOp()) {
       auto* op = n->Op();
       if (op->Type() == "dequantize_linear") {
-        Node *weight_var_node, *dequantized_weight_var_node, *scale_var_node,
-            *calcu_op_node, *while_op_node;
+        Node *weight_var_node, *calcu_op_node, *while_op_node;
+        Node *dequantized_weight_var_node = nullptr, *scale_var_node = nullptr;
         // 1. Judge whether for dequant weight and find
         // weight_var_node/scale_var_node
         for (auto* input_node : n->inputs) {
diff --git a/paddle/fluid/inference/tensorrt/convert/op_converter.h b/paddle/fluid/inference/tensorrt/convert/op_converter.h
index 7d6a2772a56..db19e5c45d3 100644
--- a/paddle/fluid/inference/tensorrt/convert/op_converter.h
+++ b/paddle/fluid/inference/tensorrt/convert/op_converter.h
@@ -602,6 +602,9 @@ class OpConverter {
                                 const std::string& layer_type,
                                 const std::vector<std::string>& output_tensor_names,
                                 bool test_mode = false) {
+    if (layer == nullptr) {
+      return;
+    }
     size_t num_out = output_tensor_names.size();
     std::string layer_name = layer_type + " (Output: ";
     for (size_t i = 0; i < num_out; i++) {
diff --git a/paddle/fluid/inference/tensorrt/convert/unary_op.cc b/paddle/fluid/inference/tensorrt/convert/unary_op.cc
index 3186bf5fd33..ce9b7e5b2f6 100644
--- a/paddle/fluid/inference/tensorrt/convert/unary_op.cc
+++ b/paddle/fluid/inference/tensorrt/convert/unary_op.cc
@@ -52,7 +52,7 @@ class UnaryOpConverter : public OpConverter {
     nvinfer1::ITensor* input_tensor =
         engine_->GetITensor(op_desc.Input("X")[0]);
     auto op_pair = ops.find(op_type_);
-    nvinfer1::ILayer* layer;
+    nvinfer1::ILayer* layer = nullptr;
 #if !IS_TRT_VERSION_GE(8500)
     nvinfer1::DataType org_type = input_tensor->getType();
     bool cast = org_type == nvinfer1::DataType::kINT8 ||
diff --git a/paddle/fluid/operators/fused/fused_gemm_epilogue_op_xpu.cc b/paddle/fluid/operators/fused/fused_gemm_epilogue_op_xpu.cc
index 687ce97068a..82b437b943c 100644
--- a/paddle/fluid/operators/fused/fused_gemm_epilogue_op_xpu.cc
+++ b/paddle/fluid/operators/fused/fused_gemm_epilogue_op_xpu.cc
@@ -142,7 +142,7 @@ class FusedGemmEpilogueXPUGradKernel : public framework::OpKernel<T> {
         (reserve_space == NULL)
             ? (reinterpret_cast<const XPUType*>(NULL))
             : (reinterpret_cast<const XPUType*>(reserve_space->data()));
-    XPUType* d_act_input_ptr;
+    XPUType* d_act_input_ptr = NULL;
     if (activation != "none") {
       d_act_input_ptr = RAII_GUARD.alloc_l3_or_gm<XPUType>(dout->numel());
       dout_fc_ptr = d_act_input_ptr;
diff --git a/paddle/phi/kernels/xpu/pad3d_grad_kernel.cc b/paddle/phi/kernels/xpu/pad3d_grad_kernel.cc
index 035ab6c4b1a..d5960b02cf9 100644
--- a/paddle/phi/kernels/xpu/pad3d_grad_kernel.cc
+++ b/paddle/phi/kernels/xpu/pad3d_grad_kernel.cc
@@ -34,7 +34,7 @@ void Pad3dGradKernel(const Context& dev_ctx,
 
   auto* d_out = &out_grad;
   auto* d_in = x_grad;
-  auto d_in_dims = d_in->dims();
+  auto d_in_dims = vectorize(d_in->dims());
   const T* d_out_data = d_out->data<T>();
   T* d_in_data = dev_ctx.template Alloc<T>(d_in);
 
--
GitLab