diff --git a/cmake/flags.cmake b/cmake/flags.cmake
index 765bfefa427b64a2457b2355cd450f7bed367e38..7dbad9621f9cbc0957c3e7f34eca05a50ad80a61 100644
--- a/cmake/flags.cmake
+++ b/cmake/flags.cmake
@@ -154,7 +154,6 @@ if(NOT WIN32)
       -Wno-error=terminate # Warning in PADDLE_ENFORCE
       -Wno-error=int-in-bool-context # Warning in Eigen gcc 7.2
       -Wimplicit-fallthrough=0 # Warning in tinyformat.h
-      -Wno-error=maybe-uninitialized # Warning in Paddle-Lite
       ${fsanitize})
 
   if(WITH_IPU)
diff --git a/paddle/fluid/framework/ir/delete_weight_dequant_linear_op_pass.cc b/paddle/fluid/framework/ir/delete_weight_dequant_linear_op_pass.cc
index 5a6796550619b912c7e6a6729b7688e9dd67de96..40453313d3c797f2580bcf358f544bfa8b57c86e 100644
--- a/paddle/fluid/framework/ir/delete_weight_dequant_linear_op_pass.cc
+++ b/paddle/fluid/framework/ir/delete_weight_dequant_linear_op_pass.cc
@@ -45,8 +45,8 @@ void DeleteWeightDequantLinearOpPass::ApplyImpl(ir::Graph* graph) const {
     if (n->IsOp()) {
       auto* op = n->Op();
       if (op->Type() == "dequantize_linear") {
-        Node *weight_var_node, *dequantized_weight_var_node, *scale_var_node,
-            *calcu_op_node, *while_op_node;
+        Node *weight_var_node, *calcu_op_node, *while_op_node;
+        Node *dequantized_weight_var_node = nullptr, *scale_var_node = nullptr;
         // 1. Judge whether for dequant weight and find
         // weight_var_node/scale_var_node
         for (auto* input_node : n->inputs) {
diff --git a/paddle/fluid/inference/tensorrt/convert/op_converter.h b/paddle/fluid/inference/tensorrt/convert/op_converter.h
index 7d6a2772a5679be8376935c3fc07993a3d041f20..db19e5c45d3dec71d13bb5e1bec7cb538b365176 100644
--- a/paddle/fluid/inference/tensorrt/convert/op_converter.h
+++ b/paddle/fluid/inference/tensorrt/convert/op_converter.h
@@ -602,6 +602,9 @@ class OpConverter {
       const std::string& layer_type,
       const std::vector<std::string>& output_tensor_names,
       bool test_mode = false) {
+    if (layer == nullptr) {
+      return;
+    }
     size_t num_out = output_tensor_names.size();
     std::string layer_name = layer_type + " (Output: ";
     for (size_t i = 0; i < num_out; i++) {
diff --git a/paddle/fluid/inference/tensorrt/convert/unary_op.cc b/paddle/fluid/inference/tensorrt/convert/unary_op.cc
index 3186bf5fd33d01f904f136badbe758e4d44293d0..ce9b7e5b2f653f71d24be05841862957de020503 100644
--- a/paddle/fluid/inference/tensorrt/convert/unary_op.cc
+++ b/paddle/fluid/inference/tensorrt/convert/unary_op.cc
@@ -52,7 +52,7 @@ class UnaryOpConverter : public OpConverter {
     nvinfer1::ITensor* input_tensor =
         engine_->GetITensor(op_desc.Input("X")[0]);
     auto op_pair = ops.find(op_type_);
-    nvinfer1::ILayer* layer;
+    nvinfer1::ILayer* layer = nullptr;
 #if !IS_TRT_VERSION_GE(8500)
     nvinfer1::DataType org_type = input_tensor->getType();
     bool cast = org_type == nvinfer1::DataType::kINT8 ||
diff --git a/paddle/fluid/operators/fused/fused_gemm_epilogue_op_xpu.cc b/paddle/fluid/operators/fused/fused_gemm_epilogue_op_xpu.cc
index 687ce97068a35b14955357be4c31d8fc10145a9f..82b437b943cb43f7ef682355561e425642bd9ec0 100644
--- a/paddle/fluid/operators/fused/fused_gemm_epilogue_op_xpu.cc
+++ b/paddle/fluid/operators/fused/fused_gemm_epilogue_op_xpu.cc
@@ -142,7 +142,7 @@ class FusedGemmEpilogueXPUGradKernel : public framework::OpKernel<T> {
         (reserve_space == NULL) ?
             (reinterpret_cast<const XPUType*>(NULL)) :
             (reinterpret_cast<const XPUType*>(reserve_space->data<T>()));
-    XPUType* d_act_input_ptr;
+    XPUType* d_act_input_ptr = NULL;
     if (activation != "none") {
       d_act_input_ptr = RAII_GUARD.alloc_l3_or_gm<XPUType>(dout->numel());
       dout_fc_ptr = d_act_input_ptr;
diff --git a/paddle/phi/kernels/xpu/pad3d_grad_kernel.cc b/paddle/phi/kernels/xpu/pad3d_grad_kernel.cc
index 035ab6c4b1adcb47de372bf03ed02b6f05c56303..d5960b02cf91ac833ebfa022eb7d44a605acdcd8 100644
--- a/paddle/phi/kernels/xpu/pad3d_grad_kernel.cc
+++ b/paddle/phi/kernels/xpu/pad3d_grad_kernel.cc
@@ -34,7 +34,7 @@ void Pad3dGradKernel(const Context& dev_ctx,
 
   auto* d_out = &out_grad;
   auto* d_in = x_grad;
-  auto d_in_dims = d_in->dims();
+  auto d_in_dims = vectorize(d_in->dims());
   const T* d_out_data = d_out->data<T>();
   T* d_in_data = dev_ctx.template Alloc<T>(d_in);
 
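With `-Wno-error=maybe-uninitialized` removed from COMMON_FLAGS, GCC's `-Wmaybe-uninitialized` diagnostics are promoted back to errors, and the C++ hunks above initialize the pointers that were only assigned on some control-flow paths (`layer`, `d_act_input_ptr`, `dequantized_weight_var_node`, `scale_var_node`). A minimal sketch of the warning class being fixed, using illustrative names that are not from this patch:

    // warn.cc -- compile with: g++ -O2 -Wmaybe-uninitialized -Werror warn.cc
    #include <cstdio>

    int* pick(bool use_heap) {
      int* p;            // only assigned when use_heap is true
      if (use_heap) {
        p = new int(42);
      }
      return p;          // gcc: 'p' may be used uninitialized
    }

    int main() {
      int* q = pick(true);
      std::printf("%d\n", *q);  // prints 42
      delete q;
      return 0;
    }

Initializing the declaration (`int* p = nullptr;`), as the patch does for each of the variables above, gives the unassigned path a well-defined value and clears the diagnostic without changing behavior on the paths that do assign.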