From f72d52e79b79c7b98f52d35dba9c63d2e9ddba8c Mon Sep 17 00:00:00 2001
From: Wilber
Date: Wed, 22 Sep 2021 15:21:11 +0800
Subject: [PATCH] [cherry-pick] trt engine dtor when the last predictor dtor
 (#35881)

* cherry-pick 32842
---
 cmake/external/lite.cmake                     |  2 +-
 paddle/fluid/framework/ir/fc_fuse_pass.cc     |  2 +-
 .../framework/ir/map_matmul_to_mul_pass.cc    |  8 +-
 paddle/fluid/framework/op_desc.h              |  2 +-
 .../fluid/inference/api/analysis_predictor.cc | 12 ++-
 paddle/fluid/inference/tensorrt/op_teller.cc  | 96 +++++++++++++++++++
 paddle/scripts/paddle_build.sh                |  4 +-
 7 files changed, 116 insertions(+), 10 deletions(-)

diff --git a/cmake/external/lite.cmake b/cmake/external/lite.cmake
index e213068377b..33f17f0f3f6 100644
--- a/cmake/external/lite.cmake
+++ b/cmake/external/lite.cmake
@@ -42,7 +42,7 @@ if (NOT LITE_SOURCE_DIR OR NOT LITE_BINARY_DIR)
   set(LITE_INSTALL_DIR ${THIRD_PARTY_PATH}/install/lite)
 
   if(NOT LITE_GIT_TAG)
-    set(LITE_GIT_TAG d3a3a6931b6d22d504d21ba32b3ae972770e9204)
+    set(LITE_GIT_TAG 1c4698c6efd9a5f57a4f8369bd5b6374166f5ba4)
   endif()
 
   if(NOT CUDA_ARCH_NAME)
diff --git a/paddle/fluid/framework/ir/fc_fuse_pass.cc b/paddle/fluid/framework/ir/fc_fuse_pass.cc
index 0bb2782b373..4510aea925e 100644
--- a/paddle/fluid/framework/ir/fc_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/fc_fuse_pass.cc
@@ -135,7 +135,7 @@ int FCFusePass::ApplyFCPattern(Graph* graph, bool with_relu) const {
   }
 
   // Create an FC Node.
-  OpDesc desc;
+  OpDesc desc(mul->Op()->Block());
   desc.SetType("fc");
 
   // Set inputs of fc
diff --git a/paddle/fluid/framework/ir/map_matmul_to_mul_pass.cc b/paddle/fluid/framework/ir/map_matmul_to_mul_pass.cc
index b8666c1c73e..864055cfa36 100644
--- a/paddle/fluid/framework/ir/map_matmul_to_mul_pass.cc
+++ b/paddle/fluid/framework/ir/map_matmul_to_mul_pass.cc
@@ -220,7 +220,7 @@ void MapMatmul2MulPass::ApplyImpl(ir::Graph* graph) const {
       LOG(WARNING) << "Pass in op compat failed.";
       return;
     }
-    OpDesc desc;
+    OpDesc desc(matmul_op->Op()->Block());
     desc.SetType("mul");
     desc.SetInput("X", {matmul_in_x->Name()});
     desc.SetInput("Y", {matmul_in_y->Name()});
@@ -299,7 +299,7 @@ void Squeeze2MatmulFusePass::ApplyImpl(ir::Graph* graph) const {
       LOG(WARNING) << "Pass in op compat failed.";
       return;
     }
-    OpDesc desc;
+    OpDesc desc(matmul_op->Op()->Block());
     desc.SetType("mul");
     desc.SetInput("X", {squeeze2_in_x->Name()});
     desc.SetInput("Y", {matmul_in_y->Name()});
@@ -441,7 +441,7 @@ void Reshape2MatmulFusePass::ApplyImpl(ir::Graph* graph) const {
       LOG(WARNING) << "Pass in op compat failed.";
       return;
     }
-    OpDesc desc;
+    OpDesc desc(matmul_op->Op()->Block());
     desc.SetType("mul");
     desc.SetInput("X", {reshape2_in_x->Name()});
     desc.SetInput("Y", {matmul_in_y->Name()});
@@ -526,7 +526,7 @@ void Flatten2MatmulFusePass::ApplyImpl(ir::Graph* graph) const {
       LOG(WARNING) << "Pass in op compat failed.";
       return;
     }
-    OpDesc desc;
+    OpDesc desc(matmul_op->Op()->Block());
     desc.SetType("mul");
     desc.SetInput("X", {flatten2_in_x->Name()});
     desc.SetInput("Y", {matmul_in_y->Name()});
diff --git a/paddle/fluid/framework/op_desc.h b/paddle/fluid/framework/op_desc.h
index 51e5df3e168..0eafbb027f0 100644
--- a/paddle/fluid/framework/op_desc.h
+++ b/paddle/fluid/framework/op_desc.h
@@ -178,7 +178,7 @@ class OpDesc {
   }
 
   proto::OpDesc desc_;
-  BlockDesc *block_;  // not_own
+  BlockDesc *block_{nullptr};  // not_own
   // input arg name => input variable names
   VariableNameMap inputs_;
   // output arg name => output variable names
diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc
index f8491e2abf7..efeb0da2780 100644
--- a/paddle/fluid/inference/api/analysis_predictor.cc
+++ b/paddle/fluid/inference/api/analysis_predictor.cc
@@ -645,7 +645,17 @@ void AnalysisPredictor::OptimizeInferenceProgram() {
   VLOG(5) << "to prepare executor";
   ARGUMENT_CHECK_FIELD((&argument_), ir_analyzed_program);
   inference_program_.reset(
-      new framework::ProgramDesc(argument_.ir_analyzed_program()));
+      new framework::ProgramDesc(argument_.ir_analyzed_program()),
+      [](framework::ProgramDesc *prog) {
+// Note, please do NOT use any member variables, because member variables may
+// have been destructed in multiple threads.
+#if PADDLE_WITH_TENSORRT
+        paddle::inference::Singleton<
+            inference::tensorrt::TRTEngineManager>::Global()
+            .DeleteAll();
+#endif
+        delete prog;
+      });
   // The config and argument take a lot of storage,
   // when the predictor settings are complete, we release these stores.
   argument_.PartiallyRelease();
diff --git a/paddle/fluid/inference/tensorrt/op_teller.cc b/paddle/fluid/inference/tensorrt/op_teller.cc
index 1b0c6c0a71d..d11b4c68cd2 100644
--- a/paddle/fluid/inference/tensorrt/op_teller.cc
+++ b/paddle/fluid/inference/tensorrt/op_teller.cc
@@ -159,6 +159,12 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8,
   if (op_type == "relu" || op_type == "relu6" || op_type == "tanh" ||
       op_type == "sigmoid") {
     auto* block = desc.Block();
+    if (block == nullptr) {
+      VLOG(3) << "The block desc is nullptr, we can't continue to analyze. "
+                 "Developers need to check whether block_desc is passed in "
+                 "the pass.";
+      return false;
+    }
     auto x_var_name = desc.Input("X")[0];
     auto* x_var_desc = block->FindVar(x_var_name);
     const auto x_shape = x_var_desc->GetShape();
@@ -274,6 +280,12 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8,
 
   if (op_type == "matmul") {
     auto* block = desc.Block();
+    if (block == nullptr) {
+      VLOG(3) << "The block desc is nullptr, we can't continue to analyze. "
+                 "Developers need to check whether block_desc is passed in "
+                 "the pass.";
+      return false;
+    }
     for (auto& param_name : desc.Inputs()) {
       for (auto& var_name : param_name.second) {
         auto* var_desc = block->FindVar(var_name);
@@ -324,6 +336,12 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8,
     if (axis[0] == 0 && axis.size() == 2) return false;
 
     auto* block = desc.Block();
+    if (block == nullptr) {
+      VLOG(3) << "The block desc is nullptr, we can't continue to analyze. "
+                 "Developers need to check whether block_desc is passed in "
+                 "the pass.";
+      return false;
+    }
     auto x_var_name = desc.Input("X")[0];
     auto* x_var_desc = block->FindVar(x_var_name);
     const auto x_shape = x_var_desc->GetShape();
@@ -372,6 +390,12 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8,
       return false;
     } else {
       auto* block = desc.Block();
+      if (block == nullptr) {
+        VLOG(3) << "The block desc is nullptr, we can't continue to analyze. "
+                   "Developers need to check whether block_desc is passed in "
+                   "the pass.";
+        return false;
+      }
       auto* x_var_desc = block->FindVar(desc.Input("X")[0]);
       const auto x_shape = x_var_desc->GetShape();
       if (x_shape.size() == 1) {
@@ -385,6 +409,12 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8,
 
     if (!with_dynamic_shape) return false;
     auto* block = desc.Block();
+    if (block == nullptr) {
+      VLOG(3) << "The block desc is nullptr, we can't continue to analyze. "
" + "Developers need to check whether block_desc is passed in " + "the pass."; + return false; + } auto x_var_name = desc.Input("X")[0]; auto index_var_name = desc.Input("Index")[0]; auto* x_var_desc = block->FindVar(x_var_name); @@ -428,6 +458,12 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8, if (data_layout != framework::DataLayout::kNCHW) return false; auto* block = desc.Block(); + if (block == nullptr) { + VLOG(3) << "The block desc is nullptr, we can't continue to analyze. " + "Developers need to check whether block_desc is passed in " + "the pass."; + return false; + } auto x_var_name = desc.Input("X")[0]; auto* x_var_desc = block->FindVar(x_var_name); const auto x_shape = x_var_desc->GetShape(); @@ -439,6 +475,12 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8, if (op_type == "multiclass_nms") { if (with_dynamic_shape) return false; auto* block = desc.Block(); + if (block == nullptr) { + VLOG(3) << "The block desc is nullptr, we can't continue to analyze. " + "Developers need to check whether block_desc is passed in " + "the pass."; + return false; + } for (auto& param_name : desc.Inputs()) { for (auto& var_name : param_name.second) { auto* var_desc = block->FindVar(var_name); @@ -598,6 +640,12 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8, return false; } auto* block = desc.Block(); + if (block == nullptr) { + VLOG(3) << "The block desc is nullptr, we can't continue to analyze. " + "Developers need to check whether block_desc is passed in " + "the pass."; + return false; + } auto x_var_name = desc.Input("X")[0]; auto* x_var_desc = block->FindVar(x_var_name); const auto x_shape = x_var_desc->GetShape(); @@ -657,6 +705,12 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8, } } auto* block = desc.Block(); + if (block == nullptr) { + VLOG(3) << "The block desc is nullptr, we can't continue to analyze. " + "Developers need to check whether block_desc is passed in " + "the pass."; + return false; + } auto x_var_name = desc.Input("X")[0]; auto* x_var_desc = block->FindVar(x_var_name); const auto x_shape = x_var_desc->GetShape(); @@ -724,6 +778,12 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8, return false; } auto* block = desc.Block(); + if (block == nullptr) { + VLOG(3) << "The block desc is nullptr, we can't continue to analyze. " + "Developers need to check whether block_desc is passed in " + "the pass."; + return false; + } auto* x_var_desc = block->FindVar(desc.Input("X")[0]); auto* y_var_desc = block->FindVar(desc.Input("Y")[0]); const auto x_shape = x_var_desc->GetShape(); @@ -775,6 +835,12 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8, } auto* block = desc.Block(); + if (block == nullptr) { + VLOG(3) << "The block desc is nullptr, we can't continue to analyze. " + "Developers need to check whether block_desc is passed in " + "the pass."; + return false; + } auto x_var_name = desc.Input("X")[0]; auto* x_var_desc = block->FindVar(x_var_name); const auto x_shape = x_var_desc->GetShape(); @@ -856,6 +922,12 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8, } std::vector shape; auto* block = desc.Block(); + if (block == nullptr) { + VLOG(3) << "The block desc is nullptr, we can't continue to analyze. 
" + "Developers need to check whether block_desc is passed in " + "the pass."; + return false; + } for (auto& param_name : desc.Inputs()) { for (auto& var_name : param_name.second) { auto* var_desc = block->FindVar(var_name); @@ -881,6 +953,12 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8, if (op_type == "scale") { auto* block = desc.Block(); + if (block == nullptr) { + VLOG(3) << "The block desc is nullptr, we can't continue to analyze. " + "Developers need to check whether block_desc is passed in " + "the pass."; + return false; + } auto x_var_name = desc.Input("X")[0]; auto* x_var_desc = block->FindVar(x_var_name); const auto x_shape = x_var_desc->GetShape(); @@ -892,6 +970,12 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8, if (op_type == "swish") { auto* block = desc.Block(); + if (block == nullptr) { + VLOG(3) << "The block desc is nullptr, we can't continue to analyze. " + "Developers need to check whether block_desc is passed in " + "the pass."; + return false; + } auto x_var_name = desc.Input("X")[0]; auto* x_var_desc = block->FindVar(x_var_name); const auto x_shape = x_var_desc->GetShape(); @@ -916,6 +1000,12 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8, } auto* block = desc.Block(); + if (block == nullptr) { + VLOG(3) << "The block desc is nullptr, we can't continue to analyze. " + "Developers need to check whether block_desc is passed in " + "the pass."; + return false; + } auto* var_desc = block->FindVar(desc.Input("Alpha")[0]); if (!var_desc) { VLOG(3) << "Variable Alpha of prelu TRT converter not found."; @@ -1051,6 +1141,12 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8, } auto* block = desc.Block(); + if (block == nullptr) { + VLOG(3) << "The block desc is nullptr, we can't continue to analyze. " + "Developers need to check whether block_desc is passed in " + "the pass."; + return false; + } auto x_var_name = desc.Input("X")[0]; auto* x_var_desc = block->FindVar(x_var_name); const auto x_shape = x_var_desc->GetShape(); diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh index a548bb304f4..8c62ccddbc9 100755 --- a/paddle/scripts/paddle_build.sh +++ b/paddle/scripts/paddle_build.sh @@ -1161,8 +1161,8 @@ function parallel_test_base_gpu() { EOF set -x - # set trt_convert ut to run 30% cases. - export TEST_NUM_PERCENT_CASES=0.3 + # set trt_convert ut to run 15% cases. + export TEST_NUM_PERCENT_CASES=0.15 precison_cases="" bash $PADDLE_ROOT/tools/check_added_ut.sh if [ ${PRECISION_TEST:-OFF} == "ON" ]; then -- GitLab