From 2763321684680cb13a106de74f06a2b3e14e9c8a Mon Sep 17 00:00:00 2001
From: nhzlx
Date: Fri, 21 Sep 2018 12:30:50 +0000
Subject: [PATCH] fix comments

---
 paddle/fluid/inference/analysis/analyzer_tester.cc | 12 ++++--------
 .../analysis/data_flow_graph_to_fluid_pass.cc      |  1 +
 .../inference/analysis/subgraph_splitter_tester.cc |  3 +--
 .../analysis/tensorrt_subgraph_pass_tester.cc      | 12 ++++--------
 .../inference/api/api_tensorrt_subgraph_engine.cc  | 14 ++++++--------
 paddle/fluid/inference/api/paddle_inference_api.h  |  3 +--
 paddle/fluid/operators/tensorrt_engine_op.cc       |  2 +-
 7 files changed, 18 insertions(+), 29 deletions(-)

diff --git a/paddle/fluid/inference/analysis/analyzer_tester.cc b/paddle/fluid/inference/analysis/analyzer_tester.cc
index eb0aeafb8..f90910ac0 100644
--- a/paddle/fluid/inference/analysis/analyzer_tester.cc
+++ b/paddle/fluid/inference/analysis/analyzer_tester.cc
@@ -37,14 +37,10 @@ TEST(Analyzer, analysis_without_tensorrt) {
 TEST(Analyzer, analysis_with_tensorrt) {
   FLAGS_IA_enable_tensorrt_subgraph_engine = true;
   Argument argument;
-  int* minimum_subgraph_size = new int(0);
-  int* max_batch_size = new int(3);
-  int* workspace_size = new int(1 << 20);
-  std::string* precision_mode = new std::string("FP32");
-  argument.Set("minimum_subgraph_size", minimum_subgraph_size);
-  argument.Set("max_batch_size", max_batch_size);
-  argument.Set("workspace_size", workspace_size);
-  argument.Set("precision_mode", precision_mode);
+  argument.Set("minimum_subgraph_size", new int(0));
+  argument.Set("max_batch_size", new int(3));
+  argument.Set("workspace_size", new int(1 << 20));
+  argument.Set("precision_mode", new std::string("FP32"));
   argument.fluid_model_dir.reset(new std::string(FLAGS_inference_model_dir));
   Analyzer analyser;
   analyser.Run(&argument);
diff --git a/paddle/fluid/inference/analysis/data_flow_graph_to_fluid_pass.cc b/paddle/fluid/inference/analysis/data_flow_graph_to_fluid_pass.cc
index 991343960..cb549f4b5 100644
--- a/paddle/fluid/inference/analysis/data_flow_graph_to_fluid_pass.cc
+++ b/paddle/fluid/inference/analysis/data_flow_graph_to_fluid_pass.cc
@@ -99,6 +99,7 @@ void DataFlowGraphToFluidPass::AddFluidOp(Node *node) {
 
 void CreateTrtEngineOp(Node *node, Argument *argument,
                        framework::proto::BlockDesc *block) {
+  PADDLE_ENFORCE(argument->main_dfg.get());
   const DataFlowGraph &graph = *(argument->main_dfg);
   static int counter{0};
   PADDLE_ENFORCE(node->IsFunctionBlock());
diff --git a/paddle/fluid/inference/analysis/subgraph_splitter_tester.cc b/paddle/fluid/inference/analysis/subgraph_splitter_tester.cc
index bf0b46e0d..e1dc89fab 100644
--- a/paddle/fluid/inference/analysis/subgraph_splitter_tester.cc
+++ b/paddle/fluid/inference/analysis/subgraph_splitter_tester.cc
@@ -67,8 +67,7 @@ TEST(SubGraphSplitter, Fuse) {
   auto desc = LoadProgramDesc(FLAGS_inference_model_dir + "/__model__");
   auto dfg = ProgramDescToDFG(desc);
   Argument argument;
-  int* minmum_subgraph_size = new int(3);
-  argument.Set("minimum_subgraph_size", minmum_subgraph_size);
+  argument.Set("minimum_subgraph_size", new int(3));
 
   size_t count0 = dfg.nodes.size();
diff --git a/paddle/fluid/inference/analysis/tensorrt_subgraph_pass_tester.cc b/paddle/fluid/inference/analysis/tensorrt_subgraph_pass_tester.cc
index 4d6492fc1..9748e24b0 100644
--- a/paddle/fluid/inference/analysis/tensorrt_subgraph_pass_tester.cc
+++ b/paddle/fluid/inference/analysis/tensorrt_subgraph_pass_tester.cc
@@ -36,14 +36,10 @@ TEST(TensorRTSubGraphPass, main) {
   };
 
   Argument argument(FLAGS_inference_model_dir);
-  int* minimum_subgraph_size = new int(0);
-  int* max_batch_size = new int(3);
-  int* workspace_size = new int(1 << 20);
-  std::string* precision_mode = new std::string("FP32");
-  argument.Set("minimum_subgraph_size", minimum_subgraph_size);
-  argument.Set("max_batch_size", max_batch_size);
-  argument.Set("workspace_size", workspace_size);
-  argument.Set("precision_mode", precision_mode);
+  argument.Set("minimum_subgraph_size", new int(0));
+  argument.Set("max_batch_size", new int(3));
+  argument.Set("workspace_size", new int(1 << 20));
+  argument.Set("precision_mode", new std::string("FP32"));
 
   DFG_GraphvizDrawPass::Config config{FLAGS_dot_dir, "origin"};
   DFG_GraphvizDrawPass::Config config1{FLAGS_dot_dir, "fusion"};
diff --git a/paddle/fluid/inference/api/api_tensorrt_subgraph_engine.cc b/paddle/fluid/inference/api/api_tensorrt_subgraph_engine.cc
index ce147eb5d..2b9be77e9 100644
--- a/paddle/fluid/inference/api/api_tensorrt_subgraph_engine.cc
+++ b/paddle/fluid/inference/api/api_tensorrt_subgraph_engine.cc
@@ -90,14 +90,12 @@ class TensorRTSubgraphPredictor : public NativePaddlePredictor {
 
     // Analyze inference_program
     Argument argument;
-    int* minimum_subgraph_size = new int(config_.minimum_subgraph_size);
-    int* max_batch_size = new int(config_.max_batch_size);
-    int* workspace_size = new int(config_.workspace_size);
-    std::string* precision_mode = new std::string(config_.precision_mode);
-    argument.Set("minimum_subgraph_size", minimum_subgraph_size);
-    argument.Set("max_batch_size", max_batch_size);
-    argument.Set("workspace_size", workspace_size);
-    argument.Set("precision_mode", precision_mode);
+    argument.Set("minimum_subgraph_size",
+                 new int(config_.minimum_subgraph_size));
+    argument.Set("max_batch_size", new int(config_.max_batch_size));
+    argument.Set("workspace_size", new int(config_.workspace_size));
+    argument.Set("precision_mode",
+                 new std::string(config_.precision_mode));
 
     if (!config_.model_dir.empty()) {
       argument.fluid_model_dir.reset(new std::string(config_.model_dir));
diff --git a/paddle/fluid/inference/api/paddle_inference_api.h b/paddle/fluid/inference/api/paddle_inference_api.h
index bacb319e2..d0527d714 100644
--- a/paddle/fluid/inference/api/paddle_inference_api.h
+++ b/paddle/fluid/inference/api/paddle_inference_api.h
@@ -153,8 +153,7 @@ struct TensorRTConfig : public NativeConfig {
   // We transform the Ops that can be converted into TRT layer in the model,
   // and aggregate these Ops into subgraphs for TRT execution.
   // We set this variable to control the minimum number of nodes in the
-  // subgraph, 3 as
-  // default value.
+  // subgraph, 3 as default value.
   int minimum_subgraph_size = 3;
   // Reserved configuration
   // We just support "FP32" now, "FP16" and "INT8" will be supported.
diff --git a/paddle/fluid/operators/tensorrt_engine_op.cc b/paddle/fluid/operators/tensorrt_engine_op.cc
index b34fa5521..41a5786fe 100644
--- a/paddle/fluid/operators/tensorrt_engine_op.cc
+++ b/paddle/fluid/operators/tensorrt_engine_op.cc
@@ -33,7 +33,7 @@ class TensorRTEngineOpMaker : public framework::OpProtoAndCheckerMaker {
     AddAttr<std::string>("subgraph", "the subgraph.");
     AddAttr<std::string>("engine_uniq_key", "unique key for the TRT engine.");
     AddAttr<int>("max_batch_size", "the maximum batch size.");
-    AddAttr<int>("workspace_size", "the maximum batch size.");
+    AddAttr<int>("workspace_size", "the workspace size.");
     AddComment("TensorRT engine operator.");
   }
 };
-- 
GitLab
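
Note on the pattern this patch converges on: Argument stores attributes as owned heap pointers, so each hunk simply inlines the `new` expression at the Set() call site instead of binding it to a throwaway raw-pointer local first. The sketch below illustrates that call style against a simplified stand-in; MiniArgument and its members are hypothetical and not Paddle's actual analysis::Argument.

// MiniArgument: a hypothetical, simplified string-keyed attribute store
// that takes ownership of heap-allocated values, mirroring the
// argument.Set("key", new int(...)) call style used throughout this patch.
#include <iostream>
#include <map>
#include <memory>
#include <string>

class MiniArgument {
 public:
  template <typename T>
  void Set(const std::string &key, T *data) {
    // shared_ptr<void> erases the static type but captures the correct
    // deleter at construction, so the inlined `new` is still freed.
    attrs_[key] = std::shared_ptr<void>(data);
  }

  template <typename T>
  T *Get(const std::string &key) const {
    auto it = attrs_.find(key);
    return it == attrs_.end() ? nullptr : static_cast<T *>(it->second.get());
  }

 private:
  std::map<std::string, std::shared_ptr<void>> attrs_;
};

int main() {
  MiniArgument argument;
  // The style the patch adopts: pass the allocation directly rather than
  // through a named raw-pointer local that serves no further purpose.
  argument.Set("minimum_subgraph_size", new int(3));
  argument.Set("precision_mode", new std::string("FP32"));

  std::cout << *argument.Get<int>("minimum_subgraph_size") << std::endl;
  std::cout << *argument.Get<std::string>("precision_mode") << std::endl;
  return 0;
}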