diff --git a/paddle/fluid/inference/analysis/analyzer_tester.cc b/paddle/fluid/inference/analysis/analyzer_tester.cc index 3b5be7f3ee33c73a9704bafa9f1b736c8a3cd9ea..eb0aeafb8f538e4b7ffe80db7eec9b764afc0f43 100644 --- a/paddle/fluid/inference/analysis/analyzer_tester.cc +++ b/paddle/fluid/inference/analysis/analyzer_tester.cc @@ -37,12 +37,20 @@ TEST(Analyzer, analysis_without_tensorrt) { TEST(Analyzer, analysis_with_tensorrt) { FLAGS_IA_enable_tensorrt_subgraph_engine = true; Argument argument; + int* minimum_subgraph_size = new int(0); + int* max_batch_size = new int(3); + int* workspace_size = new int(1 << 20); + std::string* precision_mode = new std::string("FP32"); + argument.Set("minimum_subgraph_size", minimum_subgraph_size); + argument.Set("max_batch_size", max_batch_size); + argument.Set("workspace_size", workspace_size); + argument.Set("precision_mode", precision_mode); argument.fluid_model_dir.reset(new std::string(FLAGS_inference_model_dir)); Analyzer analyser; analyser.Run(&argument); } -void TestWord2vecPrediction(const std::string &model_path) { +void TestWord2vecPrediction(const std::string& model_path) { NativeConfig config; config.model_dir = model_path; config.use_gpu = false; @@ -73,8 +81,8 @@ void TestWord2vecPrediction(const std::string &model_path) { // The outputs' buffers are in CPU memory. 
for (size_t i = 0; i < std::min(5UL, num_elements); i++) { LOG(INFO) << "data: " - << static_cast<float *>(outputs.front().data.data())[i]; - PADDLE_ENFORCE(static_cast<float *>(outputs.front().data.data())[i], + << static_cast<float*>(outputs.front().data.data())[i]; + PADDLE_ENFORCE(static_cast<float*>(outputs.front().data.data())[i], result[i]); } } diff --git a/paddle/fluid/inference/analysis/subgraph_splitter.cc b/paddle/fluid/inference/analysis/subgraph_splitter.cc index e0a7a1969cb14dbe5e8943f12d1688b0454a78a3..526bbbadfe90c3064d7c620cc22e30f7fef99088 100644 --- a/paddle/fluid/inference/analysis/subgraph_splitter.cc +++ b/paddle/fluid/inference/analysis/subgraph_splitter.cc @@ -309,7 +309,7 @@ void SubGraphFuse::operator()() { ReplaceNodesWithSubGraphs(); } void SubGraphFuse::ReplaceNodesWithSubGraphs() { auto subgraphs = SubGraphSplitter(graph_, node_inside_subgraph_teller_)(); for (auto &subgraph : subgraphs) { - if (subgraph.size() <= argument_->Get<int>("minimun_subgraph_size")) + if (subgraph.size() <= argument_->Get<int>("minimum_subgraph_size")) continue; std::unordered_set<Node *> subgraph_uniq(subgraph.begin(), subgraph.end()); // replace this sub-graph with the first node. Two steps: 1. 
Create a Block diff --git a/paddle/fluid/inference/analysis/subgraph_splitter_tester.cc b/paddle/fluid/inference/analysis/subgraph_splitter_tester.cc index a9734301e6b5ab8833c76adee857323739bd401b..bf0b46e0d76bd54a696c1d99cb72b4d247c7298f 100644 --- a/paddle/fluid/inference/analysis/subgraph_splitter_tester.cc +++ b/paddle/fluid/inference/analysis/subgraph_splitter_tester.cc @@ -68,7 +68,7 @@ TEST(SubGraphSplitter, Fuse) { auto dfg = ProgramDescToDFG(desc); Argument argument; int* minmum_subgraph_size = new int(3); - argument.Set("minmum_subgraph_size", minmum_subgraph_size); + argument.Set("minimum_subgraph_size", minmum_subgraph_size); size_t count0 = dfg.nodes.size(); diff --git a/paddle/fluid/inference/analysis/tensorrt_subgraph_pass_tester.cc b/paddle/fluid/inference/analysis/tensorrt_subgraph_pass_tester.cc index 67a5af83d89b771536ea11be51b35244ff5c09d6..fcdd3a03a82ad0d1b2afad32cd4904d440d399e3 100644 --- a/paddle/fluid/inference/analysis/tensorrt_subgraph_pass_tester.cc +++ b/paddle/fluid/inference/analysis/tensorrt_subgraph_pass_tester.cc @@ -36,6 +36,14 @@ TEST(TensorRTSubGraphPass, main) { }; Argument argument(FLAGS_inference_model_dir); + int* minimum_subgraph_size = new int(0); + int* max_batch_size = new int(3); + int* workspace_size = new int(1 << 20); + std::string* precision_mode = new std::string("FP32"); + argument.Set("minimum_subgraph_size", minimum_subgraph_size); + argument.Set("max_batch_size", max_batch_size); + argument.Set("workspace_size", workspace_size); + argument.Set("precision_mode", precision_mode); DFG_GraphvizDrawPass::Config config{FLAGS_dot_dir, "origin"}; DFG_GraphvizDrawPass::Config config1{FLAGS_dot_dir, "fusion"}; diff --git a/paddle/fluid/inference/api/api_tensorrt_subgraph_engine.cc b/paddle/fluid/inference/api/api_tensorrt_subgraph_engine.cc index 945b85b7f82c3bd0b4f980164740b253e5d9076a..8be444949655ed404179ca786245cbd95f55db4e 100644 --- a/paddle/fluid/inference/api/api_tensorrt_subgraph_engine.cc +++ 
b/paddle/fluid/inference/api/api_tensorrt_subgraph_engine.cc @@ -94,7 +94,7 @@ class TensorRTSubgraphPredictor : public NativePaddlePredictor { int* max_batch_size = new int(config_.max_batch_size); int* workspace_size = new int(config_.workspace_size); std::string* precision_mode = new std::string(config_.precision_mode); - argument.Set("minimun_subgraph_size", minimum_subgraph_size); + argument.Set("minimum_subgraph_size", minimum_subgraph_size); argument.Set("max_batch_size", max_batch_size); argument.Set("workspace_size", workspace_size); argument.Set("precision_mode", precision_mode);