From 4b59646ed1a2f32bc69ec01c645d69e9e88704a1 Mon Sep 17 00:00:00 2001
From: nhzlx
Date: Wed, 27 Feb 2019 09:43:18 +0000
Subject: [PATCH] fix comments and fix cpplint test=develop

---
 paddle/fluid/framework/ir/fuse_pass_base.h | 2 +-
 paddle/fluid/inference/analysis/helper.h | 2 ++
 paddle/fluid/inference/analysis/ir_pass_manager.h | 3 +++
 .../inference/analysis/ir_passes/tensorrt_subgraph_pass.cc | 2 +-
 .../inference/analysis/ir_passes/tensorrt_subgraph_pass.h | 5 ++++-
 paddle/fluid/inference/api/analysis_predictor.h | 1 +
 paddle/fluid/inference/tensorrt/convert/op_converter.h | 1 +
 paddle/fluid/inference/tensorrt/convert/ut_helper.h | 2 ++
 paddle/fluid/inference/tensorrt/plugin/split_op_plugin.h | 1 +
 paddle/fluid/inference/tensorrt/plugin/trt_plugin.h | 1 +
 .../fluid/inference/tensorrt/plugin/trt_plugin_factory.h | 1 +
 paddle/fluid/inference/tensorrt/test_engine.cc | 7 ++++++-
 paddle/fluid/operators/tensorrt/tensorrt_engine_op.h | 6 ++++--
 13 files changed, 28 insertions(+), 6 deletions(-)

diff --git a/paddle/fluid/framework/ir/fuse_pass_base.h b/paddle/fluid/framework/ir/fuse_pass_base.h
index ed3796c5ff..3a1022bbcb 100644
--- a/paddle/fluid/framework/ir/fuse_pass_base.h
+++ b/paddle/fluid/framework/ir/fuse_pass_base.h
@@ -25,7 +25,7 @@ namespace ir {
 
 static const char kParamScopeAttr[] = "__param_scope__";
 static const char kFuseStatisAttr[] = "__fuse_statis__";
-// When we use trt or other third_party lib, the parameters are managered by
+// When we use trt or other third_party lib, the parameters are managed by
 // the lib, but not the fluid. So we need to record them to avoid duplicate
 // allocation.
 static const char kRepetitiveParamAttr[] = "__repetitive_param__";

diff --git a/paddle/fluid/inference/analysis/helper.h b/paddle/fluid/inference/analysis/helper.h
index 9fa85f3762..a480584002 100644
--- a/paddle/fluid/inference/analysis/helper.h
+++ b/paddle/fluid/inference/analysis/helper.h
@@ -17,10 +17,12 @@ limitations under the License. */
 #include
 #include
 #include
+#include
 #include
 #include
 #include
 #include
+#include
 #include
 
 #include "paddle/fluid/framework/framework.pb.h"

diff --git a/paddle/fluid/inference/analysis/ir_pass_manager.h b/paddle/fluid/inference/analysis/ir_pass_manager.h
index 2a595cb36b..2d120679ee 100644
--- a/paddle/fluid/inference/analysis/ir_pass_manager.h
+++ b/paddle/fluid/inference/analysis/ir_pass_manager.h
@@ -22,7 +22,10 @@
 
 #pragma once
 
+#include
 #include
+#include
+#include
 #include
 #include "paddle/fluid/framework/ir/graph.h"
 #include "paddle/fluid/framework/ir/pass.h"

diff --git a/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.cc b/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.cc
index 2b5ae2a840..8b796c207f 100644
--- a/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.cc
+++ b/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.cc
@@ -235,7 +235,7 @@ void TensorRtSubgraphPass::CreateTensorRTOp(
 
   std::string trt_engine_serialized_data = GetTrtEngineSerializedData(
       Get("model_opt_cache_dir"), engine_key);
 
-  if (trt_engine_serialized_data.size() == 0) {
+  if (trt_engine_serialized_data.empty()) {
     LOG(INFO) << "Prepare TRT engine (Optimize model structure, Select OP "
                  "kernel etc). This process may cost a lot of time.";
     std::unique_ptr trt_engine(

diff --git a/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.h b/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.h
index 144f8bbd0e..6689a668fc 100644
--- a/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.h
+++ b/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.h
@@ -13,9 +13,12 @@
 // limitations under the License.
 
 #pragma once
 
-#include
+#include
 #include
+#include
+#include
 #include
 
+#include "paddle/fluid/framework/ir/fuse_pass_base.h"
 #include "paddle/fluid/framework/ir/pass.h"

diff --git a/paddle/fluid/inference/api/analysis_predictor.h b/paddle/fluid/inference/api/analysis_predictor.h
index b9d0fdc51c..cc06e3479c 100644
--- a/paddle/fluid/inference/api/analysis_predictor.h
+++ b/paddle/fluid/inference/api/analysis_predictor.h
@@ -15,6 +15,7 @@
 #pragma once
 
 #include
 #include
+#include
 #include
 #include "paddle/fluid/framework/naive_executor.h"

diff --git a/paddle/fluid/inference/tensorrt/convert/op_converter.h b/paddle/fluid/inference/tensorrt/convert/op_converter.h
index 8484daaa12..90ed90b1e2 100644
--- a/paddle/fluid/inference/tensorrt/convert/op_converter.h
+++ b/paddle/fluid/inference/tensorrt/convert/op_converter.h
@@ -16,6 +16,7 @@ limitations under the License. */
 
 #include
 #include
+#include
 #include
 #include "paddle/fluid/framework/block_desc.h"
 #include "paddle/fluid/framework/op_registry.h"

diff --git a/paddle/fluid/inference/tensorrt/convert/ut_helper.h b/paddle/fluid/inference/tensorrt/convert/ut_helper.h
index d7cca0e456..2571abbf69 100644
--- a/paddle/fluid/inference/tensorrt/convert/ut_helper.h
+++ b/paddle/fluid/inference/tensorrt/convert/ut_helper.h
@@ -19,7 +19,9 @@ limitations under the License. */
 
 #pragma once
 
+#include
 #include
+#include
 #include
 
 #include "paddle/fluid/framework/lod_tensor.h"

diff --git a/paddle/fluid/inference/tensorrt/plugin/split_op_plugin.h b/paddle/fluid/inference/tensorrt/plugin/split_op_plugin.h
index 16553d44a5..cbb7259056 100644
--- a/paddle/fluid/inference/tensorrt/plugin/split_op_plugin.h
+++ b/paddle/fluid/inference/tensorrt/plugin/split_op_plugin.h
@@ -15,6 +15,7 @@
 #pragma once
 
 #include
+#include
 #include
 
 #include "paddle/fluid/inference/tensorrt/plugin/trt_plugin.h"

diff --git a/paddle/fluid/inference/tensorrt/plugin/trt_plugin.h b/paddle/fluid/inference/tensorrt/plugin/trt_plugin.h
index 7355041365..3b737bd726 100644
--- a/paddle/fluid/inference/tensorrt/plugin/trt_plugin.h
+++ b/paddle/fluid/inference/tensorrt/plugin/trt_plugin.h
@@ -17,6 +17,7 @@
 #include
 #include
 #include
+#include
 #include
 
 #include "paddle/fluid/inference/tensorrt/plugin/trt_plugin_utils.h"

diff --git a/paddle/fluid/inference/tensorrt/plugin/trt_plugin_factory.h b/paddle/fluid/inference/tensorrt/plugin/trt_plugin_factory.h
index 061dd30497..139c75595f 100644
--- a/paddle/fluid/inference/tensorrt/plugin/trt_plugin_factory.h
+++ b/paddle/fluid/inference/tensorrt/plugin/trt_plugin_factory.h
@@ -17,6 +17,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include

diff --git a/paddle/fluid/inference/tensorrt/test_engine.cc b/paddle/fluid/inference/tensorrt/test_engine.cc
index 0975a66ec6..a03dd45db0 100644
--- a/paddle/fluid/inference/tensorrt/test_engine.cc
+++ b/paddle/fluid/inference/tensorrt/test_engine.cc
@@ -35,7 +35,12 @@ class TensorRTEngineTest : public ::testing::Test {
     engine_->InitNetwork();
   }
 
-  void TearDown() override { delete engine_; }
+  void TearDown() override {
+    if (engine_) {
+      delete engine_;
+      engine_ = nullptr;
+    }
+  }
 
   void PrepareInputOutput(const std::vector &input,
                           std::vector output_shape) {

diff --git a/paddle/fluid/operators/tensorrt/tensorrt_engine_op.h b/paddle/fluid/operators/tensorrt/tensorrt_engine_op.h
index 3f98b0a934..c366733124 100644
--- a/paddle/fluid/operators/tensorrt/tensorrt_engine_op.h
+++ b/paddle/fluid/operators/tensorrt/tensorrt_engine_op.h
@@ -16,8 +16,10 @@
 
 #ifdef PADDLE_WITH_CUDA
 
+#include
 #include
 #include
+#include
 #include
 
 #include "paddle/fluid/framework/executor.h"
@@ -220,11 +222,11 @@ class TensorRTEngineOp : public framework::OperatorBase {
 
   TensorRTEngine *GetEngine(const framework::Scope &scope,
                             const platform::Place &dev_place) const {
-    if (trt_engine_.get() == nullptr) {
+    if (!trt_engine_) {
       trt_engine_.reset(new inference::tensorrt::TensorRTEngine(
           max_batch_size_, workspace_size_, enable_int8_, calibrator_.get(),
           boost::get(dev_place).device));
-      if (engine_serialized_data_.size() > 0) {
+      if (!engine_serialized_data_.empty()) {
         trt_engine_->Deserialize(engine_serialized_data_);
       } else {
         PrepareTRTEngine(scope, trt_engine_.get());
-- 
GitLab
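Note on the recurring pattern in this patch: TensorRTEngineOp::GetEngine builds the TRT engine lazily and prefers deserializing a cached plan over a full rebuild (the slow path the LOG(INFO) in tensorrt_subgraph_pass.cc warns about). Below is a minimal standalone sketch of that create-or-deserialize pattern; the names (Engine, EngineHolder, BuildFromScratch) are illustrative stand-ins, not Paddle's actual API.

// Illustrative sketch only -- not Paddle's API. Shows the lazy
// create-or-deserialize pattern used by TensorRTEngineOp::GetEngine.
#include <iostream>
#include <memory>
#include <string>
#include <utility>

class Engine {  // stand-in for inference::tensorrt::TensorRTEngine
 public:
  // Rebuild the engine from a serialized plan (fast path).
  void Deserialize(const std::string &blob) {
    std::cout << "deserialized " << blob.size() << " bytes\n";
  }
  // Build the engine from scratch (slow path: optimize model
  // structure, select op kernels, etc.).
  void BuildFromScratch() { std::cout << "built from scratch\n"; }
};

class EngineHolder {  // stand-in for the op that caches the engine
 public:
  explicit EngineHolder(std::string serialized)
      : serialized_(std::move(serialized)) {}

  Engine *GetEngine() {
    if (!engine_) {  // cpplint-friendly: !ptr instead of ptr.get() == nullptr
      engine_.reset(new Engine);
      if (!serialized_.empty()) {  // empty() instead of size() > 0
        engine_->Deserialize(serialized_);
      } else {
        engine_->BuildFromScratch();
      }
    }
    return engine_.get();  // later calls reuse the cached engine
  }

 private:
  std::string serialized_;
  std::unique_ptr<Engine> engine_;
};

int main() {
  EngineHolder cold("");                       // no cached plan: slow build
  cold.GetEngine();
  EngineHolder warm("serialized-plan-bytes");  // cached plan: deserialize
  warm.GetEngine();
  return 0;
}

The !ptr and empty() forms match the cpplint-style cleanups made throughout the patch, and the guarded delete with a nullptr reset mirrors the TearDown change in test_engine.cc.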