From 857cd9f8516b8a52f580c0388662f92974a31d33 Mon Sep 17 00:00:00 2001 From: Pei Yang Date: Wed, 4 Dec 2019 21:41:35 +0800 Subject: [PATCH] make config option DisableGlogInfo() able to mute all inference logs (#21544) make config option DisableGlogInfo() able to mute all inference logs --- paddle/fluid/framework/naive_executor.cc | 7 ++++--- paddle/fluid/inference/analysis/analyzer.cc | 5 ++++- paddle/fluid/inference/analysis/analyzer_tester.cc | 2 ++ paddle/fluid/inference/analysis/argument.h | 3 +++ paddle/fluid/inference/analysis/ir_pass_manager.cc | 6 +++++- paddle/fluid/inference/analysis/ir_pass_manager.h | 1 + .../analysis/ir_passes/tensorrt_subgraph_pass.cc | 3 +-- paddle/fluid/inference/api/analysis_predictor.cc | 5 +++-- paddle/fluid/platform/init.cc | 13 ++++++++----- 9 files changed, 31 insertions(+), 14 deletions(-) diff --git a/paddle/fluid/framework/naive_executor.cc b/paddle/fluid/framework/naive_executor.cc index a37bb6f4da..a18f9df9a9 100644 --- a/paddle/fluid/framework/naive_executor.cc +++ b/paddle/fluid/framework/naive_executor.cc @@ -12,7 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+#include <memory> #include <string> +#include <unordered_map> #include <vector> #include "paddle/fluid/framework/feed_fetch_method.h" @@ -100,9 +102,8 @@ void NaiveExecutor::CreateOps(const ProgramDesc &desc, int block_id, for (const auto &op_desc : desc.Block(block_id).AllOps()) { if (!with_feed_fetch_ops && (op_desc->Type() == "feed" || op_desc->Type() == "fetch")) { - string::PrettyLogEndl(string::Style::detail(), "--- skip [%s], %s -> %s", - op_desc->Input("X")[0], op_desc->Type(), - op_desc->Output("Out")[0]); + LOG(INFO) << "--- skip [" << op_desc->Input("X")[0] << "], " + << op_desc->Type() << " -> " << op_desc->Output("Out")[0]; continue; } ops_.emplace_back(OpRegistry::CreateOp(*op_desc)); diff --git a/paddle/fluid/inference/analysis/analyzer.cc b/paddle/fluid/inference/analysis/analyzer.cc index 71fdb5570c..d6d0371eda 100644 --- a/paddle/fluid/inference/analysis/analyzer.cc +++ b/paddle/fluid/inference/analysis/analyzer.cc @@ -29,8 +29,11 @@ void Analyzer::Run(Argument *argument) { RunAnalysis(argument); } void Analyzer::RunAnalysis(Argument *argument) { PADDLE_ENFORCE(argument->analysis_passes_valid(), "analsis_passes is not valid in the argument."); + const bool disable_logs = argument->disable_logs(); for (auto &pass : argument->analysis_passes()) { - string::PrettyLogH1("--- Running analysis [%s]", pass); + if (!disable_logs) { + string::PrettyLogH1("--- Running analysis [%s]", pass); + } if (!argument->enable_analysis_optim() && pass == "ir_analysis_pass") continue; diff --git a/paddle/fluid/inference/analysis/analyzer_tester.cc b/paddle/fluid/inference/analysis/analyzer_tester.cc index 489345da49..79784fcb9b 100644 --- a/paddle/fluid/inference/analysis/analyzer_tester.cc +++ b/paddle/fluid/inference/analysis/analyzer_tester.cc @@ -29,6 +29,7 @@ using namespace framework; // NOLINT TEST(Analyzer, analysis_without_tensorrt) { Argument argument; + argument.SetDisableLogs(false); argument.SetModelDir(FLAGS_inference_model_dir); argument.SetEnableAnalysisOptim(false); argument.SetUseGPU(false); 
@@ -41,6 +42,7 @@ TEST(Analyzer, analysis_with_tensorrt) { Argument argument; + argument.SetDisableLogs(false); argument.SetEnableAnalysisOptim(false); argument.SetTensorRtMaxBatchSize(3); argument.SetTensorRtWorkspaceSize(1 << 20); diff --git a/paddle/fluid/inference/analysis/argument.h b/paddle/fluid/inference/analysis/argument.h index 42858655aa..5b47e9ebff 100644 --- a/paddle/fluid/inference/analysis/argument.h +++ b/paddle/fluid/inference/analysis/argument.h @@ -149,6 +149,9 @@ struct Argument { DECL_ARGUMENT_FIELD(analysis_passes, AnalysisPasses, std::vector<std::string>); + // whether to mute all logs in inference. + DECL_ARGUMENT_FIELD(disable_logs, DisableLogs, bool); + // Pass a set of op types to enable its mkldnn kernel DECL_ARGUMENT_FIELD(mkldnn_enabled_op_types, MKLDNNEnabledOpTypes, std::unordered_set<std::string>); diff --git a/paddle/fluid/inference/analysis/ir_pass_manager.cc b/paddle/fluid/inference/analysis/ir_pass_manager.cc index 3fa907b418..174d6e3fc1 100644 --- a/paddle/fluid/inference/analysis/ir_pass_manager.cc +++ b/paddle/fluid/inference/analysis/ir_pass_manager.cc @@ -147,6 +147,10 @@ void IRPassManager::CreatePasses(Argument *argument, pass->Set("auto_config_layout", new bool(argument->anakin_auto_config_layout())); } + disable_logs_ = argument->disable_logs(); + if (pass_name == "fc_fuse_pass") { + pass->Set("use_gpu", new bool(argument->use_gpu())); + } pre_pass = pass_name; @@ -161,7 +165,7 @@ std::unique_ptr<Graph> IRPassManager::Apply(std::unique_ptr<Graph> graph) { PADDLE_ENFORCE(graph.get()); // Apply all the passes for (const auto &pass : passes_) { - if (pass->Type() != "graph_viz_pass") { + if (pass->Type() != "graph_viz_pass" && !disable_logs_) { PrettyLogEndl(Style::H2(), "--- Running IR pass [%s]", pass->Type()); } graph.reset(pass->Apply(graph.release())); diff --git a/paddle/fluid/inference/analysis/ir_pass_manager.h b/paddle/fluid/inference/analysis/ir_pass_manager.h index 2d120679ee..f96b4a0f13 100644 --- 
a/paddle/fluid/inference/analysis/ir_pass_manager.h +++ b/paddle/fluid/inference/analysis/ir_pass_manager.h @@ -56,6 +56,7 @@ class IRPassManager final { std::unique_ptr<Graph> graph_; std::vector<std::unique_ptr<Pass>> passes_; + bool disable_logs_{false}; }; } // namespace analysis diff --git a/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.cc b/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.cc index 8c181ba2fd..a173c899db 100644 --- a/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.cc +++ b/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.cc @@ -108,8 +108,7 @@ void TensorRtSubgraphPass::CreateTensorRTOp( framework::BlockDesc block_desc(nullptr, &block_proto); block_desc.Proto()->set_parent_idx(-1); block_desc.Proto()->set_idx(0); - string::PrettyLogDetail("--- detect a sub-graph with %d nodes", - subgraph.size()); + LOG(INFO) << "--- detect a sub-graph with " << subgraph.size() << " nodes"; for (auto *node : subgraph) { auto *new_block_op = new_block->AppendOp(); diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc index 513f58e9be..4e8d2094e2 100644 --- a/paddle/fluid/inference/api/analysis_predictor.cc +++ b/paddle/fluid/inference/api/analysis_predictor.cc @@ -451,6 +451,7 @@ void AnalysisPredictor::PrepareArgument() { passes.clear(); LOG(INFO) << "ir_optim is turned off, no IR pass will be executed"; } + argument_.SetDisableLogs(config_.glog_info_disabled()); argument_.SetIrAnalysisPasses(passes); argument_.SetAnalysisPasses(config_.pass_builder()->AnalysisPasses()); argument_.SetScopeNotOwned(scope_.get()); @@ -507,10 +508,10 @@ std::unique_ptr<PaddlePredictor> CreatePaddlePredictor< framework::InitGflags(flags); } } + framework::InitGLOG(""); if (config.glog_info_disabled()) { FLAGS_logtostderr = 1; - FLAGS_minloglevel = google::WARNING; - LOG(WARNING) << " - GLOG's LOG(INFO) is disabled."; + FLAGS_minloglevel = 2; // GLOG_ERROR } std::unique_ptr<PaddlePredictor> predictor(new 
AnalysisPredictor(config)); diff --git a/paddle/fluid/platform/init.cc b/paddle/fluid/platform/init.cc index 04687b0b38..9ed409544d 100644 --- a/paddle/fluid/platform/init.cc +++ b/paddle/fluid/platform/init.cc @@ -46,6 +46,7 @@ namespace framework { #endif std::once_flag gflags_init_flag; +std::once_flag glog_init_flag; std::once_flag p2p_init_flag; void InitGflags(std::vector<std::string> argv) { @@ -213,13 +214,15 @@ void SignalHandle(const char *data, int size) { #endif void InitGLOG(const std::string &prog_name) { - // glog will not hold the ARGV[0] inside. - // Use strdup to alloc a new string. - google::InitGoogleLogging(strdup(prog_name.c_str())); + std::call_once(glog_init_flag, [&]() { + // glog will not hold the ARGV[0] inside. + // Use strdup to alloc a new string. + google::InitGoogleLogging(strdup(prog_name.c_str())); #ifndef _WIN32 - google::InstallFailureSignalHandler(); - google::InstallFailureWriter(&SignalHandle); + google::InstallFailureSignalHandler(); + google::InstallFailureWriter(&SignalHandle); #endif + }); } } // namespace framework -- GitLab