Unverified · Commit 122b37ce, authored by Pei Yang, committed by GitHub

make config option DisableGlogInfo() able to mute all inference logs (#21318)

* make DisableGlogInfo able to mute all logs in inference. 
Parent commit: 4c9b3daf
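
For context, a minimal caller-side sketch of how this option is used from the C++ inference API. The header name and model directory are illustrative assumptions; DisableGlogInfo() and glog_info_disabled() are the config accessors that appear in the diffs below:

    #include "paddle_inference_api.h"  // assumed install-relative header name

    int main() {
      paddle::AnalysisConfig config;
      config.SetModel("./model_dir");  // hypothetical model directory
      config.DisableGlogInfo();        // after this commit: mutes analysis/IR-pass logs too
      auto predictor = paddle::CreatePaddlePredictor(config);
      return 0;
    }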
@@ -102,9 +102,8 @@ void NaiveExecutor::CreateOps(const ProgramDesc &desc, int block_id,
   for (const auto &op_desc : desc.Block(block_id).AllOps()) {
     if (!with_feed_fetch_ops &&
         (op_desc->Type() == "feed" || op_desc->Type() == "fetch")) {
-      string::PrettyLogEndl(string::Style::detail(), "--- skip [%s], %s -> %s",
-                            op_desc->Input("X")[0], op_desc->Type(),
-                            op_desc->Output("Out")[0]);
+      LOG(INFO) << "--- skip [" << op_desc->Input("X")[0] << "], "
+                << op_desc->Type() << " -> " << op_desc->Output("Out")[0];
       continue;
     }
     ops_.emplace_back(OpRegistry::CreateOp(*op_desc));
......
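Presumably the reason for routing this message through LOG(INFO) instead of string::PrettyLogEndl (here and in the TensorRT sub-graph pass below) is that PrettyLog prints directly to the terminal, whereas glog-routed messages honor FLAGS_minloglevel and can therefore be muted by the new option.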
@@ -29,8 +29,11 @@ void Analyzer::Run(Argument *argument) { RunAnalysis(argument); }
 void Analyzer::RunAnalysis(Argument *argument) {
   PADDLE_ENFORCE(argument->analysis_passes_valid(),
                  "analsis_passes is not valid in the argument.");
+  const bool disable_logs = argument->disable_logs();
   for (auto &pass : argument->analysis_passes()) {
-    string::PrettyLogH1("--- Running analysis [%s]", pass);
+    if (!disable_logs) {
+      string::PrettyLogH1("--- Running analysis [%s]", pass);
+    }
     if (!argument->enable_analysis_optim() && pass == "ir_analysis_pass")
       continue;
......
@@ -29,6 +29,7 @@ using namespace framework;  // NOLINT
 TEST(Analyzer, analysis_without_tensorrt) {
   Argument argument;
+  argument.SetDisableLogs(false);
   argument.SetModelDir(FLAGS_inference_model_dir);
   argument.SetEnableAnalysisOptim(false);
   argument.SetUseGPU(false);
@@ -41,6 +42,7 @@ TEST(Analyzer, analysis_without_tensorrt) {
 TEST(Analyzer, analysis_with_tensorrt) {
   Argument argument;
+  argument.SetDisableLogs(false);
   argument.SetEnableAnalysisOptim(false);
   argument.SetTensorRtMaxBatchSize(3);
   argument.SetTensorRtWorkspaceSize(1 << 20);
......
@@ -149,6 +149,9 @@ struct Argument {
   DECL_ARGUMENT_FIELD(analysis_passes, AnalysisPasses,
                       std::vector<std::string>);
+  // whether to mute all logs in inference.
+  DECL_ARGUMENT_FIELD(disable_logs, DisableLogs, bool);
+
   // Pass a set of op types to enable its mkldnn kernel
   DECL_ARGUMENT_FIELD(mkldnn_enabled_op_types, MKLDNNEnabledOpTypes,
                       std::unordered_set<std::string>);
......
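For readers unfamiliar with the macro, here is a simplified sketch of what DECL_ARGUMENT_FIELD plausibly expands to. This is an assumption for illustration, not the actual definition in argument.h, but it matches the accessors exercised in these diffs: disable_logs(), SetDisableLogs(), and the *_valid() checks:

    #define DECL_ARGUMENT_FIELD(field__, Field, type__)    \
     private:                                              \
      type__ field__##_;                                   \
      bool field__##_valid_{false};                        \
                                                           \
     public:                                               \
      type__ &field__() { return field__##_; }             \
      void Set##Field(const type__ &x) {                   \
        field__##_ = x;                                    \
        field__##_valid_ = true;                           \
      }                                                    \
      bool field__##_valid() { return field__##_valid_; }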
@@ -147,6 +147,7 @@ void IRPassManager::CreatePasses(Argument *argument,
     pass->Set("auto_config_layout",
               new bool(argument->anakin_auto_config_layout()));
   }
+  disable_logs_ = argument->disable_logs();
   if (pass_name == "fc_fuse_pass") {
     pass->Set("use_gpu", new bool(argument->use_gpu()));
   }
@@ -164,7 +165,7 @@ std::unique_ptr<Graph> IRPassManager::Apply(std::unique_ptr<Graph> graph) {
   PADDLE_ENFORCE(graph.get());
   // Apply all the passes
   for (const auto &pass : passes_) {
-    if (pass->Type() != "graph_viz_pass") {
+    if (pass->Type() != "graph_viz_pass" && !disable_logs_) {
       PrettyLogEndl(Style::H2(), "--- Running IR pass [%s]", pass->Type());
     }
     graph.reset(pass->Apply(graph.release()));
......
@@ -56,6 +56,7 @@ class IRPassManager final {
   std::unique_ptr<Graph> graph_;
   std::vector<std::unique_ptr<framework::ir::Pass>> passes_;
+  bool disable_logs_{false};
 };
 }  // namespace analysis
......
@@ -108,8 +108,7 @@ void TensorRtSubgraphPass::CreateTensorRTOp(
   framework::BlockDesc block_desc(nullptr, &block_proto);
   block_desc.Proto()->set_parent_idx(-1);
   block_desc.Proto()->set_idx(0);
-  string::PrettyLogDetail("--- detect a sub-graph with %d nodes",
-                          subgraph.size());
+  LOG(INFO) << "--- detect a sub-graph with " << subgraph.size() << " nodes";
   for (auto *node : subgraph) {
     auto *new_block_op = new_block->AppendOp();
......
@@ -452,6 +452,7 @@ void AnalysisPredictor::PrepareArgument() {
     passes.clear();
     LOG(INFO) << "ir_optim is turned off, no IR pass will be executed";
   }
+  argument_.SetDisableLogs(config_.glog_info_disabled());
   argument_.SetIrAnalysisPasses(passes);
   argument_.SetAnalysisPasses(config_.pass_builder()->AnalysisPasses());
   argument_.SetScopeNotOwned(scope_.get());
@@ -508,10 +509,10 @@ std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
       framework::InitGflags(flags);
     }
   }
+  framework::InitGLOG("");
   if (config.glog_info_disabled()) {
     FLAGS_logtostderr = 1;
-    FLAGS_minloglevel = google::WARNING;
-    LOG(WARNING) << " - GLOG's LOG(INFO) is disabled.";
+    FLAGS_minloglevel = 2;  // GLOG_ERROR
   }
   std::unique_ptr<PaddlePredictor> predictor(new AnalysisPredictor(config));
......
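Note on the magic number: glog severities are INFO = 0, WARNING = 1, ERROR = 2, FATAL = 3, so FLAGS_minloglevel = 2 drops INFO and WARNING while keeping ERROR and FATAL. A standalone sketch of the same muting, independent of Paddle:

    #include <glog/logging.h>

    int main(int argc, char **argv) {
      google::InitGoogleLogging(argv[0]);
      FLAGS_logtostderr = 1;  // send logs to stderr instead of files
      FLAGS_minloglevel = 2;  // ERROR: suppress INFO and WARNING
      LOG(INFO) << "muted";
      LOG(WARNING) << "muted too";
      LOG(ERROR) << "still printed";
      return 0;
    }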
@@ -46,6 +46,7 @@ namespace framework {
 #endif
 std::once_flag gflags_init_flag;
+std::once_flag glog_init_flag;
 std::once_flag p2p_init_flag;
 std::once_flag glog_warning_once_flag;
@@ -223,13 +224,15 @@ void SignalHandle(const char *data, int size) {
 #endif
 void InitGLOG(const std::string &prog_name) {
-  // glog will not hold the ARGV[0] inside.
-  // Use strdup to alloc a new string.
-  google::InitGoogleLogging(strdup(prog_name.c_str()));
+  std::call_once(glog_init_flag, [&]() {
+    // glog will not hold the ARGV[0] inside.
+    // Use strdup to alloc a new string.
+    google::InitGoogleLogging(strdup(prog_name.c_str()));
 #ifndef _WIN32
-  google::InstallFailureSignalHandler();
-  google::InstallFailureWriter(&SignalHandle);
+    google::InstallFailureSignalHandler();
+    google::InstallFailureWriter(&SignalHandle);
 #endif
+  });
 }
 }  // namespace framework
......
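The std::call_once wrapper matters because glog aborts if InitGoogleLogging is called twice, and this commit adds a second call site in CreatePaddlePredictor. A minimal illustration of the idiom (names are hypothetical, unrelated to Paddle):

    #include <iostream>
    #include <mutex>
    #include <thread>

    std::once_flag init_flag;

    void Init(const char *who) {
      std::call_once(init_flag, [&]() {
        std::cout << "initialized by " << who << "\n";  // runs at most once
      });
    }

    int main() {
      std::thread t1(Init, "predictor-1");
      std::thread t2(Init, "predictor-2");
      t1.join();
      t2.join();  // exactly one "initialized by ..." line appears
      return 0;
    }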