From 443f604c3bba457e07be85797971748f6c4e8439 Mon Sep 17 00:00:00 2001
From: Pei Yang
Date: Mon, 14 Oct 2019 11:22:15 +0800
Subject: [PATCH] add DisableGlogInfo() to AnalysisConfig, test=develop
 (#20581)

---
 paddle/fluid/framework/ir/graph_pattern_detector.cc   |  3 +--
 paddle/fluid/inference/api/analysis_config.cc         | 10 ++++++++++
 paddle/fluid/inference/api/analysis_predictor.cc      |  6 ++++++
 paddle/fluid/inference/api/paddle_analysis_config.h   | 11 +++++++++++
 paddle/fluid/inference/tests/api/trt_fc_prelu_test.cc |  1 +
 paddle/fluid/pybind/inference_api.cc                  |  1 +
 6 files changed, 30 insertions(+), 2 deletions(-)

diff --git a/paddle/fluid/framework/ir/graph_pattern_detector.cc b/paddle/fluid/framework/ir/graph_pattern_detector.cc
index b628ccc8684..d10394f4f31 100644
--- a/paddle/fluid/framework/ir/graph_pattern_detector.cc
+++ b/paddle/fluid/framework/ir/graph_pattern_detector.cc
@@ -93,8 +93,7 @@ void GraphPatternDetector::operator()(Graph *graph,
   ValidateByNodeRole(&subgraphs);
 
   if (subgraphs.empty()) return;
-  PrettyLogEndl(Style::detail(), "--- detected %d subgraphs",
-                subgraphs.size());
+  LOG(INFO) << "--- detected " << subgraphs.size() << " subgraphs";
   int id = 0;
   for (auto &g : subgraphs) {
     VLOG(3) << "optimizing #" << id++ << " subgraph";
diff --git a/paddle/fluid/inference/api/analysis_config.cc b/paddle/fluid/inference/api/analysis_config.cc
index ace260c7cdb..398b51e3211 100644
--- a/paddle/fluid/inference/api/analysis_config.cc
+++ b/paddle/fluid/inference/api/analysis_config.cc
@@ -131,6 +131,9 @@ AnalysisConfig::AnalysisConfig(const AnalysisConfig &other) {
   // profile related.
   CP_MEMBER(with_profile_);
 
+  // glog related.
+  CP_MEMBER(with_glog_info_);
+
   // Ir related.
   CP_MEMBER(enable_ir_optim_);
   CP_MEMBER(use_feed_fetch_ops_);
@@ -382,6 +385,8 @@ std::string AnalysisConfig::SerializeInfoCache() {
 
   ss << with_profile_;
 
+  ss << with_glog_info_;
+
   ss << enable_ir_optim_;
   ss << use_feed_fetch_ops_;
   ss << ir_debug_;
@@ -458,6 +463,11 @@ void AnalysisConfig::EnableProfile() {
   Update();
 }
 
+void AnalysisConfig::DisableGlogInfo() {
+  with_glog_info_ = false;
+  Update();
+}
+
 void AnalysisConfig::EnableAnakinEngine(
     int max_batch_size, std::map<std::string, std::vector<int>> max_input_shape,
     int min_subgraph_size, AnalysisConfig::Precision precision_mode,
diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc
index cc841640370..42209d9b0c3 100644
--- a/paddle/fluid/inference/api/analysis_predictor.cc
+++ b/paddle/fluid/inference/api/analysis_predictor.cc
@@ -506,6 +506,12 @@ std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
       framework::InitGflags(flags);
     }
   }
+  if (config.glog_info_disabled()) {
+    google::InitGoogleLogging("Init");
+    FLAGS_logtostderr = 1;
+    FLAGS_minloglevel = google::WARNING;
+    LOG(WARNING) << " - GLOG's LOG(INFO) is disabled.";
+  }
 
   std::unique_ptr<PaddlePredictor> predictor(new AnalysisPredictor(config));
   // Each config can only be used for one predictor.
diff --git a/paddle/fluid/inference/api/paddle_analysis_config.h b/paddle/fluid/inference/api/paddle_analysis_config.h
index 7764a498695..40b24cd092d 100644
--- a/paddle/fluid/inference/api/paddle_analysis_config.h
+++ b/paddle/fluid/inference/api/paddle_analysis_config.h
@@ -257,6 +257,15 @@ struct AnalysisConfig {
    */
   bool profile_enabled() const { return with_profile_; }
 
+  /** \brief Disable GLOG information output for security.
+   *
+   * If called, no LOG(INFO) logs will be generated.
+   */
+  void DisableGlogInfo();
+  /** A boolean state telling whether the GLOG info is disabled.
+   */
+  bool glog_info_disabled() const { return !with_glog_info_; }
+
   void SetInValid() const { is_valid_ = false; }
   bool is_valid() const { return is_valid_; }
 
@@ -325,6 +334,8 @@ struct AnalysisConfig {
 
   bool with_profile_{false};
 
+  bool with_glog_info_{true};
+
   // A runtime cache, shouldn't be transferred to others.
   std::string serialized_info_cache_;
 
diff --git a/paddle/fluid/inference/tests/api/trt_fc_prelu_test.cc b/paddle/fluid/inference/tests/api/trt_fc_prelu_test.cc
index 2ee75f90b44..cb00c9c21c8 100644
--- a/paddle/fluid/inference/tests/api/trt_fc_prelu_test.cc
+++ b/paddle/fluid/inference/tests/api/trt_fc_prelu_test.cc
@@ -35,6 +35,7 @@ TEST(ZeroCopyTensor, uint8) {
   config.SetModel(model_dir);
   config.SwitchUseFeedFetchOps(false);
   config.EnableProfile();
+  config.DisableGlogInfo();
 
   std::vector<std::vector<PaddleTensor>> inputs_all;
   auto predictor = CreatePaddlePredictor(config);
diff --git a/paddle/fluid/pybind/inference_api.cc b/paddle/fluid/pybind/inference_api.cc
index f7a59022285..1b334f3350a 100644
--- a/paddle/fluid/pybind/inference_api.cc
+++ b/paddle/fluid/pybind/inference_api.cc
@@ -338,6 +338,7 @@ void BindAnalysisConfig(py::module *m) {
       .def("ir_optim", &AnalysisConfig::ir_optim)
       .def("enable_memory_optim", &AnalysisConfig::EnableMemoryOptim)
       .def("enable_profile", &AnalysisConfig::EnableProfile)
+      .def("disable_glog_info", &AnalysisConfig::DisableGlogInfo)
       .def("set_optim_cache_dir", &AnalysisConfig::SetOptimCacheDir)
       .def("switch_use_feed_fetch_ops", &AnalysisConfig::SwitchUseFeedFetchOps,
            py::arg("x") = true)
--
GitLab
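
Usage note (not part of the patch above): a minimal sketch of how a client might exercise the new switch once this change lands. The include path and the "./mobilenet" model directory are illustrative assumptions, not taken from this patch; DisableGlogInfo(), glog_info_disabled(), SetModel(), and CreatePaddlePredictor() are the calls this patch adds or exercises.

    // Sketch only: header name and model path are assumptions.
    #include "paddle/include/paddle_inference_api.h"

    int main() {
      paddle::AnalysisConfig config;
      config.SetModel("./mobilenet");  // hypothetical model directory
      config.DisableGlogInfo();        // added by this patch: mutes LOG(INFO)

      // glog_info_disabled() now returns true, so CreatePaddlePredictor()
      // initializes glog with FLAGS_minloglevel = google::WARNING before
      // building the predictor (see the analysis_predictor.cc hunk above).
      auto predictor = paddle::CreatePaddlePredictor(config);
      return 0;
    }

On the Python side, the pybind hunk exposes the same switch as config.disable_glog_info().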