Unverified commit 443f604c, authored by Pei Yang and committed by GitHub

add DisableGlogInfo() to AnalysisConfig, test=develop (#20581)

Parent 2ff18e53
@@ -93,8 +93,7 @@ void GraphPatternDetector::operator()(Graph *graph,
   ValidateByNodeRole(&subgraphs);
 
   if (subgraphs.empty()) return;
-  PrettyLogEndl(Style::detail(), "--- detected %d subgraphs",
-                subgraphs.size());
+  LOG(INFO) << "--- detected " << subgraphs.size() << " subgraphs";
   int id = 0;
   for (auto &g : subgraphs) {
     VLOG(3) << "optimizing #" << id++ << " subgraph";
...
@@ -131,6 +131,9 @@ AnalysisConfig::AnalysisConfig(const AnalysisConfig &other) {
   // profile related.
   CP_MEMBER(with_profile_);
 
+  // glog related.
+  CP_MEMBER(with_glog_info_);
+
   // Ir related.
   CP_MEMBER(enable_ir_optim_);
   CP_MEMBER(use_feed_fetch_ops_);
@@ -382,6 +385,8 @@ std::string AnalysisConfig::SerializeInfoCache() {
   ss << with_profile_;
 
+  ss << with_glog_info_;
+
   ss << enable_ir_optim_;
   ss << use_feed_fetch_ops_;
   ss << ir_debug_;
@@ -458,6 +463,11 @@ void AnalysisConfig::EnableProfile() {
   Update();
 }
 
+void AnalysisConfig::DisableGlogInfo() {
+  with_glog_info_ = false;
+  Update();
+}
+
 void AnalysisConfig::EnableAnakinEngine(
     int max_batch_size, std::map<std::string, std::vector<int>> max_input_shape,
     int min_subgraph_size, AnalysisConfig::Precision precision_mode,
...
@@ -506,6 +506,12 @@ std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
       framework::InitGflags(flags);
     }
   }
 
+  if (config.glog_info_disabled()) {
+    google::InitGoogleLogging("Init");
+    FLAGS_logtostderr = 1;
+    FLAGS_minloglevel = google::WARNING;
+    LOG(WARNING) << " - GLOG's LOG(INFO) is disabled.";
+  }
   std::unique_ptr<PaddlePredictor> predictor(new AnalysisPredictor(config));
   // Each config can only be used for one predictor.
...
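The hunk above is the whole mechanism behind the new switch: when the flag is set, the predictor factory raises glog's minimum severity to WARNING so that LOG(INFO) output is dropped. For reference, a minimal standalone sketch of that same glog idiom (not part of this patch, shown only for illustration):

#include <glog/logging.h>

int main(int argc, char** argv) {
  google::InitGoogleLogging(argv[0]);
  FLAGS_logtostderr = 1;                // send logs to stderr instead of log files
  FLAGS_minloglevel = google::WARNING;  // severities below WARNING (i.e. INFO) are dropped
  LOG(INFO) << "this message is suppressed";
  LOG(WARNING) << "this message is still printed";
  return 0;
}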
@@ -257,6 +257,15 @@ struct AnalysisConfig {
    */
   bool profile_enabled() const { return with_profile_; }
 
+  /** \brief Disable GLOG information output for security.
+   *
+   * If called, no LOG(INFO) logs will be generated.
+   */
+  void DisableGlogInfo();
+  /** A boolean state telling whether the GLOG info is disabled.
+   */
+  bool glog_info_disabled() const { return !with_glog_info_; }
+
   void SetInValid() const { is_valid_ = false; }
   bool is_valid() const { return is_valid_; }
 
@@ -325,6 +334,8 @@ struct AnalysisConfig {
   bool with_profile_{false};
 
+  bool with_glog_info_{true};
+
   // A runtime cache, shouldn't be transferred to others.
   std::string serialized_info_cache_;
...
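The header hunks above define the public interface: DisableGlogInfo() flips with_glog_info_ to false, and glog_info_disabled() reports the inverted flag. A minimal C++ usage sketch (the include path follows the repository layout; the model directory is a placeholder):

#include "paddle/fluid/inference/api/paddle_inference_api.h"

int main() {
  paddle::AnalysisConfig config;
  config.SetModel("./my_model_dir");  // placeholder model directory
  config.DisableGlogInfo();           // no LOG(INFO) output from here on
  // glog_info_disabled() now returns true, so CreatePaddlePredictor() will
  // raise glog's minimum log level to WARNING before building the predictor.
  auto predictor = paddle::CreatePaddlePredictor(config);
  return 0;
}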
@@ -35,6 +35,7 @@ TEST(ZeroCopyTensor, uint8) {
   config.SetModel(model_dir);
   config.SwitchUseFeedFetchOps(false);
   config.EnableProfile();
+  config.DisableGlogInfo();
 
   std::vector<std::vector<PaddleTensor>> inputs_all;
   auto predictor = CreatePaddlePredictor(config);
...
@@ -338,6 +338,7 @@ void BindAnalysisConfig(py::module *m) {
       .def("ir_optim", &AnalysisConfig::ir_optim)
       .def("enable_memory_optim", &AnalysisConfig::EnableMemoryOptim)
       .def("enable_profile", &AnalysisConfig::EnableProfile)
+      .def("disable_glog_info", &AnalysisConfig::DisableGlogInfo)
       .def("set_optim_cache_dir", &AnalysisConfig::SetOptimCacheDir)
       .def("switch_use_feed_fetch_ops", &AnalysisConfig::SwitchUseFeedFetchOps,
            py::arg("x") = true)
...