From f639bc694750c3c15ec1e9411b93a01daf9eb914 Mon Sep 17 00:00:00 2001 From: pangyoki Date: Tue, 13 Sep 2022 20:16:14 +0800 Subject: [PATCH] add log while running New Executor, Old Executor and ParallelExecutor and change log level (#45814) * optimize executor log * delete log in new exe * add log for old executor * use LOG_FIRST_N(INFO, 1) --- paddle/fluid/framework/executor.cc | 1 + .../framework/new_executor/interpretercore.cc | 1 + .../new_executor/interpretercore_util.cc | 4 ++-- paddle/fluid/framework/parallel_executor.cc | 2 ++ python/paddle/fluid/executor.py | 20 +++++++++---------- 5 files changed, 16 insertions(+), 12 deletions(-) diff --git a/paddle/fluid/framework/executor.cc b/paddle/fluid/framework/executor.cc index 7b73b33037..fb456fcb68 100644 --- a/paddle/fluid/framework/executor.cc +++ b/paddle/fluid/framework/executor.cc @@ -183,6 +183,7 @@ void Executor::Run(const ProgramDesc& pdesc, const std::vector<std::string>& skip_ref_cnt_vars, bool force_disable_gc, bool keep_kid_scopes) { + LOG_FIRST_N(INFO, 1) << "Old Executor is Running."; platform::RecordEvent record_run( "Executor::Run", platform::TracerEventType::UserDefined, 1); platform::RecordBlock b(block_id); diff --git a/paddle/fluid/framework/new_executor/interpretercore.cc b/paddle/fluid/framework/new_executor/interpretercore.cc index 39746f0734..59285dd6e6 100644 --- a/paddle/fluid/framework/new_executor/interpretercore.cc +++ b/paddle/fluid/framework/new_executor/interpretercore.cc @@ -193,6 +193,7 @@ paddle::framework::FetchList InterpreterCore::Run( #endif if (!is_build_) { + LOG_FIRST_N(INFO, 1) << "New Executor is Running."; paddle::framework::interpreter::build_variable_scope( block_, &var_scope_, create_local_scope_); diff --git a/paddle/fluid/framework/new_executor/interpretercore_util.cc b/paddle/fluid/framework/new_executor/interpretercore_util.cc index a11ac6ff98..6fbb14287e 100644 --- a/paddle/fluid/framework/new_executor/interpretercore_util.cc +++ 
b/paddle/fluid/framework/new_executor/interpretercore_util.cc @@ -418,7 +418,6 @@ void build_op_func_list(const platform::Place& place, : var_scope->GetMutableScope(); std::vector<std::unique_ptr<OperatorBase>> ops_unique; // its elements will be moved to vec_func_list - bool flag_log_is_printed = false; // Step 1: create all ops for current block. create_all_ops(block, &ops_unique); @@ -443,6 +442,7 @@ void build_op_func_list(const platform::Place& place, } auto unused_var_map = get_unused_vars(block, ops); + bool flag_log_is_printed = false; for (size_t i = 0; i < ops.size(); ++i) { auto op = ops[i].get(); const std::string& op_type = op->Type(); @@ -452,7 +452,7 @@ void build_op_func_list(const platform::Place& place, // Print new executor log if grad op is used. // It's only for test and will be removed later. if (!flag_log_is_printed && op_type.find("_grad") != std::string::npos) { - VLOG(0) << "Standalone Executor is Used."; + LOG_FIRST_N(INFO, 1) << "Standalone Executor is Used."; flag_log_is_printed = true; } diff --git a/paddle/fluid/framework/parallel_executor.cc b/paddle/fluid/framework/parallel_executor.cc index 26150b2d04..cfb92bb178 100644 --- a/paddle/fluid/framework/parallel_executor.cc +++ b/paddle/fluid/framework/parallel_executor.cc @@ -983,6 +983,7 @@ void ParallelExecutor::BCastParamsToDevices( FetchUnmergedList ParallelExecutor::Run( const std::vector<std::string> &fetch_tensors) { + LOG_FIRST_N(INFO, 1) << "ParallelExecutor is Running (Run)."; PreludeToRun(fetch_tensors); platform::RecordBlock b(0); @@ -1000,6 +1001,7 @@ FetchUnmergedList ParallelExecutor::Run( FetchList ParallelExecutor::RunAndMerge( const std::vector<std::string> &fetch_tensors) { + LOG_FIRST_N(INFO, 1) << "ParallelExecutor is Running (RunAndMerge)."; PreludeToRun(fetch_tensors); platform::RecordBlock b(0); diff --git a/python/paddle/fluid/executor.py b/python/paddle/fluid/executor.py index 39c3468d5f..3b21262cc4 100755 --- a/python/paddle/fluid/executor.py +++ b/python/paddle/fluid/executor.py @@ -771,9 +771,6 @@ class 
_ExecutorCache(object): inner_program = converted_program # print(f"Program after convert:\n {inner_program}", flush=True) - warnings.warn( - "FLAGS_USE_STANDALONE_EXECUTOR and FLAGS_CONVERT_GRAPH_TO_PROGRAM is set to 1. Graph will be converted to Program and executed using new executor." - ) else: build_strategy = None from paddle.incubate.autograd import prim_enabled, prim2orig @@ -789,9 +786,16 @@ class _ExecutorCache(object): fetch_var_name=fetch_var_name, use_fetch_v2=True) - # If there are multiple blocks in the program, subblock will not be executed with the new executor in temporary - if program.num_blocks > 1: - warnings.warn("There are more than 1 block in program.") + if os.environ.get('FLAGS_CONVERT_GRAPH_TO_PROGRAM', None) in [ + 1, '1', True, 'True', 'true' + ] and not program._is_start_up_program_: + if program.num_blocks > 1: + # If there are multiple blocks in the program, subblock will not be executed with the new executor in temporary + logging.warning("There are more than 1 block in program.") + elif program.num_blocks == 1: + logging.warning("There are 1 block in program.") + else: + logging.warning("There are no block in program.") # standalone executor will apply buffer_shared_inplace_pass and # inplace_addto_op_pass to program according to build_strategy @@ -1667,10 +1671,6 @@ class Executor(object): else: tensor._copy_from(cpu_tensor, self.place) - warnings.warn( - "FLAGS_USE_STANDALONE_EXECUTOR is set to 1. New executor is used to execute Program." - ) - return new_exe.run(scope, list(feed.keys()), fetch_list, return_numpy) -- GitLab