From e223cf7b7f5a45662ca5438e6ab1ae9a77501073 Mon Sep 17 00:00:00 2001
From: pangyoki
Date: Wed, 14 Sep 2022 14:01:34 +0800
Subject: [PATCH] delete new executor log (#45917)

---
 .../new_executor/interpretercore_util.cc      |  8 --------
 python/paddle/fluid/executor.py               | 20 +++++++++----------
 2 files changed, 10 insertions(+), 18 deletions(-)

diff --git a/paddle/fluid/framework/new_executor/interpretercore_util.cc b/paddle/fluid/framework/new_executor/interpretercore_util.cc
index a11ac6ff981..6c148c5892a 100644
--- a/paddle/fluid/framework/new_executor/interpretercore_util.cc
+++ b/paddle/fluid/framework/new_executor/interpretercore_util.cc
@@ -418,7 +418,6 @@ void build_op_func_list(const platform::Place& place,
                         : var_scope->GetMutableScope();
   std::vector<std::unique_ptr<OperatorBase>>
       ops_unique;  // its elements will be moved to vec_func_list
-  bool flag_log_is_printed = false;
 
   // Step 1: create all ops for current block.
   create_all_ops(block, &ops_unique);
@@ -449,13 +448,6 @@ void build_op_func_list(const platform::Place& place,
 
     VLOG(6) << "Build OpFuncNode from : " << op_type;
 
-    // Print new executor log if grad op is used.
-    // It's only for test and will be removed later.
-    if (!flag_log_is_printed && op_type.find("_grad") != std::string::npos) {
-      VLOG(0) << "Standalone Executor is Used.";
-      flag_log_is_printed = true;
-    }
-
     // Hot fix for variables used in dataloader, like
     // 'lod_tensor_blocking_queue_0'. These variables may be created in scope,
     // and it is not existed as variable in program.
diff --git a/python/paddle/fluid/executor.py b/python/paddle/fluid/executor.py
index 051d902a27a..c77def7ebf1 100755
--- a/python/paddle/fluid/executor.py
+++ b/python/paddle/fluid/executor.py
@@ -771,9 +771,6 @@ class _ExecutorCache(object):
 
                 inner_program = converted_program
                 # print(f"Program after convert:\n {inner_program}", flush=True)
-                warnings.warn(
-                    "FLAGS_USE_STANDALONE_EXECUTOR and FLAGS_CONVERT_GRAPH_TO_PROGRAM is set to 1. Graph will be converted to Program and executed using new executor."
-                )
             else:
                 build_strategy = None
                 from paddle.incubate.autograd import prim_enabled, prim2orig
@@ -789,9 +786,16 @@ class _ExecutorCache(object):
                     fetch_var_name=fetch_var_name,
                     use_fetch_v2=True)
 
-            # If there are multiple blocks in the program, subblock will not be executed with the new executor in temporary
-            if program.num_blocks > 1:
-                warnings.warn("There are more than 1 block in program.")
+            if os.environ.get('FLAGS_CONVERT_GRAPH_TO_PROGRAM', None) in [
+                    1, '1', True, 'True', 'true'
+            ] and not program._is_start_up_program_:
+                if program.num_blocks > 1:
+                    # If there are multiple blocks in the program, subblock will not be executed with the new executor in temporary
+                    logging.warning("There are more than 1 block in program.")
+                elif program.num_blocks == 1:
+                    logging.warning("There are 1 block in program.")
+                else:
+                    logging.warning("There are no block in program.")
 
             # standalone executor will apply buffer_shared_inplace_pass and
             # inplace_addto_op_pass to program according to build_strategy
@@ -1654,10 +1658,6 @@ class Executor(object):
                 else:
                     tensor._copy_from(cpu_tensor, self.place)
 
-            warnings.warn(
-                "FLAGS_USE_STANDALONE_EXECUTOR is set to 1. New executor is used to execute Program."
-            )
-
             return new_exe.run(scope, list(feed.keys()), fetch_list,
                                return_numpy)
 
-- 
GitLab
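
Note on the flag check added in executor.py (this note is not part of the patch):
the new gate reduces to a string-membership test on an environment variable. The
minimal, self-contained sketch below isolates that pattern; the helper name
_convert_graph_to_program_enabled is invented here for illustration. Because
os.environ.get only ever returns a string (or the default), the int/bool entries
in the accepted list are unreachable, and only '1', 'True', and 'true' can match.

    import logging
    import os

    def _convert_graph_to_program_enabled():
        # Mirrors the membership test used in the patch; an unset flag
        # yields None, which fails the test.
        return os.environ.get('FLAGS_CONVERT_GRAPH_TO_PROGRAM',
                              None) in [1, '1', True, 'True', 'true']

    if __name__ == '__main__':
        os.environ['FLAGS_CONVERT_GRAPH_TO_PROGRAM'] = 'true'
        if _convert_graph_to_program_enabled():
            # Unlike the warnings.warn calls this patch removes,
            # logging.warning is not deduplicated by the warnings filter;
            # it goes to stderr through the root logger on every call.
            logging.warning("There are more than 1 block in program.")

One practical difference this patch introduces: warnings.warn messages are shown
once per call site by default, while logging.warning repeats on every run, so the
remaining block-count messages are now gated behind FLAGS_CONVERT_GRAPH_TO_PROGRAM
to keep them out of ordinary runs.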