Unverified commit f639bc69, authored by pangyoki, committed by GitHub

add log while running New Executor, Old Executor and ParallelExecutor and change log level (#45814)

* optimize executor log

* delete log in new exe

* add log for old executor

* use LOG_FIRST_N(INFO, 1)
Parent 81e06752
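For context on the mechanism this commit leans on: LOG_FIRST_N(severity, n) is a glog macro that emits its message only the first n times its call site is reached, so each executor banner below is printed once per process rather than on every Run call. A minimal standalone sketch, not part of this diff, assuming glog is installed:

#include <glog/logging.h>

int main(int argc, char* argv[]) {
  FLAGS_logtostderr = true;  // route logs to stderr for this demo
  google::InitGoogleLogging(argv[0]);
  for (int i = 0; i < 3; ++i) {
    // Printed on the first iteration only; the later hits are suppressed.
    LOG_FIRST_N(INFO, 1) << "Old Executor is Running.";
  }
  return 0;
}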
@@ -183,6 +183,7 @@ void Executor::Run(const ProgramDesc& pdesc,
                    const std::vector<std::string>& skip_ref_cnt_vars,
                    bool force_disable_gc,
                    bool keep_kid_scopes) {
+  LOG_FIRST_N(INFO, 1) << "Old Executor is Running.";
   platform::RecordEvent record_run(
       "Executor::Run", platform::TracerEventType::UserDefined, 1);
   platform::RecordBlock b(block_id);
@@ -193,6 +193,7 @@ paddle::framework::FetchList InterpreterCore::Run(
 #endif
   if (!is_build_) {
+    LOG_FIRST_N(INFO, 1) << "New Executor is Running.";
     paddle::framework::interpreter::build_variable_scope(
         block_, &var_scope_, create_local_scope_);
@@ -418,7 +418,6 @@ void build_op_func_list(const platform::Place& place,
                        : var_scope->GetMutableScope();
   std::vector<std::unique_ptr<OperatorBase>>
       ops_unique;  // its elements will be moved to vec_func_list
-  bool flag_log_is_printed = false;
   // Step 1: create all ops for current block.
   create_all_ops(block, &ops_unique);
@@ -443,6 +442,7 @@ void build_op_func_list(const platform::Place& place,
   }
   auto unused_var_map = get_unused_vars(block, ops);
+  bool flag_log_is_printed = false;
   for (size_t i = 0; i < ops.size(); ++i) {
     auto op = ops[i].get();
     const std::string& op_type = op->Type();
@@ -452,7 +452,7 @@ void build_op_func_list(const platform::Place& place,
     // Print new executor log if grad op is used.
     // It's only for test and will be removed later.
     if (!flag_log_is_printed && op_type.find("_grad") != std::string::npos) {
-      VLOG(0) << "Standalone Executor is Used.";
+      LOG_FIRST_N(INFO, 1) << "Standalone Executor is Used.";
       flag_log_is_printed = true;
     }
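Note the interplay in this hunk: flag_log_is_printed deduplicates within a single build_op_func_list call, while swapping VLOG(0), which passes the default verbosity check (--v=0) and would log on every build, for LOG_FIRST_N(INFO, 1) additionally caps the message at one occurrence per process. A hedged sketch of the difference; the helper name is illustrative, not Paddle code:

#include <glog/logging.h>

// Hypothetical stand-in for a function invoked once per program build.
void BuildOnce() {
  VLOG(0) << "emitted on every call (default verbosity admits level 0)";
  LOG_FIRST_N(INFO, 1) << "emitted on the first call only";
}

int main(int argc, char* argv[]) {
  FLAGS_logtostderr = true;
  google::InitGoogleLogging(argv[0]);
  BuildOnce();  // both messages appear
  BuildOnce();  // only the VLOG message appears again
  return 0;
}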
@@ -983,6 +983,7 @@ void ParallelExecutor::BCastParamsToDevices(
 FetchUnmergedList ParallelExecutor::Run(
     const std::vector<std::string> &fetch_tensors) {
+  LOG_FIRST_N(INFO, 1) << "ParallelExecutor is Running (Run).";
   PreludeToRun(fetch_tensors);
   platform::RecordBlock b(0);
@@ -1000,6 +1001,7 @@ FetchUnmergedList ParallelExecutor::Run(
 FetchList ParallelExecutor::RunAndMerge(
     const std::vector<std::string> &fetch_tensors) {
+  LOG_FIRST_N(INFO, 1) << "ParallelExecutor is Running (RunAndMerge).";
   PreludeToRun(fetch_tensors);
   platform::RecordBlock b(0);
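Each LOG_FIRST_N invocation keeps its own per-call-site counter, which is why Run and RunAndMerge can each announce themselves once rather than sharing a single budget. A small sketch under that assumption; the Demo type is illustrative, not Paddle's:

#include <glog/logging.h>

struct Demo {  // hypothetical, mirrors the two entry points above
  void Run() { LOG_FIRST_N(INFO, 1) << "Run banner"; }
  void RunAndMerge() { LOG_FIRST_N(INFO, 1) << "RunAndMerge banner"; }
};

int main(int argc, char* argv[]) {
  FLAGS_logtostderr = true;
  google::InitGoogleLogging(argv[0]);
  Demo d;
  d.Run();          // logs "Run banner"
  d.Run();          // suppressed: this call site hit its limit of 1
  d.RunAndMerge();  // logs independently via its own counter
  return 0;
}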
@@ -771,9 +771,6 @@ class _ExecutorCache(object):
                 inner_program = converted_program
                 # print(f"Program after convert:\n {inner_program}", flush=True)
-                warnings.warn(
-                    "FLAGS_USE_STANDALONE_EXECUTOR and FLAGS_CONVERT_GRAPH_TO_PROGRAM is set to 1. Graph will be converted to Program and executed using new executor."
-                )
             else:
                 build_strategy = None
                 from paddle.incubate.autograd import prim_enabled, prim2orig
@@ -789,9 +786,16 @@ class _ExecutorCache(object):
                 fetch_var_name=fetch_var_name,
                 use_fetch_v2=True)
-        # If there are multiple blocks in the program, subblock will not be executed with the new executor in temporary
         if os.environ.get('FLAGS_CONVERT_GRAPH_TO_PROGRAM', None) in [
                 1, '1', True, 'True', 'true'
         ] and not program._is_start_up_program_:
             if program.num_blocks > 1:
-                warnings.warn("There are more than 1 block in program.")
+                # If there are multiple blocks in the program, subblock will not be executed with the new executor in temporary
+                logging.warning("There are more than 1 block in program.")
+            elif program.num_blocks == 1:
+                logging.warning("There are 1 block in program.")
+            else:
+                logging.warning("There are no block in program.")
         # standalone executor will apply buffer_shared_inplace_pass and
         # inplace_addto_op_pass to program according to build_strategy
@@ -1667,10 +1671,6 @@ class Executor(object):
                 else:
                     tensor._copy_from(cpu_tensor, self.place)
-            warnings.warn(
-                "FLAGS_USE_STANDALONE_EXECUTOR is set to 1. New executor is used to execute Program."
-            )
             return new_exe.run(scope, list(feed.keys()), fetch_list,
                                return_numpy)