From 5e878ecc8f425c2133a391534531a8a88cdc769b Mon Sep 17 00:00:00 2001
From: Jiabin Yang <360788950@qq.com>
Date: Fri, 23 Sep 2022 11:00:47 +0800
Subject: [PATCH] optimize log (#46349)

---
 paddle/fluid/eager/accumulation/accumulation_node.cc |  4 ++++
 .../eager/auto_code_generator/generator/eager_gen.py | 10 +++++-----
 paddle/fluid/eager/backward.cc                       |  3 +--
 paddle/fluid/eager/general_grad.h                    |  3 +++
 4 files changed, 13 insertions(+), 7 deletions(-)

diff --git a/paddle/fluid/eager/accumulation/accumulation_node.cc b/paddle/fluid/eager/accumulation/accumulation_node.cc
index 12bbfbbb25..4e5afca78a 100644
--- a/paddle/fluid/eager/accumulation/accumulation_node.cc
+++ b/paddle/fluid/eager/accumulation/accumulation_node.cc
@@ -31,12 +31,16 @@ static void CopyOrAddTensor(paddle::experimental::Tensor* tensor,
                             const paddle::experimental::Tensor& t,
                             bool is_fake_empty) {
   if (is_fake_empty) {
+    VLOG(3) << "Move Tensor ptr: " << t.impl();
     *tensor = t;
   } else {
     if (!tensor->defined() || !tensor->initialized()) {
       // Simply copy tensor->impl
+      VLOG(3) << "Move Tensor ptr: " << t.impl();
       *tensor = t;
     } else {
+      VLOG(3) << "Add Tensor ptr: " << t.impl()
+              << " with Tensor ptr: " << tensor->impl();
       // Accumulation
       if (LIKELY(t.is_dense_tensor())) {
         if (LIKELY(tensor->is_dense_tensor())) {
diff --git a/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py b/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py
index 23ed9e4c26..5ae57faa37 100644
--- a/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py
+++ b/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py
@@ -1414,13 +1414,13 @@ class DygraphForwardFunctionGenerator(DygraphFunctionGeneratorBase):
         var_str = f"\n{indent} std::string input_str = \"\";"
         var_str += f"\n{indent} std::string output_str = \"\";"
         for name, (ttype, pos) in forward_inputs_position_map.items():
-            var_str += f"\n{indent} const char* TENSOR_{name.upper()}_TEMPLATE = \"({name}, [%s]), \";"
+            var_str += f"\n{indent} const char* TENSOR_{name.upper()}_TEMPLATE = \" \\n( {name} , [%s]), \";"
             var_str += f"\n{indent} std::string input_{name}_str = paddle::string::Sprintf(TENSOR_{name.upper()}_TEMPLATE, egr::EagerUtils::TensorStr({name}));"
             var_str += f"\n{indent} input_str += input_{name}_str; "

         before_log_str = BEFORE_LOG_PRINT_TEMPLATE.format(var_str)

         for name, (ttype, pos) in forward_outputs_position_map.items():
-            var_str += f"\n{indent} const char* TENSOR_{name.upper()}_TEMPLATE = \"({name}, [%s]), \";"
+            var_str += f"\n{indent} const char* TENSOR_{name.upper()}_TEMPLATE = \" \\n( {name} , [%s]), \";"
             var_str += f"\n{indent} std::string output_{name}_str = paddle::string::Sprintf(TENSOR_{name.upper()}_TEMPLATE, egr::EagerUtils::TensorStr({name}));"
             var_str += f"\n{indent} output_str += output_{name}_str; "
@@ -1930,14 +1930,14 @@ class DygraphNodeGenerator(DygraphFunctionGeneratorBase):
         for name, (ttype, fwd_position,
                    grad_api_position) in backward_grad_inputs_map.items():
             new_name = self.TransformToNextGradName(name)
-            var_str += f"\n{indent} const char* TENSOR_{new_name.upper()}_TEMPLATE = \"({new_name}, [%s]), \";"
+            var_str += f"\n{indent} const char* TENSOR_{new_name.upper()}_TEMPLATE = \" \\n( {new_name} , [%s]), \";"
             var_str += f"\n{indent} std::string input_{new_name}_str = paddle::string::Sprintf(TENSOR_{new_name.upper()}_TEMPLATE, egr::EagerUtils::TensorStr({new_name}));"
             var_str += f"\n{indent} input_str += input_{new_name}_str; "

         for name, (backward_input_type, is_fwd_input,
                    grad_api_position), in backward_forward_inputs_map.items():
             new_name = self.TransformToNextGradName(name)
-            var_str += f"\n{indent} const char* TENSOR_{new_name.upper()}_TEMPLATE = \"({new_name}, [%s]), \";"
+            var_str += f"\n{indent} const char* TENSOR_{new_name.upper()}_TEMPLATE = \" \\n( {new_name} , [%s]), \";"
             var_str += f"\n{indent} std::string input_{new_name}_str = paddle::string::Sprintf(TENSOR_{new_name.upper()}_TEMPLATE, egr::EagerUtils::TensorStr({new_name}));"
             var_str += f"\n{indent} input_str += input_{new_name}_str; "

@@ -1946,7 +1946,7 @@ class DygraphNodeGenerator(DygraphFunctionGeneratorBase):
         for name, (ttype, fwd_position,
                    grad_api_position) in backward_grad_outputs_map.items():
             new_name = self.TransformToNextGradName(name)
-            var_str += f"\n{indent} const char* TENSOR_{new_name.upper()}_TEMPLATE = \"({new_name}, [%s]), \";"
+            var_str += f"\n{indent} const char* TENSOR_{new_name.upper()}_TEMPLATE = \" \\n ( {new_name} , [%s]), \";"
             var_str += f"\n{indent} std::string output_{new_name}_str = paddle::string::Sprintf(TENSOR_{new_name.upper()}_TEMPLATE, egr::EagerUtils::TensorStr({new_name}));"
             var_str += f"\n{indent} output_str += output_{new_name}_str; "
diff --git a/paddle/fluid/eager/backward.cc b/paddle/fluid/eager/backward.cc
index 6b75b8d9e3..20709d1316 100644
--- a/paddle/fluid/eager/backward.cc
+++ b/paddle/fluid/eager/backward.cc
@@ -226,7 +226,6 @@ std::vector<paddle::experimental::Tensor> RunBackward(
   while (!queue.empty()) {
     GradNodeBase* node = queue.front();
     VLOG(3) << "Preparing GradNode:" << node->name() << " addr:" << node;
-    VLOG(4) << EagerUtils::GradNodeStr(*node);
     paddle::platform::RecordEvent node_record_event(
         std::string((*node).name()),
         paddle::platform::TracerEventType::Operator,
@@ -338,7 +337,7 @@ std::vector<paddle::experimental::Tensor> RunBackward(
        node_input_buffers_dict[next_node] = std::move(grad_tensor_holder);
      }

-      VLOG(3) << "Sum grad inputs for edge slot: " << edge_rank.first
+      VLOG(3) << "Sum or Move grad inputs for edge slot: " << edge_rank.first
              << ", rank: " << edge_rank.second;

      node_input_buffers_dict[next_node]->add(edge_rank.first,
diff --git a/paddle/fluid/eager/general_grad.h b/paddle/fluid/eager/general_grad.h
index 554afcd8cc..3823255de2 100644
--- a/paddle/fluid/eager/general_grad.h
+++ b/paddle/fluid/eager/general_grad.h
@@ -557,6 +557,9 @@ class GeneralGrad {
     } else {
       copied_next_node = orig_next_node->Copy();
       orig_to_copied_node_map_[orig_next_node.get()] = copied_next_node;
+      VLOG(3) << "Copied Node: " << orig_next_node->name()
+              << " ptr: " << orig_next_node
+              << " to ptr: " << copied_next_node;
       copied_grad_nodes_.push_back(copied_next_node);
     }
--
GitLab