diff --git a/paddle/fluid/eager/auto_code_generator/generator/codegen_utils.py b/paddle/fluid/eager/auto_code_generator/generator/codegen_utils.py
index fcc66893a7164ba4b8329f67fe9ae33e97e887dc..2dc62ff349a73a5b59b6de47fb969f9670f0d649 100644
--- a/paddle/fluid/eager/auto_code_generator/generator/codegen_utils.py
+++ b/paddle/fluid/eager/auto_code_generator/generator/codegen_utils.py
@@ -174,7 +174,7 @@ def GetDygraphLogName(string):
         arr = filter(None, text.split('_'))
         res = ''
         for i in arr:
-            res = res + i[0].upper() + i[1:]
+            res = res + i.lower()
         return res
 
     string = str2Hump(string)
diff --git a/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py b/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py
index e22c63225813ebdb779342d63f0a77e4e254c4eb..f0f687309efa7593be25d40e668c07ac4c3bc6df 100644
--- a/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py
+++ b/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py
@@ -166,8 +166,11 @@ paddle::small_vector<std::vector<paddle::experimental::Tensor>, egr::kSlotSmallV
 {}
   // Inplace Strategy
 {}
-  // Call grad_api function
 
+  VLOG(5) << \"Running C++ API: \" << \"{}\";
+  // Before log info
+{}
+  // Call grad_api function
 {}
   // Check NaN and Inf id needed
 {}
@@ -195,8 +198,11 @@ FORWARD_FUNCTION_TEMPLATE = \
 {}
   // Get Input AutoGradMeta
 {}
-  // Forward API Call
 
+  VLOG(5) << \"Running C++ API: \" << \"{}\";
+  // Before log info
+{}
+  // Forward API Call
 {}
   // Check NaN and Inf if needed
 {}
@@ -220,7 +226,7 @@ FORWARD_FUNCTION_TEMPLATE = \
 }}
 """
 
-LOG_PRINT_TEMPLATE = \
+AFTER_LOG_PRINT_TEMPLATE = \
 """
   if(VLOG_IS_ON(4)){{
     const char* INPUT_PRINT_TEMPLATE = \"{{ Input: [%s], Output: [%s] }} \";
@@ -229,6 +235,14 @@ LOG_PRINT_TEMPLATE = \
   }}
 """
 
+BEFORE_LOG_PRINT_TEMPLATE = \
+"""
+  if(VLOG_IS_ON(3)){{
+    const char* INPUT_PRINT_TEMPLATE = \"{{ Input: [%s]}} \";
+    {}
+    VLOG(3) << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str);
+  }}
+"""
 
 FORWARD_ONLY_FUNCTION_TEMPLATE = \
 """
@@ -240,8 +254,10 @@ FORWARD_ONLY_FUNCTION_TEMPLATE = \
 {}
   // Layout autotune
 {}
-  // Forward API Call
   VLOG(5) << \"Running C++ API: \" << \"{}\";
+  // Before log info
+{}
+  // Forward API Call
 {}
   // Get Outputs
 {}
@@ -1239,6 +1255,7 @@ class DygraphForwardFunctionGenerator(DygraphFunctionGeneratorBase):
         returns_str = f"{returns_type_str}{{{returns_str}}}"
 
         # Node Creation Pre-Processing
+        inputs_names = []
         if not self.is_forward_only:
             # 1. Get Input AutoGradMeta
             inputs_autograd_meta_list = []
@@ -1374,12 +1391,14 @@ class DygraphForwardFunctionGenerator(DygraphFunctionGeneratorBase):
             var_str += f"\n{indent} const char* TENSOR_{name.upper()}_TEMPLATE = \"({name}, [%s]), \";"
             var_str += f"\n{indent} std::string input_{name}_str = paddle::string::Sprintf(TENSOR_{name.upper()}_TEMPLATE, egr::EagerUtils::TensorStr({name}));"
             var_str += f"\n{indent} input_str += input_{name}_str; "
+
+        before_log_str = BEFORE_LOG_PRINT_TEMPLATE.format(var_str)
         for name, (ttype, pos) in forward_outputs_position_map.items():
             var_str += f"\n{indent} const char* TENSOR_{name.upper()}_TEMPLATE = \"({name}, [%s]), \";"
             var_str += f"\n{indent} std::string output_{name}_str = paddle::string::Sprintf(TENSOR_{name.upper()}_TEMPLATE, egr::EagerUtils::TensorStr({name}));"
             var_str += f"\n{indent} output_str += output_{name}_str; "
 
-        log_str = LOG_PRINT_TEMPLATE.format(var_str)
+        log_str = AFTER_LOG_PRINT_TEMPLATE.format(var_str)
 
         # Generate forward_definition_str and forward_declaration_str
         if self.is_forward_only:
@@ -1387,23 +1406,21 @@ class DygraphForwardFunctionGenerator(DygraphFunctionGeneratorBase):
                 amp_logic_str = "\n VLOG(7) << \" No AMP for {} because it has no input. \"; ".format(
                     forward_ad_function_name)
             self.forward_definition_str += FORWARD_ONLY_FUNCTION_TEMPLATE.format(
-                returns_type_str,
-                forward_ad_function_name, inputs_args_definition_str,
-                GetDygraphLogName(forward_api_name), dygraph_event_str,
+                returns_type_str, forward_ad_function_name,
+                inputs_args_definition_str, forward_api_name, dygraph_event_str,
                 amp_logic_str, layout_logic_str, forward_api_name,
-                forward_call_str, get_outputs_str, forward_ad_function_name,
-                log_str, returns_str)
+                before_log_str, forward_call_str, get_outputs_str,
+                forward_api_name, log_str, returns_str)
         else:
             self.forward_definition_str += FORWARD_FUNCTION_TEMPLATE.format(
-                returns_type_str,
-                forward_ad_function_name, inputs_args_definition_str,
-                GetDygraphLogName(forward_api_name), dygraph_event_str,
+                returns_type_str, forward_ad_function_name,
+                inputs_args_definition_str, forward_api_name, dygraph_event_str,
                 amp_logic_str, layout_logic_str, inputs_autograd_meta_str,
-                forward_api_name, forward_call_str, check_nan_inf_str,
-                get_outputs_str, outputs_autograd_meta_str,
+                forward_api_name, before_log_str, forward_call_str,
+                check_nan_inf_str, get_outputs_str, outputs_autograd_meta_str,
                 compute_require_grad_args_str, check_inplace_str,
-                bump_inplace_version_str, node_creation_str,
-                forward_ad_function_name, log_str, returns_str)
+                bump_inplace_version_str, node_creation_str, forward_api_name,
+                log_str, returns_str)
 
         self.forward_declaration_str += f"{returns_type_str} {forward_ad_function_name}({inputs_args_declaration_str});\n"
 
@@ -1898,6 +1915,8 @@ class DygraphNodeGenerator(DygraphFunctionGeneratorBase):
             var_str += f"\n{indent} std::string input_{new_name}_str = paddle::string::Sprintf(TENSOR_{new_name.upper()}_TEMPLATE, egr::EagerUtils::TensorStr({new_name}));"
             var_str += f"\n{indent} input_str += input_{new_name}_str; "
 
+        before_log_str = BEFORE_LOG_PRINT_TEMPLATE.format(var_str)
+
         for name, (ttype, fwd_position,
                    grad_api_position) in backward_grad_outputs_map.items():
             new_name = self.TransformToNextGradName(name)
@@ -1905,16 +1924,16 @@ class DygraphNodeGenerator(DygraphFunctionGeneratorBase):
             var_str += f"\n{indent} std::string output_{new_name}_str = paddle::string::Sprintf(TENSOR_{new_name.upper()}_TEMPLATE, egr::EagerUtils::TensorStr({new_name}));"
             var_str += f"\n{indent} output_str += output_{new_name}_str; "
 
-        log_str = LOG_PRINT_TEMPLATE.format(var_str)
+        log_str = AFTER_LOG_PRINT_TEMPLATE.format(var_str)
 
         self.node_definition_str = GRAD_FUNCTION_TEMPLATE.format(
-            grad_node_name, GetDygraphLogName(self.backward_api_name),
-            fill_zero_str, get_grad_in_args_str, grad_function_prepare_str,
+            grad_node_name, self.backward_api_name, fill_zero_str,
+            get_grad_in_args_str, grad_function_prepare_str,
             compute_require_next_grad_str, inplace_check_str,
-            inplace_for_grad_outs_str, self.backward_api_name,
+            inplace_for_grad_outs_str, self.backward_api_name, before_log_str,
             grad_function_call_str, check_nan_inf_str,
             outputs_autograd_meta_str, next_grad_node_creation_str,
-            GetDygraphLogName(self.backward_api_name), log_str, returns_str)
+            self.backward_api_name, log_str, returns_str)
 
     def run(self):
         super().run()
diff --git a/paddle/fluid/eager/backward.cc b/paddle/fluid/eager/backward.cc
index 04541d082c435a96ab352022cca0239694d66976..b80d0830660fcf70700ff6366e8d32d1cb779fb7 100644
--- a/paddle/fluid/eager/backward.cc
+++ b/paddle/fluid/eager/backward.cc
@@ -71,7 +71,6 @@ std::unordered_map<GradNodeBase*, int> getInDegreeMap(
 
 // Enforce GradNode has TensorWrappers as Input
 void EnforceGradNodeHasInput(GradNodeBase* node) {
-  VLOG(6) << "Running in EnforceGradNodeHasInput";
   PADDLE_ENFORCE_NE(
       node->IsTensorWrappersCleared(),
       true,
diff --git a/paddle/fluid/eager/eager_amp_auto_cast.h b/paddle/fluid/eager/eager_amp_auto_cast.h
index 118c8be861122d8050f65306ff547ef9e799bd95..42961b84bcdb0286c46ff76097642af47ae5c34d 100644
--- a/paddle/fluid/eager/eager_amp_auto_cast.h
+++ b/paddle/fluid/eager/eager_amp_auto_cast.h
@@ -87,7 +87,7 @@ inline paddle::experimental::Tensor EagerAmpAutoCast(
     const std::string& op_name,
     bool trace_backward = true) {
   VLOG(6) << "AMP AmpAutoCasts:"
-          << " input(" << input_name << ") dst_dtype("
+          << " input(" << egr::EagerUtils::TensorStr(input) << " to dst_dtype("
           << paddle::framework::DataType2String(dst_dtype) << ").";
   if (dst_dtype == paddle::experimental::DataType::FLOAT16) {
     if (op_name == "run_program") {
@@ -107,6 +107,7 @@ inline paddle::experimental::Tensor EagerAmpAutoCast(
     }
   }
   if (NeedCast(input, dst_dtype)) {
+    VLOG(6) << "Input : " << input.impl() << "NeedCast";
     return Cast(input, dst_dtype, trace_backward);
   }
   return input;
diff --git a/paddle/fluid/eager/utils.h b/paddle/fluid/eager/utils.h
index 0f9460febbc5d5b7f91fb476421b25ceb5841dab..291d96ff0809f2dd4230782033b918cd23e4568a 100644
--- a/paddle/fluid/eager/utils.h
+++ b/paddle/fluid/eager/utils.h
@@ -257,8 +256,7 @@ class EagerUtils {
     } else {
       tensor_name_str = t.name();
     }
-    const char* TENSOR_INFO_TEMPLATE =
-        "{ Type: [ \"%s\" ], Dtype:[ \"%s\" ], Place:[ \"%s\" ] }";
+    const char* TENSOR_INFO_TEMPLATE = "Type: %s, Dtype: %s, Place: %s";
     std::string tensor_info_str = "";
     if (t.defined()) {
       if (t.initialized()) {
@@ -277,13 +276,13 @@ class EagerUtils {
     }
     if (VLOG_IS_ON(6)) {
       const char* TENSOR_PRINT_TEMPLATE =
-          "{ Name:[ \"%s\" ], Initialized: [ \"%d\" ], Ptr: [ \"%d\" ] "
-          "TensorInfo: [ \"%s\" ], ADInfo:[ \"%s\" ] }";
+          "{Name: %s, Initialized: %d, Ptr: %d "
+          "TensorInfo: [ %s ], ADInfo:[ %s ]}";
       auto* ad_meta = nullable_autograd_meta(t);
-      if (!ad_meta && !(ad_meta->WeakGrad().lock().get())) {
+      if (ad_meta && (ad_meta->WeakGrad().lock().get())) {
         std::string ad_info_str = "";
         const char* AD_INFO_TEMPLATE =
-            "{ Grad: [ \"%s\" ], GradNode: [ %s ], StopGradient: [ %d ] }";
+            "Grad: [ %s ], GradNode: [ %s ], StopGradient: [ %d ]";
         ad_info_str += paddle::string::Sprintf(AD_INFO_TEMPLATE,
                                                TensorStr(ad_meta->Grad()),
                                                GradNodeStr(t),
@@ -304,8 +303,8 @@ class EagerUtils {
       }
     } else if (VLOG_IS_ON(5)) {
       const char* TENSOR_PRINT_TEMPLATE =
-          "{ Name:[ \"%s\" ], Initialized: [ \"%d\" ], Ptr: [ \"%d\" ] "
-          "TensorInfo: [ \"%s\" ] }";
+          "{Name: %s, Initialized: %d , Ptr: %d "
+          "TensorInfo: [ %s ]}";
       return paddle::string::Sprintf(TENSOR_PRINT_TEMPLATE,
                                      tensor_name_str,
                                      t.initialized(),
@@ -313,7 +312,7 @@ class EagerUtils {
                                      tensor_info_str);
     } else if (VLOG_IS_ON(4)) {
       const char* TENSOR_PRINT_TEMPLATE =
-          "{ Name:[ \"%s\" ], Initialized: [ \"%d\" ], Ptr: [ \"%d\" ] }";
+          "{ Name: %s, Initialized: %d, Ptr: %d }";
       return paddle::string::Sprintf(
           TENSOR_PRINT_TEMPLATE, tensor_name_str, t.initialized(), t.impl());
     } else {
@@ -324,10 +323,10 @@ class EagerUtils {
   static const std::string GradNodeStr(const egr::GradNodeBase& node) {
     if (VLOG_IS_ON(6)) {
      const char* GRAD_NODE_TEMPLATE =
-          " { BackwardOutMeta: [ %s ], BackwardInMeta: [ %s ] }";
+          "BackwardOutMeta: [ %s ], BackwardInMeta: [ %s ]";
       const char* GRAD_SLOT_META_TEMPLATE = " {SlotSize: [%d]: %s} ";
       const char* SLOT_INFO_TEMPLATE =
-          " {SlotID: [\"%s\"], StopGradients: [ %s ], Edges[ %s ] }";
+          "SlotID: %s, StopGradients: %s, Edges[ %s ]";
       auto out_metas = node.OutputMeta();
       auto in_metas = node.InputMeta();
       std::string out_slot_str = "";
@@ -372,8 +371,8 @@ class EagerUtils {
           GRAD_NODE_TEMPLATE, out_meta_str, in_meta_str);
     } else if (VLOG_IS_ON(5)) {
       const char* GRAD_NODE_TEMPLATE =
-          " { BackwardOutMeta: [ %s ], BackwardInMeta: [ %s ] }";
-      const char* GRAD_SLOT_META_TEMPLATE = "SlotSize: [\"%d\"]";
+          "BackwardOutMeta: [ %s ], BackwardInMeta: [ %s ]";
+      const char* GRAD_SLOT_META_TEMPLATE = "SlotSize: %d";
       std::string out_meta_str = paddle::string::Sprintf(
           GRAD_SLOT_META_TEMPLATE, node.OutputMeta().size());
       std::string in_meta_str = paddle::string::Sprintf(
@@ -387,7 +386,7 @@ class EagerUtils {
 
   static const std::string GradNodeStr(const paddle::experimental::Tensor& t) {
     auto* ad_meta = nullable_autograd_meta(t);
-    if (ad_meta && !(ad_meta->GetMutableGradNode().get())) {
+    if (ad_meta && (ad_meta->GetMutableGradNode().get())) {
       return GradNodeStr((*ad_meta->GetMutableGradNode().get()));
     } else {
       return "None";
diff --git a/paddle/phi/api/yaml/legacy_ops.yaml b/paddle/phi/api/yaml/legacy_ops.yaml
index e69d54eb1e238db0787b4b9f4e1e096d572c3fa5..a8e2cda3896fbac18a82ce72b48367a6877a3a12 100755
--- a/paddle/phi/api/yaml/legacy_ops.yaml
+++ b/paddle/phi/api/yaml/legacy_ops.yaml
@@ -1464,13 +1464,16 @@
   backward : linear_interp_grad
 
 - op : linspace
-  args : (Tensor start, Tensor stop, Tensor number, DataType dtype)
+  args : (Tensor start, Tensor stop, Tensor number, DataType dtype, Place place)
   output : Tensor(out)
   infer_meta :
     func : LinspaceInferMeta
+    param: [start, stop, number, dtype]
   kernel :
     func : linspace
+    param: [start, stop, number, dtype]
     data_type : dtype
+    backend : place
 
 - op : log
   args : (Tensor x)
diff --git a/python/paddle/fluid/layers/tensor.py b/python/paddle/fluid/layers/tensor.py
index 81640339bdc47ada0c27d86dd20b1de1ba0b6858..8ceb33188ebb2c0cebe617de575f944cb62aeb56 100644
--- a/python/paddle/fluid/layers/tensor.py
+++ b/python/paddle/fluid/layers/tensor.py
@@ -1606,7 +1606,8 @@ def linspace(start, stop, num, dtype=None, name=None):
         with device_guard("cpu"):
             tensor_num = fill_constant([1], 'int32', num)
     if in_dygraph_mode():
-        return _C_ops.linspace(tensor_start, tensor_stop, tensor_num, dtype)
+        return _C_ops.linspace(tensor_start, tensor_stop, tensor_num, dtype,
+                               _current_expected_place())
     if _in_legacy_dygraph():
         return _legacy_C_ops.linspace(tensor_start, tensor_stop, tensor_num,
                                       'dtype', dtype)
diff --git a/python/paddle/tensor/creation.py b/python/paddle/tensor/creation.py
index df5ad2814437f902dea7e30902ad97ef262bf80d..dce1896743c08f240826af96591df3899b65fed0 100644
--- a/python/paddle/tensor/creation.py
+++ b/python/paddle/tensor/creation.py
@@ -100,7 +100,8 @@ def linspace(start, stop, num, dtype=None, name=None):
         with device_guard("cpu"):
             tensor_num = fill_constant([1], 'int32', num, force_cpu=True)
     if in_dygraph_mode():
-        return _C_ops.linspace(tensor_start, tensor_stop, tensor_num, dtype)
+        return _C_ops.linspace(tensor_start, tensor_stop, tensor_num, dtype,
+                               _current_expected_place())
     if _in_legacy_dygraph():
         return _legacy_C_ops.linspace(tensor_start, tensor_stop, tensor_num,
                                       'dtype', dtype)