Unverified commit 4fba3d5e, authored by Jiabin Yang and committed via GitHub

[Eager] Fix linspace error in amp (#46088)

* fix linspace error in amp

* fix log

* fix amp error
Parent be00a42f
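
For context, a minimal sketch of the user-facing scenario this commit targets. The model code and AMP settings below are illustrative assumptions, not taken from the PR itself:

    import paddle

    # Per the commit title, calling linspace under AMP in eager (dygraph) mode
    # previously raised an error. After this change the eager API receives the
    # current expected place explicitly (see the
    # _C_ops.linspace(..., _current_expected_place()) change below), and the
    # kernel backend is selected from that place.
    with paddle.amp.auto_cast():
        x = paddle.linspace(0.0, 1.0, 5, dtype='float32')
        y = x * 2.0
    print(x.numpy(), y.numpy())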
@@ -174,7 +174,7 @@ def GetDygraphLogName(string):
         arr = filter(None, text.split('_'))
         res = ''
         for i in arr:
-            res = res + i[0].upper() + i[1:]
+            res = res + i.lower()
         return res
     string = str2Hump(string)
...
@@ -166,8 +166,11 @@ paddle::small_vector<std::vector<paddle::experimental::Tensor>, egr::kSlotSmallV
   {}
   // Inplace Strategy
   {}
-  // Call grad_api function
   VLOG(5) << \"Running C++ API: \" << \"{}\";
+  // Before log info
+  {}
+  // Call grad_api function
   {}
   // Check NaN and Inf id needed
   {}
@@ -195,8 +198,11 @@ FORWARD_FUNCTION_TEMPLATE = \
   {}
   // Get Input AutoGradMeta
   {}
-  // Forward API Call
   VLOG(5) << \"Running C++ API: \" << \"{}\";
+  // Before log info
+  {}
+  // Forward API Call
   {}
   // Check NaN and Inf if needed
   {}
@@ -220,7 +226,7 @@ FORWARD_FUNCTION_TEMPLATE = \
 }}
 """
-LOG_PRINT_TEMPLATE = \
+AFTER_LOG_PRINT_TEMPLATE = \
 """
   if(VLOG_IS_ON(4)){{
     const char* INPUT_PRINT_TEMPLATE = \"{{ Input: [%s], Output: [%s] }} \";
@@ -229,6 +235,14 @@ LOG_PRINT_TEMPLATE = \
 }}
 """
+BEFORE_LOG_PRINT_TEMPLATE = \
+"""
+  if(VLOG_IS_ON(3)){{
+    const char* INPUT_PRINT_TEMPLATE = \"{{ Input: [%s]}} \";
+    {}
+    VLOG(3) << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str);
+  }}
+"""
 FORWARD_ONLY_FUNCTION_TEMPLATE = \
 """
@@ -240,8 +254,10 @@ FORWARD_ONLY_FUNCTION_TEMPLATE = \
   {}
   // Layout autotune
   {}
-  // Forward API Call
   VLOG(5) << \"Running C++ API: \" << \"{}\";
+  // Before log info
+  {}
+  // Forward API Call
   {}
   // Get Outputs
   {}
@@ -1239,6 +1255,7 @@ class DygraphForwardFunctionGenerator(DygraphFunctionGeneratorBase):
             returns_str = f"{returns_type_str}{{{returns_str}}}"
         # Node Creation Pre-Processing
+        inputs_names = []
         if not self.is_forward_only:
             # 1. Get Input AutoGradMeta
             inputs_autograd_meta_list = []
@@ -1374,12 +1391,14 @@ class DygraphForwardFunctionGenerator(DygraphFunctionGeneratorBase):
             var_str += f"\n{indent} const char* TENSOR_{name.upper()}_TEMPLATE = \"({name}, [%s]), \";"
             var_str += f"\n{indent} std::string input_{name}_str = paddle::string::Sprintf(TENSOR_{name.upper()}_TEMPLATE, egr::EagerUtils::TensorStr({name}));"
             var_str += f"\n{indent} input_str += input_{name}_str; "
+        before_log_str = BEFORE_LOG_PRINT_TEMPLATE.format(var_str)
         for name, (ttype, pos) in forward_outputs_position_map.items():
             var_str += f"\n{indent} const char* TENSOR_{name.upper()}_TEMPLATE = \"({name}, [%s]), \";"
             var_str += f"\n{indent} std::string output_{name}_str = paddle::string::Sprintf(TENSOR_{name.upper()}_TEMPLATE, egr::EagerUtils::TensorStr({name}));"
             var_str += f"\n{indent} output_str += output_{name}_str; "
-        log_str = LOG_PRINT_TEMPLATE.format(var_str)
+        log_str = AFTER_LOG_PRINT_TEMPLATE.format(var_str)
         # Generate forward_definition_str and forward_declaration_str
         if self.is_forward_only:
@@ -1387,23 +1406,21 @@ class DygraphForwardFunctionGenerator(DygraphFunctionGeneratorBase):
             amp_logic_str = "\n VLOG(7) << \" No AMP for {} because it has no input. \"; ".format(
                 forward_ad_function_name)
             self.forward_definition_str += FORWARD_ONLY_FUNCTION_TEMPLATE.format(
-                returns_type_str,
-                forward_ad_function_name, inputs_args_definition_str,
-                GetDygraphLogName(forward_api_name), dygraph_event_str,
+                returns_type_str, forward_ad_function_name,
+                inputs_args_definition_str, forward_api_name, dygraph_event_str,
                 amp_logic_str, layout_logic_str, forward_api_name,
-                forward_call_str, get_outputs_str, forward_ad_function_name,
-                log_str, returns_str)
+                before_log_str, forward_call_str, get_outputs_str,
+                forward_api_name, log_str, returns_str)
         else:
             self.forward_definition_str += FORWARD_FUNCTION_TEMPLATE.format(
-                returns_type_str,
-                forward_ad_function_name, inputs_args_definition_str,
-                GetDygraphLogName(forward_api_name), dygraph_event_str,
+                returns_type_str, forward_ad_function_name,
+                inputs_args_definition_str, forward_api_name, dygraph_event_str,
                 amp_logic_str, layout_logic_str, inputs_autograd_meta_str,
-                forward_api_name, forward_call_str, check_nan_inf_str,
-                get_outputs_str, outputs_autograd_meta_str,
+                forward_api_name, before_log_str, forward_call_str,
+                check_nan_inf_str, get_outputs_str, outputs_autograd_meta_str,
                 compute_require_grad_args_str, check_inplace_str,
-                bump_inplace_version_str, node_creation_str,
-                forward_ad_function_name, log_str, returns_str)
+                bump_inplace_version_str, node_creation_str, forward_api_name,
+                log_str, returns_str)
         self.forward_declaration_str += f"{returns_type_str} {forward_ad_function_name}({inputs_args_declaration_str});\n"
@@ -1898,6 +1915,8 @@ class DygraphNodeGenerator(DygraphFunctionGeneratorBase):
             var_str += f"\n{indent} std::string input_{new_name}_str = paddle::string::Sprintf(TENSOR_{new_name.upper()}_TEMPLATE, egr::EagerUtils::TensorStr({new_name}));"
             var_str += f"\n{indent} input_str += input_{new_name}_str; "
+        before_log_str = BEFORE_LOG_PRINT_TEMPLATE.format(var_str)
         for name, (ttype, fwd_position,
                    grad_api_position) in backward_grad_outputs_map.items():
             new_name = self.TransformToNextGradName(name)
@@ -1905,16 +1924,16 @@ class DygraphNodeGenerator(DygraphFunctionGeneratorBase):
             var_str += f"\n{indent} std::string output_{new_name}_str = paddle::string::Sprintf(TENSOR_{new_name.upper()}_TEMPLATE, egr::EagerUtils::TensorStr({new_name}));"
             var_str += f"\n{indent} output_str += output_{new_name}_str; "
-        log_str = LOG_PRINT_TEMPLATE.format(var_str)
+        log_str = AFTER_LOG_PRINT_TEMPLATE.format(var_str)
         self.node_definition_str = GRAD_FUNCTION_TEMPLATE.format(
-            grad_node_name, GetDygraphLogName(self.backward_api_name),
-            fill_zero_str, get_grad_in_args_str, grad_function_prepare_str,
+            grad_node_name, self.backward_api_name, fill_zero_str,
+            get_grad_in_args_str, grad_function_prepare_str,
             compute_require_next_grad_str, inplace_check_str,
-            inplace_for_grad_outs_str, self.backward_api_name,
+            inplace_for_grad_outs_str, self.backward_api_name, before_log_str,
             grad_function_call_str, check_nan_inf_str,
             outputs_autograd_meta_str, next_grad_node_creation_str,
-            GetDygraphLogName(self.backward_api_name), log_str, returns_str)
+            self.backward_api_name, log_str, returns_str)

     def run(self):
         super().run()
...
@@ -71,7 +71,6 @@ std::unordered_map<GradNodeBase*, int> getInDegreeMap(
 // Enforce GradNode has TensorWrappers as Input
 void EnforceGradNodeHasInput(GradNodeBase* node) {
-  VLOG(6) << "Running in EnforceGradNodeHasInput";
   PADDLE_ENFORCE_NE(
       node->IsTensorWrappersCleared(),
       true,
...
@@ -87,7 +87,7 @@ inline paddle::experimental::Tensor EagerAmpAutoCast(
     const std::string& op_name,
     bool trace_backward = true) {
   VLOG(6) << "AMP AmpAutoCasts:"
-          << " input(" << input_name << ") dst_dtype("
+          << " input(" << egr::EagerUtils::TensorStr(input) << " to dst_dtype("
           << paddle::framework::DataType2String(dst_dtype) << ").";
   if (dst_dtype == paddle::experimental::DataType::FLOAT16) {
     if (op_name == "run_program") {
@@ -107,6 +107,7 @@ inline paddle::experimental::Tensor EagerAmpAutoCast(
     }
   }
   if (NeedCast(input, dst_dtype)) {
+    VLOG(6) << "Input : " << input.impl() << "NeedCast";
     return Cast(input, dst_dtype, trace_backward);
   }
   return input;
...
@@ -257,8 +257,7 @@ class EagerUtils {
     } else {
       tensor_name_str = t.name();
     }
-    const char* TENSOR_INFO_TEMPLATE =
-        "{ Type: [ \"%s\" ], Dtype:[ \"%s\" ], Place:[ \"%s\" ] }";
+    const char* TENSOR_INFO_TEMPLATE = "Type: %s, Dtype: %s, Place: %s";
     std::string tensor_info_str = "";
     if (t.defined()) {
       if (t.initialized()) {
@@ -277,13 +276,13 @@ class EagerUtils {
       }
       if (VLOG_IS_ON(6)) {
         const char* TENSOR_PRINT_TEMPLATE =
-            "{ Name:[ \"%s\" ], Initialized: [ \"%d\" ], Ptr: [ \"%d\" ] "
-            "TensorInfo: [ \"%s\" ], ADInfo:[ \"%s\" ] }";
+            "{Name: %s, Initialized: %d, Ptr: %d "
+            "TensorInfo: [ %s ], ADInfo:[ %s ]}";
         auto* ad_meta = nullable_autograd_meta(t);
-        if (!ad_meta && !(ad_meta->WeakGrad().lock().get())) {
+        if (ad_meta && (ad_meta->WeakGrad().lock().get())) {
           std::string ad_info_str = "";
           const char* AD_INFO_TEMPLATE =
-              "{ Grad: [ \"%s\" ], GradNode: [ %s ], StopGradient: [ %d ] }";
+              "Grad: [ %s ], GradNode: [ %s ], StopGradient: [ %d ]";
           ad_info_str += paddle::string::Sprintf(AD_INFO_TEMPLATE,
                                                  TensorStr(ad_meta->Grad()),
                                                  GradNodeStr(t),
@@ -304,8 +303,8 @@ class EagerUtils {
       }
     } else if (VLOG_IS_ON(5)) {
       const char* TENSOR_PRINT_TEMPLATE =
-          "{ Name:[ \"%s\" ], Initialized: [ \"%d\" ], Ptr: [ \"%d\" ] "
-          "TensorInfo: [ \"%s\" ] }";
+          "{Name: %s, Initialized: %d , Ptr: %d "
+          "TensorInfo: [ %s ]}";
       return paddle::string::Sprintf(TENSOR_PRINT_TEMPLATE,
                                      tensor_name_str,
                                      t.initialized(),
@@ -313,7 +312,7 @@ class EagerUtils {
                                     tensor_info_str);
     } else if (VLOG_IS_ON(4)) {
       const char* TENSOR_PRINT_TEMPLATE =
-          "{ Name:[ \"%s\" ], Initialized: [ \"%d\" ], Ptr: [ \"%d\" ] }";
+          "{ Name: %s, Initialized: %d, Ptr: %d }";
       return paddle::string::Sprintf(
           TENSOR_PRINT_TEMPLATE, tensor_name_str, t.initialized(), t.impl());
     } else {
@@ -324,10 +323,10 @@ class EagerUtils {
   static const std::string GradNodeStr(const egr::GradNodeBase& node) {
     if (VLOG_IS_ON(6)) {
       const char* GRAD_NODE_TEMPLATE =
-          " { BackwardOutMeta: [ %s ], BackwardInMeta: [ %s ] }";
+          "BackwardOutMeta: [ %s ], BackwardInMeta: [ %s ]";
       const char* GRAD_SLOT_META_TEMPLATE = " {SlotSize: [%d]: %s} ";
       const char* SLOT_INFO_TEMPLATE =
-          " {SlotID: [\"%s\"], StopGradients: [ %s ], Edges[ %s ] }";
+          "SlotID: %s, StopGradients: %s, Edges[ %s ]";
       auto out_metas = node.OutputMeta();
       auto in_metas = node.InputMeta();
       std::string out_slot_str = "";
@@ -372,8 +371,8 @@ class EagerUtils {
           GRAD_NODE_TEMPLATE, out_meta_str, in_meta_str);
     } else if (VLOG_IS_ON(5)) {
       const char* GRAD_NODE_TEMPLATE =
-          " { BackwardOutMeta: [ %s ], BackwardInMeta: [ %s ] }";
-      const char* GRAD_SLOT_META_TEMPLATE = "SlotSize: [\"%d\"]";
+          "BackwardOutMeta: [ %s ], BackwardInMeta: [ %s ]";
+      const char* GRAD_SLOT_META_TEMPLATE = "SlotSize: %d";
       std::string out_meta_str = paddle::string::Sprintf(
           GRAD_SLOT_META_TEMPLATE, node.OutputMeta().size());
       std::string in_meta_str = paddle::string::Sprintf(
@@ -387,7 +386,7 @@ class EagerUtils {
   static const std::string GradNodeStr(const paddle::experimental::Tensor& t) {
     auto* ad_meta = nullable_autograd_meta(t);
-    if (ad_meta && !(ad_meta->GetMutableGradNode().get())) {
+    if (ad_meta && (ad_meta->GetMutableGradNode().get())) {
       return GradNodeStr((*ad_meta->GetMutableGradNode().get()));
     } else {
       return "None";
...
@@ -1464,13 +1464,16 @@
   backward : linear_interp_grad

 - op : linspace
-  args : (Tensor start, Tensor stop, Tensor number, DataType dtype)
+  args : (Tensor start, Tensor stop, Tensor number, DataType dtype, Place place)
   output : Tensor(out)
   infer_meta :
     func : LinspaceInferMeta
+    param: [start, stop, number, dtype]
   kernel :
     func : linspace
+    param: [start, stop, number, dtype]
     data_type : dtype
+    backend : place

 - op : log
   args : (Tensor x)
...
@@ -1606,7 +1606,8 @@ def linspace(start, stop, num, dtype=None, name=None):
         with device_guard("cpu"):
             tensor_num = fill_constant([1], 'int32', num)
     if in_dygraph_mode():
-        return _C_ops.linspace(tensor_start, tensor_stop, tensor_num, dtype)
+        return _C_ops.linspace(tensor_start, tensor_stop, tensor_num, dtype,
+                               _current_expected_place())
     if _in_legacy_dygraph():
         return _legacy_C_ops.linspace(tensor_start, tensor_stop, tensor_num,
                                       'dtype', dtype)
...
@@ -100,7 +100,8 @@ def linspace(start, stop, num, dtype=None, name=None):
         with device_guard("cpu"):
             tensor_num = fill_constant([1], 'int32', num, force_cpu=True)
     if in_dygraph_mode():
-        return _C_ops.linspace(tensor_start, tensor_stop, tensor_num, dtype)
+        return _C_ops.linspace(tensor_start, tensor_stop, tensor_num, dtype,
+                               _current_expected_place())
     if _in_legacy_dygraph():
         return _legacy_C_ops.linspace(tensor_start, tensor_stop, tensor_num,
                                       'dtype', dtype)
...
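
Usage note (an assumption about Paddle's standard glog setup, not something stated in this diff): the new BEFORE_LOG_PRINT_TEMPLATE block prints at VLOG level 3 and the existing input/output dump at level 4, so the generated logging can be inspected by raising the glog verbosity before paddle is imported, for example:

    import os
    # Hypothetical check: GLOG_v must be set before importing paddle.
    # Level 3 shows the new "before" input dump; level 4 also shows the
    # post-call input/output dump.
    os.environ.setdefault("GLOG_v", "4")

    import paddle
    x = paddle.linspace(0.0, 1.0, 5)  # the generated eager wrapper now logs its inputs before the API call
    print(x.numpy())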