PaddlePaddle/Paddle · Commit 4fba3d5e (unverified)
Authored Sep 16, 2022 by Jiabin Yang; committed by GitHub on Sep 16, 2022
[Eager] Fix linspace error in amp (#46088)
* fix linspace error in amp
* fix log
* fix amp error
Parent: be00a42f
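For context, a minimal sketch of the scenario this commit addresses (an assumption inferred from the commit message, not a verified repro: before the fix, a creation op such as linspace could error or land on the wrong place when called inside an AMP auto_cast scope in eager mode):

    import paddle

    # Hypothetical repro sketch: linspace inside an AMP scope, mixed with
    # AMP-eligible compute. After this fix, the op receives the current
    # expected place and stays on the active device with the right dtype.
    with paddle.amp.auto_cast():
        steps = paddle.linspace(0.0, 1.0, 10)   # creation op inside AMP scope
        y = paddle.rand([10]) * steps           # ordinary AMP-eligible compute
    print(steps.dtype, steps.place)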
Showing 8 changed files with 65 additions and 42 deletions (+65, -42)
paddle/fluid/eager/auto_code_generator/generator/codegen_utils.py (+1, -1)
paddle/fluid/eager/auto_code_generator/generator/eager_gen.py (+41, -22)
paddle/fluid/eager/backward.cc (+0, -1)
paddle/fluid/eager/eager_amp_auto_cast.h (+2, -1)
paddle/fluid/eager/utils.h (+13, -14)
paddle/phi/api/yaml/legacy_ops.yaml (+4, -1)
python/paddle/fluid/layers/tensor.py (+2, -1)
python/paddle/tensor/creation.py (+2, -1)
paddle/fluid/eager/auto_code_generator/generator/codegen_utils.py

@@ -174,7 +174,7 @@ def GetDygraphLogName(string):
         arr = filter(None, text.split('_'))
         res = ''
         for i in arr:
-            res = res + i[0].upper() + i[1:]
+            res = res + i.lower()
         return res
 
     string = str2Hump(string)
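The one-line change above switches the generated log name from CamelCase to an all-lowercase concatenation. A standalone before/after sketch (hypothetical copies of the inner helper, not the real codegen_utils.py):

    def str2hump_old(text):
        # pre-change: CamelCase each underscore-separated chunk
        return ''.join(i[0].upper() + i[1:] for i in filter(None, text.split('_')))

    def str2hump_new(text):
        # post-change: concatenate lowercased chunks
        return ''.join(i.lower() for i in filter(None, text.split('_')))

    print(str2hump_old("sigmoid_grad"))  # -> SigmoidGrad
    print(str2hump_new("sigmoid_grad"))  # -> sigmoidgrad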
paddle/fluid/eager/auto_code_generator/generator/eager_gen.py

@@ -166,8 +166,11 @@ paddle::small_vector<std::vector<paddle::experimental::Tensor>, egr::kSlotSmallV
   {}
   // Inplace Strategy
   {}
-  // Call grad_api function
-  VLOG(5) << \"Running C++ API: \" << \"{}\";
+
+  // Before log info
+  {}
+
+  // Call grad_api function
   {}
   // Check NaN and Inf id needed
   {}
@@ -195,8 +198,11 @@ FORWARD_FUNCTION_TEMPLATE = \
   {}
   // Get Input AutoGradMeta
   {}
-  // Forward API Call
-  VLOG(5) << \"Running C++ API: \" << \"{}\";
+
+  // Before log info
+  {}
+
+  // Forward API Call
   {}
   // Check NaN and Inf if needed
   {}
@@ -220,7 +226,7 @@ FORWARD_FUNCTION_TEMPLATE = \
 }}
 """
 
-LOG_PRINT_TEMPLATE = \
+AFTER_LOG_PRINT_TEMPLATE = \
 """
   if(VLOG_IS_ON(4)){{
     const char* INPUT_PRINT_TEMPLATE = \"{{ Input: [%s], Output: [%s] }} \";
@@ -229,6 +235,14 @@ LOG_PRINT_TEMPLATE = \
 }}
 """
 
+BEFORE_LOG_PRINT_TEMPLATE = \
+"""
+  if(VLOG_IS_ON(3)){{
+    const char* INPUT_PRINT_TEMPLATE = \"{{ Input: [%s]}} \";
+    {}
+    VLOG(3) << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str);
+  }}
+"""
 FORWARD_ONLY_FUNCTION_TEMPLATE = \
 """
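These generator templates are ordinary Python format strings: each {} slot is filled positionally with a generated C++ snippet, while {{ and }} escape to literal braces in the emitted C++. A minimal standalone sketch of how the new BEFORE_LOG_PRINT_TEMPLATE expands (the var_str value is illustrative, not what the generator actually builds):

    BEFORE_LOG_PRINT_TEMPLATE = """
      if(VLOG_IS_ON(3)){{
        const char* INPUT_PRINT_TEMPLATE = "{{ Input: [%s]}} ";
        {}
        VLOG(3) << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str);
      }}
    """

    # Illustrative stand-in for the input-logging snippet accumulated in var_str.
    var_str = 'std::string input_str = "";'
    print(BEFORE_LOG_PRINT_TEMPLATE.format(var_str))
    # -> a C++ block with single braces and the snippet spliced into the {} slot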
@@ -240,8 +254,10 @@ FORWARD_ONLY_FUNCTION_TEMPLATE = \
   {}
   // Layout autotune
   {}
-  // Forward API Call
-  VLOG(5) << \"Running C++ API: \" << \"{}\";
+
+  // Before log info
+  {}
+  // Forward API Call
   {}
   // Get Outputs
   {}
@@ -1239,6 +1255,7 @@ class DygraphForwardFunctionGenerator(DygraphFunctionGeneratorBase):
         returns_str = f"{returns_type_str}{{{returns_str}}}"
 
         # Node Creation Pre-Processing
+        inputs_names = []
         if not self.is_forward_only:
             # 1. Get Input AutoGradMeta
             inputs_autograd_meta_list = []
@@ -1374,12 +1391,14 @@ class DygraphForwardFunctionGenerator(DygraphFunctionGeneratorBase):
             var_str += f"\n{indent}  const char* TENSOR_{name.upper()}_TEMPLATE = \"({name}, [%s]), \";"
             var_str += f"\n{indent}  std::string input_{name}_str = paddle::string::Sprintf(TENSOR_{name.upper()}_TEMPLATE, egr::EagerUtils::TensorStr({name}));"
             var_str += f"\n{indent}  input_str += input_{name}_str; "
 
+        before_log_str = BEFORE_LOG_PRINT_TEMPLATE.format(var_str)
+
         for name, (ttype, pos) in forward_outputs_position_map.items():
             var_str += f"\n{indent}  const char* TENSOR_{name.upper()}_TEMPLATE = \"({name}, [%s]), \";"
             var_str += f"\n{indent}  std::string output_{name}_str = paddle::string::Sprintf(TENSOR_{name.upper()}_TEMPLATE, egr::EagerUtils::TensorStr({name}));"
             var_str += f"\n{indent}  output_str += output_{name}_str; "
 
-        log_str = LOG_PRINT_TEMPLATE.format(var_str)
+        log_str = AFTER_LOG_PRINT_TEMPLATE.format(var_str)
 
         # Generate forward_definition_str and forward_declaration_str
         if self.is_forward_only:
@@ -1387,23 +1406,21 @@ class DygraphForwardFunctionGenerator(DygraphFunctionGeneratorBase):
             amp_logic_str = "\n VLOG(7) << \"No AMP for {} because it has no input.\"; ".format(
                 forward_ad_function_name)
             self.forward_definition_str += FORWARD_ONLY_FUNCTION_TEMPLATE.format(
-                returns_type_str, forward_ad_function_name,
-                inputs_args_definition_str, GetDygraphLogName(forward_api_name),
-                dygraph_event_str,
+                returns_type_str, forward_ad_function_name,
+                inputs_args_definition_str, forward_api_name, dygraph_event_str,
                 amp_logic_str, layout_logic_str, forward_api_name,
-                forward_call_str, get_outputs_str,
-                forward_ad_function_name, log_str, returns_str)
+                before_log_str, forward_call_str, get_outputs_str,
+                forward_api_name, log_str, returns_str)
         else:
             self.forward_definition_str += FORWARD_FUNCTION_TEMPLATE.format(
-                returns_type_str, forward_ad_function_name,
-                inputs_args_definition_str, GetDygraphLogName(forward_api_name),
-                dygraph_event_str,
+                returns_type_str, forward_ad_function_name,
+                inputs_args_definition_str, forward_api_name, dygraph_event_str,
                 amp_logic_str, layout_logic_str, inputs_autograd_meta_str,
-                forward_api_name, forward_call_str, check_nan_inf_str,
-                get_outputs_str, outputs_autograd_meta_str,
+                forward_api_name, before_log_str, forward_call_str,
+                check_nan_inf_str, get_outputs_str, outputs_autograd_meta_str,
                 compute_require_grad_args_str, check_inplace_str,
-                bump_inplace_version_str, node_creation_str,
-                forward_ad_function_name, log_str, returns_str)
+                bump_inplace_version_str, node_creation_str, forward_api_name,
+                log_str, returns_str)
 
         self.forward_declaration_str += f"{returns_type_str} {forward_ad_function_name}({inputs_args_declaration_str});\n"
@@ -1898,6 +1915,8 @@ class DygraphNodeGenerator(DygraphFunctionGeneratorBase):
             var_str += f"\n{indent}  std::string input_{new_name}_str = paddle::string::Sprintf(TENSOR_{new_name.upper()}_TEMPLATE, egr::EagerUtils::TensorStr({new_name}));"
             var_str += f"\n{indent}  input_str += input_{new_name}_str; "
 
+        before_log_str = BEFORE_LOG_PRINT_TEMPLATE.format(var_str)
+
         for name, (ttype, fwd_position, grad_api_position) in backward_grad_outputs_map.items():
             new_name = self.TransformToNextGradName(name)
@@ -1905,16 +1924,16 @@ class DygraphNodeGenerator(DygraphFunctionGeneratorBase):
             var_str += f"\n{indent}  std::string output_{new_name}_str = paddle::string::Sprintf(TENSOR_{new_name.upper()}_TEMPLATE, egr::EagerUtils::TensorStr({new_name}));"
             var_str += f"\n{indent}  output_str += output_{new_name}_str; "
 
-        log_str = LOG_PRINT_TEMPLATE.format(var_str)
+        log_str = AFTER_LOG_PRINT_TEMPLATE.format(var_str)
 
         self.node_definition_str = GRAD_FUNCTION_TEMPLATE.format(
-            grad_node_name, GetDygraphLogName(self.backward_api_name),
-            fill_zero_str, get_grad_in_args_str, grad_function_prepare_str,
+            grad_node_name, self.backward_api_name, fill_zero_str,
+            get_grad_in_args_str, grad_function_prepare_str,
             compute_require_next_grad_str, inplace_check_str,
-            inplace_for_grad_outs_str, self.backward_api_name,
+            inplace_for_grad_outs_str, self.backward_api_name, before_log_str,
             grad_function_call_str, check_nan_inf_str,
             outputs_autograd_meta_str, next_grad_node_creation_str,
-            GetDygraphLogName(self.backward_api_name), log_str, returns_str)
+            self.backward_api_name, log_str, returns_str)
 
     def run(self):
         super().run()
paddle/fluid/eager/backward.cc

@@ -71,7 +71,6 @@ std::unordered_map<GradNodeBase*, int> getInDegreeMap(
 // Enforce GradNode has TensorWrappers as Input
 void EnforceGradNodeHasInput(GradNodeBase* node) {
-  VLOG(6) << "Running in EnforceGradNodeHasInput";
   PADDLE_ENFORCE_NE(node->IsTensorWrappersCleared(),
                     true,
paddle/fluid/eager/eager_amp_auto_cast.h

@@ -87,7 +87,7 @@ inline paddle::experimental::Tensor EagerAmpAutoCast(
     const std::string& op_name,
     bool trace_backward = true) {
   VLOG(6) << "AMP AmpAutoCasts:"
-          << " input(" << input_name << ") dst_dtype("
+          << " input(" << egr::EagerUtils::TensorStr(input) << " to dst_dtype("
           << paddle::framework::DataType2String(dst_dtype) << ").";
   if (dst_dtype == paddle::experimental::DataType::FLOAT16) {
     if (op_name == "run_program") {

@@ -107,6 +107,7 @@ inline paddle::experimental::Tensor EagerAmpAutoCast(
     }
   }
   if (NeedCast(input, dst_dtype)) {
+    VLOG(6) << "Input : " << input.impl() << "NeedCast";
     return Cast(input, dst_dtype, trace_backward);
   }
   return input;
paddle/fluid/eager/utils.h

@@ -257,8 +257,7 @@ class EagerUtils {
     } else {
       tensor_name_str = t.name();
     }
-    const char* TENSOR_INFO_TEMPLATE =
-        "{ Type: [ \"%s\" ], Dtype:[ \"%s\" ], Place:[ \"%s\" ] }";
+    const char* TENSOR_INFO_TEMPLATE = "Type: %s, Dtype: %s, Place: %s";
     std::string tensor_info_str = "";
     if (t.defined()) {
       if (t.initialized()) {

@@ -277,13 +276,13 @@ class EagerUtils {
     }
     if (VLOG_IS_ON(6)) {
       const char* TENSOR_PRINT_TEMPLATE =
-          "{ Name:[ \"%s\" ], Initialized: [ \"%d\" ], Ptr: [ \"%d\" ] "
-          "TensorInfo: [ \"%s\" ], ADInfo:[ \"%s\" ] }";
+          "{ Name: %s, Initialized: %d, Ptr: %d "
+          "TensorInfo: [ %s ], ADInfo:[ %s ] }";
       auto* ad_meta = nullable_autograd_meta(t);
-      if (!ad_meta && !(ad_meta->WeakGrad().lock().get())) {
+      if (ad_meta && (ad_meta->WeakGrad().lock().get())) {
         std::string ad_info_str = "";
         const char* AD_INFO_TEMPLATE =
-            "{ Grad: [ \"%s\" ], GradNode: [ %s ], StopGradient: [ %d ] }";
+            "Grad: [ %s ], GradNode: [ %s ], StopGradient: [ %d ]";
         ad_info_str += paddle::string::Sprintf(AD_INFO_TEMPLATE,
                                                TensorStr(ad_meta->Grad()),
                                                GradNodeStr(t),

@@ -304,8 +303,8 @@ class EagerUtils {
       }
     } else if (VLOG_IS_ON(5)) {
       const char* TENSOR_PRINT_TEMPLATE =
-          "{ Name:[ \"%s\" ], Initialized: [ \"%d\" ], Ptr: [ \"%d\" ] "
-          "TensorInfo: [ \"%s\" ] }";
+          "{ Name: %s, Initialized: %d , Ptr: %d "
+          "TensorInfo: [ %s ] }";
       return paddle::string::Sprintf(TENSOR_PRINT_TEMPLATE,
                                      tensor_name_str,
                                      t.initialized(),

@@ -313,7 +312,7 @@ class EagerUtils {
           tensor_info_str);
     } else if (VLOG_IS_ON(4)) {
       const char* TENSOR_PRINT_TEMPLATE =
-          "{ Name: [ \"%s\" ], Initialized: [ \"%d\" ], Ptr: [ \"%d\" ] }";
+          "{ Name: %s, Initialized: %d, Ptr: %d }";
       return paddle::string::Sprintf(
           TENSOR_PRINT_TEMPLATE, tensor_name_str, t.initialized(), t.impl());
     } else {

@@ -324,10 +323,10 @@ class EagerUtils {
   static const std::string GradNodeStr(const egr::GradNodeBase& node) {
     if (VLOG_IS_ON(6)) {
       const char* GRAD_NODE_TEMPLATE =
-          " { BackwardOutMeta: [ %s ], BackwardInMeta: [ %s ] } ";
+          "BackwardOutMeta: [ %s ], BackwardInMeta: [ %s ]";
       const char* GRAD_SLOT_META_TEMPLATE = " {SlotSize: [%d]: %s} ";
       const char* SLOT_INFO_TEMPLATE =
-          " {SlotID: [ \"%s\" ], StopGradients: [ %s ], Edges[ %s ] } ";
+          "SlotID: %s, StopGradients: %s, Edges[ %s ]";
       auto out_metas = node.OutputMeta();
       auto in_metas = node.InputMeta();
       std::string out_slot_str = "";

@@ -372,8 +371,8 @@ class EagerUtils {
           GRAD_NODE_TEMPLATE, out_meta_str, in_meta_str);
     } else if (VLOG_IS_ON(5)) {
       const char* GRAD_NODE_TEMPLATE =
-          " { BackwardOutMeta: [ %s ], BackwardInMeta: [ %s ] } ";
-      const char* GRAD_SLOT_META_TEMPLATE = "SlotSize: [\"%d\"] ";
+          "BackwardOutMeta: [ %s ], BackwardInMeta: [ %s ]";
+      const char* GRAD_SLOT_META_TEMPLATE = "SlotSize: %d ";
       std::string out_meta_str = paddle::string::Sprintf(
           GRAD_SLOT_META_TEMPLATE, node.OutputMeta().size());
       std::string in_meta_str = paddle::string::Sprintf(

@@ -387,7 +386,7 @@ class EagerUtils {
   static const std::string GradNodeStr(const paddle::experimental::Tensor& t) {
     auto* ad_meta = nullable_autograd_meta(t);
-    if (ad_meta && !(ad_meta->GetMutableGradNode().get())) {
+    if (ad_meta && (ad_meta->GetMutableGradNode().get())) {
       return GradNodeStr((*ad_meta->GetMutableGradNode().get()));
     } else {
       return "None";
paddle/phi/api/yaml/legacy_ops.yaml

@@ -1464,13 +1464,16 @@
   backward : linear_interp_grad
 
 - op : linspace
-  args : (Tensor start, Tensor stop, Tensor number, DataType dtype)
+  args : (Tensor start, Tensor stop, Tensor number, DataType dtype, Place place)
   output : Tensor(out)
   infer_meta :
     func : LinspaceInferMeta
+    param : [start, stop, number, dtype]
   kernel :
     func : linspace
+    param : [start, stop, number, dtype]
     data_type : dtype
+    backend : place
 
 - op : log
   args : (Tensor x)
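The new Place argument together with backend : place asks the op code generator to select the kernel backend from the runtime place rather than from the input tensors (linspace's size input is pinned to CPU). A rough sketch of the dispatch idea, with loudly hypothetical helper names (select_kernel is an illustrative stand-in, not a real Paddle API):

    # Hypothetical sketch of "backend : place" dispatch.
    def linspace_dispatch(start, stop, number, dtype, place):
        # pick the kernel for the requested place, not for the CPU-pinned inputs
        kernel = select_kernel("linspace", backend=place, dtype=dtype)
        return kernel(start, stop, number, dtype)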
python/paddle/fluid/layers/tensor.py

@@ -1606,7 +1606,8 @@ def linspace(start, stop, num, dtype=None, name=None):
     with device_guard("cpu"):
         tensor_num = fill_constant([1], 'int32', num)
     if in_dygraph_mode():
-        return _C_ops.linspace(tensor_start, tensor_stop, tensor_num, dtype)
+        return _C_ops.linspace(tensor_start, tensor_stop, tensor_num, dtype,
+                               _current_expected_place())
     if _in_legacy_dygraph():
         return _legacy_C_ops.linspace(tensor_start, tensor_stop, tensor_num,
                                       'dtype', dtype)
python/paddle/tensor/creation.py

@@ -100,7 +100,8 @@ def linspace(start, stop, num, dtype=None, name=None):
     with device_guard("cpu"):
         tensor_num = fill_constant([1], 'int32', num, force_cpu=True)
     if in_dygraph_mode():
-        return _C_ops.linspace(tensor_start, tensor_stop, tensor_num, dtype)
+        return _C_ops.linspace(tensor_start, tensor_stop, tensor_num, dtype,
+                               _current_expected_place())
     if _in_legacy_dygraph():
         return _legacy_C_ops.linspace(tensor_start, tensor_stop, tensor_num,
                                       'dtype', dtype)
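At the user level the fix is transparent; a quick usage sketch of the repaired path (device name is an example, not part of the change):

    import paddle

    # linspace now forwards _current_expected_place() to the C++ op, so the
    # result is created on the active device instead of an implicit default.
    paddle.device.set_device("cpu")  # e.g. "gpu:0" where available
    x = paddle.linspace(0, 1, 5)
    print(x.place)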