PaddlePaddle / Paddle, commit 38c0fd02 (unverified)

Authored Sep 20, 2022 by Jiabin Yang; committed via GitHub on Sep 20, 2022.
[Eager] Fix linspace error in amp (#46088) (#46206)
* fix linspace error in amp
* fix log
* fix amp error
Parent: bc92d5f5
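For context, a hypothetical repro of the failure mode this commit targets (inferred from the title and the diffs below, not taken from the commit itself):

    import paddle

    # linspace builds its start/stop/num tensors on CPU under device_guard("cpu");
    # before this fix the dygraph call passed no Place, so under AMP on GPU the
    # kernel could be resolved against the wrong backend and error out.
    with paddle.amp.auto_cast():
        x = paddle.linspace(0, 10, 5)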
Showing 8 changed files with 65 additions and 42 deletions:
paddle/fluid/eager/auto_code_generator/generator/codegen_utils.py   +1   -1
paddle/fluid/eager/auto_code_generator/generator/eager_gen.py       +41  -22
paddle/fluid/eager/backward.cc                                      +0   -1
paddle/fluid/eager/eager_amp_auto_cast.h                            +2   -1
paddle/fluid/eager/utils.h                                          +13  -14
paddle/phi/api/yaml/legacy_ops.yaml                                 +4   -1
python/paddle/fluid/layers/tensor.py                                +2   -1
python/paddle/tensor/creation.py                                    +2   -1
paddle/fluid/eager/auto_code_generator/generator/codegen_utils.py

@@ -174,7 +174,7 @@ def GetDygraphLogName(string):
         arr = filter(None, text.split('_'))
         res = ''
         for i in arr:
-            res = res + i[0].upper() + i[1:]
+            res = res + i.lower()
         return res

     string = str2Hump(string)
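For reference, the effect of this one-line change on GetDygraphLogName's helper, as a runnable before/after sketch (example input assumed):

    def str2hump_old(text):
        res = ''
        for i in filter(None, text.split('_')):
            res = res + i[0].upper() + i[1:]  # old: CamelCase each chunk
        return res

    def str2hump_new(text):
        res = ''
        for i in filter(None, text.split('_')):
            res = res + i.lower()             # new: flat lower-case log name
        return res

    print(str2hump_old('linspace_ad_func'))   # -> LinspaceAdFunc
    print(str2hump_new('linspace_ad_func'))   # -> linspaceadfunc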
paddle/fluid/eager/auto_code_generator/generator/eager_gen.py

@@ -166,8 +166,11 @@ paddle::small_vector<std::vector<paddle::experimental::Tensor>, egr::kSlotSmallV
   {}
   // Inplace Strategy
   {}
-  // Call grad_api function
   VLOG(5) << \"Running C++ API: \" << \"{}\";
+  // Before log info
+  {}
+  // Call grad_api function
   {}
   // Check NaN and Inf id needed
   {}
@@ -195,8 +198,11 @@ FORWARD_FUNCTION_TEMPLATE = \
   {}
   // Get Input AutoGradMeta
   {}
-  // Forward API Call
   VLOG(5) << \"Running C++ API: \" << \"{}\";
+  // Before log info
+  {}
+  // Forward API Call
   {}
   // Check NaN and Inf if needed
   {}
@@ -220,7 +226,7 @@ FORWARD_FUNCTION_TEMPLATE = \
 }}
 """

-LOG_PRINT_TEMPLATE = \
+AFTER_LOG_PRINT_TEMPLATE = \
 """
   if(VLOG_IS_ON(4)){{
     const char* INPUT_PRINT_TEMPLATE = \"{{ Input: [%s], Output: [%s] }}\";
@@ -229,6 +235,14 @@ LOG_PRINT_TEMPLATE = \
   }}
 """

+BEFORE_LOG_PRINT_TEMPLATE = \
+"""
+  if(VLOG_IS_ON(3)){{
+    const char* INPUT_PRINT_TEMPLATE = \"{{ Input: [%s]}}\";
+    {}
+    VLOG(3) << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str);
+  }}
+"""
+
 FORWARD_ONLY_FUNCTION_TEMPLATE = \
 """
@@ -240,8 +254,10 @@ FORWARD_ONLY_FUNCTION_TEMPLATE = \
   {}
   // Layout autotune
   {}
-  // Forward API Call
   VLOG(5) << \"Running C++ API: \" << \"{}\";
+  // Before log info
+  {}
+  // Forward API Call
   {}
   // Get Outputs
   {}
@@ -1286,6 +1302,7 @@ class DygraphForwardFunctionGenerator(DygraphFunctionGeneratorBase):
         returns_str = f"{returns_type_str}{{{returns_str}}}"

         # Node Creation Pre-Processing
+        inputs_names = []
         if not self.is_forward_only:
             # 1. Get Input AutoGradMeta
             inputs_autograd_meta_list = []
@@ -1400,12 +1417,14 @@ class DygraphForwardFunctionGenerator(DygraphFunctionGeneratorBase):
             var_str += f"\n{indent}const char* TENSOR_{name.upper()}_TEMPLATE = \"({name}, [%s]),\";"
             var_str += f"\n{indent}std::string input_{name}_str = paddle::string::Sprintf(TENSOR_{name.upper()}_TEMPLATE, egr::EagerUtils::TensorStr({name}));"
             var_str += f"\n{indent}input_str += input_{name}_str; "

+        before_log_str = BEFORE_LOG_PRINT_TEMPLATE.format(var_str)
+
         for name, (ttype, pos) in forward_outputs_position_map.items():
             var_str += f"\n{indent}const char* TENSOR_{name.upper()}_TEMPLATE = \"({name}, [%s]),\";"
             var_str += f"\n{indent}std::string output_{name}_str = paddle::string::Sprintf(TENSOR_{name.upper()}_TEMPLATE, egr::EagerUtils::TensorStr({name}));"
             var_str += f"\n{indent}output_str += output_{name}_str; "

-        log_str = LOG_PRINT_TEMPLATE.format(var_str)
+        log_str = AFTER_LOG_PRINT_TEMPLATE.format(var_str)

         # Generate forward_definition_str and forward_declaration_str
         if self.is_forward_only:
@@ -1413,23 +1432,21 @@ class DygraphForwardFunctionGenerator(DygraphFunctionGeneratorBase):
             amp_logic_str = "\n VLOG(7) << \"No AMP for {} because it has no input.\"; ".format(
                 forward_ad_function_name)
             self.forward_definition_str += FORWARD_ONLY_FUNCTION_TEMPLATE.format(
                 returns_type_str,
                 forward_ad_function_name,
                 inputs_args_definition_str,
-                forward_api_name, dygraph_event_str,
+                GetDygraphLogName(forward_api_name), dygraph_event_str,
                 amp_logic_str, layout_logic_str, forward_api_name,
-                forward_call_str, get_outputs_str, forward_ad_function_name,
-                log_str, returns_str)
+                before_log_str, forward_call_str, get_outputs_str,
+                forward_api_name, log_str, returns_str)
         else:
             self.forward_definition_str += FORWARD_FUNCTION_TEMPLATE.format(
                 returns_type_str,
                 forward_ad_function_name,
                 inputs_args_definition_str,
-                forward_api_name, dygraph_event_str,
+                GetDygraphLogName(forward_api_name), dygraph_event_str,
                 amp_logic_str, layout_logic_str, inputs_autograd_meta_str,
-                forward_api_name, forward_call_str, check_nan_inf_str,
-                get_outputs_str, outputs_autograd_meta_str,
+                forward_api_name, before_log_str, forward_call_str,
+                check_nan_inf_str, get_outputs_str, outputs_autograd_meta_str,
                 compute_require_grad_args_str, check_inplace_str,
                 bump_inplace_version_str, node_creation_str,
+                forward_api_name, forward_ad_function_name,
                 log_str, returns_str)
         self.forward_declaration_str += f"{returns_type_str} {forward_ad_function_name}({inputs_args_declaration_str});\n"
@@ -1924,6 +1941,8 @@ class DygraphNodeGenerator(DygraphFunctionGeneratorBase):
             var_str += f"\n{indent}std::string input_{new_name}_str = paddle::string::Sprintf(TENSOR_{new_name.upper()}_TEMPLATE, egr::EagerUtils::TensorStr({new_name}));"
             var_str += f"\n{indent}input_str += input_{new_name}_str; "

+        before_log_str = BEFORE_LOG_PRINT_TEMPLATE.format(var_str)
+
         for name, (ttype, fwd_position,
                    grad_api_position) in backward_grad_outputs_map.items():
             new_name = self.TransformToNextGradName(name)
@@ -1931,16 +1950,16 @@ class DygraphNodeGenerator(DygraphFunctionGeneratorBase):
             var_str += f"\n{indent}std::string output_{new_name}_str = paddle::string::Sprintf(TENSOR_{new_name.upper()}_TEMPLATE, egr::EagerUtils::TensorStr({new_name}));"
             var_str += f"\n{indent}output_str += output_{new_name}_str; "

-        log_str = LOG_PRINT_TEMPLATE.format(var_str)
+        log_str = AFTER_LOG_PRINT_TEMPLATE.format(var_str)

         self.node_definition_str = GRAD_FUNCTION_TEMPLATE.format(
-            grad_node_name, GetDygraphLogName(self.backward_api_name),
+            grad_node_name, self.backward_api_name,
             fill_zero_str,
             get_grad_in_args_str, grad_function_prepare_str,
             compute_require_next_grad_str, inplace_check_str,
             inplace_for_grad_outs_str, self.backward_api_name,
-            grad_function_call_str, check_nan_inf_str,
+            before_log_str, grad_function_call_str, check_nan_inf_str,
             outputs_autograd_meta_str, next_grad_node_creation_str,
-            GetDygraphLogName(self.backward_api_name), log_str, returns_str)
+            self.backward_api_name, log_str, returns_str)

     def run(self):
         super().run()
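To make the new split concrete, here is a minimal standalone sketch of how the BEFORE template composes (the template literal is copied from the hunk above; var_str is a stub standing in for the per-tensor lines the generator accumulates):

    BEFORE_LOG_PRINT_TEMPLATE = \
    """
      if(VLOG_IS_ON(3)){{
        const char* INPUT_PRINT_TEMPLATE = \"{{ Input: [%s]}}\";
        {}
        VLOG(3) << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str);
      }}
    """

    # Stub for the accumulated per-input lines (built by the var_str += ... loop above).
    var_str = '\n    std::string input_x_str = "(x, [...])";\n    input_str += input_x_str;'

    before_log_str = BEFORE_LOG_PRINT_TEMPLATE.format(var_str)
    print(before_log_str)  # the C++ block spliced in ahead of the generated API call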
paddle/fluid/eager/backward.cc

@@ -71,7 +71,6 @@ std::unordered_map<GradNodeBase*, int> getInDegreeMap(
 // Enforce GradNode has TensorWrappers as Input
 void EnforceGradNodeHasInput(GradNodeBase* node) {
-  VLOG(6) << "Running in EnforceGradNodeHasInput";
   PADDLE_ENFORCE_NE(
       node->IsTensorWrappersCleared(),
       true,
paddle/fluid/eager/eager_amp_auto_cast.h

@@ -87,7 +87,7 @@ inline paddle::experimental::Tensor EagerAmpAutoCast(
     const std::string& op_name,
     bool trace_backward = true) {
   VLOG(6) << "AMP AmpAutoCasts:"
-          << " input(" << input_name << ") dst_dtype("
+          << " input(" << egr::EagerUtils::TensorStr(input) << " to dst_dtype("
           << paddle::framework::DataType2String(dst_dtype) << ").";
   if (dst_dtype == paddle::experimental::DataType::FLOAT16) {
     if (op_name == "run_program") {
@@ -107,6 +107,7 @@ inline paddle::experimental::Tensor EagerAmpAutoCast(
     }
   }
   if (NeedCast(input, dst_dtype)) {
+    VLOG(6) << "Input : " << input.impl() << "NeedCast";
     return Cast(input, dst_dtype, trace_backward);
   }
   return input;
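Condensed into a runnable Python sketch, the control flow visible in these two hunks is roughly as follows (stub helpers stand in for NeedCast/Cast; the float16 run_program branch body lies outside the hunk, so it is elided here too):

    def need_cast(x, dst_dtype):                    # stub for egr NeedCast
        return x['dtype'] != dst_dtype

    def cast(x, dst_dtype, trace_backward):         # stub for Cast
        return {**x, 'dtype': dst_dtype}

    def eager_amp_auto_cast(x, dst_dtype, op_name, trace_backward=True):
        if dst_dtype == 'float16' and op_name == 'run_program':
            pass                                    # special case, body not shown here
        if need_cast(x, dst_dtype):                 # now also VLOG(6)-logged in the C++
            return cast(x, dst_dtype, trace_backward)
        return x

    print(eager_amp_auto_cast({'dtype': 'float32'}, 'float16', 'matmul'))
    # -> {'dtype': 'float16'}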
paddle/fluid/eager/utils.h

@@ -257,8 +257,7 @@ class EagerUtils {
     } else {
       tensor_name_str = t.name();
     }
-    const char* TENSOR_INFO_TEMPLATE =
-        "{ Type: [ \"%s\" ], Dtype:[ \"%s\" ], Place:[ \"%s\" ] }";
+    const char* TENSOR_INFO_TEMPLATE = "Type: %s, Dtype: %s, Place: %s";
     std::string tensor_info_str = "";
     if (t.defined()) {
       if (t.initialized()) {
@@ -277,13 +276,13 @@ class EagerUtils {
       }
       if (VLOG_IS_ON(6)) {
         const char* TENSOR_PRINT_TEMPLATE =
-            "{ Name:[ \"%s\" ], Initialized: [ \"%d\" ], Ptr: [ \"%d\" ] "
-            "TensorInfo: [ \"%s\" ], ADInfo:[ \"%s\" ] }";
+            "{ Name: %s, Initialized: %d, Ptr: %d "
+            "TensorInfo: [ %s ], ADInfo:[ %s ] }";
         auto* ad_meta = nullable_autograd_meta(t);
-        if (!ad_meta && !(ad_meta->WeakGrad().lock().get())) {
+        if (ad_meta && (ad_meta->WeakGrad().lock().get())) {
           std::string ad_info_str = "";
           const char* AD_INFO_TEMPLATE =
-              "{ Grad: [ \"%s\" ], GradNode: [ %s ], StopGradient: [ %d ] }";
+              "Grad: [ %s ], GradNode: [ %s ], StopGradient: [ %d ]";
           ad_info_str += paddle::string::Sprintf(AD_INFO_TEMPLATE,
                                                  TensorStr(ad_meta->Grad()),
                                                  GradNodeStr(t),
@@ -304,8 +303,8 @@ class EagerUtils {
       }
     } else if (VLOG_IS_ON(5)) {
       const char* TENSOR_PRINT_TEMPLATE =
-          "{ Name:[ \"%s\" ], Initialized: [ \"%d\" ], Ptr: [ \"%d\" ] "
-          "TensorInfo: [ \"%s\" ] }";
+          "{ Name: %s, Initialized: %d , Ptr: %d "
+          "TensorInfo: [ %s ] }";
       return paddle::string::Sprintf(TENSOR_PRINT_TEMPLATE,
                                      tensor_name_str,
                                      t.initialized(),
@@ -313,7 +312,7 @@ class EagerUtils {
                                      tensor_info_str);
     } else if (VLOG_IS_ON(4)) {
       const char* TENSOR_PRINT_TEMPLATE =
-          "{ Name: [ \"%s\" ], Initialized: [ \"%d\" ], Ptr: [ \"%d\" ] }";
+          "{ Name: %s, Initialized: %d, Ptr: %d }";
       return paddle::string::Sprintf(
           TENSOR_PRINT_TEMPLATE, tensor_name_str, t.initialized(), t.impl());
     } else {
@@ -324,10 +323,10 @@ class EagerUtils {
   static const std::string GradNodeStr(const egr::GradNodeBase& node) {
     if (VLOG_IS_ON(6)) {
       const char* GRAD_NODE_TEMPLATE =
-          "{ BackwardOutMeta: [ %s ], BackwardInMeta: [ %s ] }";
+          "BackwardOutMeta: [ %s ], BackwardInMeta: [ %s ]";
       const char* GRAD_SLOT_META_TEMPLATE = " {SlotSize: [%d]: %s} ";
       const char* SLOT_INFO_TEMPLATE =
-          "{SlotID: [ \"%s\" ], StopGradients: [ %s ], Edges[ %s ] }";
+          "SlotID: %s, StopGradients: %s, Edges[ %s ]";
       auto out_metas = node.OutputMeta();
       auto in_metas = node.InputMeta();
       std::string out_slot_str = "";
@@ -372,8 +371,8 @@ class EagerUtils {
           GRAD_NODE_TEMPLATE, out_meta_str, in_meta_str);
     } else if (VLOG_IS_ON(5)) {
       const char* GRAD_NODE_TEMPLATE =
-          "{ BackwardOutMeta: [ %s ], BackwardInMeta: [ %s ] }";
-      const char* GRAD_SLOT_META_TEMPLATE = "SlotSize: [ \"%d\" ] ";
+          "BackwardOutMeta: [ %s ], BackwardInMeta: [ %s ]";
+      const char* GRAD_SLOT_META_TEMPLATE = "SlotSize: %d ";
       std::string out_meta_str = paddle::string::Sprintf(
           GRAD_SLOT_META_TEMPLATE, node.OutputMeta().size());
       std::string in_meta_str = paddle::string::Sprintf(
@@ -387,7 +386,7 @@ class EagerUtils {
   static const std::string GradNodeStr(const paddle::experimental::Tensor& t) {
     auto* ad_meta = nullable_autograd_meta(t);
-    if (ad_meta && !(ad_meta->GetMutableGradNode().get())) {
+    if (ad_meta && (ad_meta->GetMutableGradNode().get())) {
       return GradNodeStr((*ad_meta->GetMutableGradNode().get()));
     } else {
       return "None";
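Rendered with Python %-formatting as a stand-in for paddle::string::Sprintf, the old vs. new VLOG(4) tensor summary looks like this (template strings copied from the hunk above; sample values invented):

    old_tpl = '{ Name: [ "%s" ], Initialized: [ "%d" ], Ptr: [ "%d" ] }'
    new_tpl = '{ Name: %s, Initialized: %d, Ptr: %d }'

    print(old_tpl % ('linspace_out', 1, 0x55aa))
    # { Name: [ "linspace_out" ], Initialized: [ "1" ], Ptr: [ "21930" ] }
    print(new_tpl % ('linspace_out', 1, 0x55aa))
    # { Name: linspace_out, Initialized: 1, Ptr: 21930 }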
paddle/phi/api/yaml/legacy_ops.yaml

@@ -1464,13 +1464,16 @@
   backward : linear_interp_grad

 - op : linspace
-  args : (Tensor start, Tensor stop, Tensor number, DataType dtype)
+  args : (Tensor start, Tensor stop, Tensor number, DataType dtype, Place place)
   output : Tensor(out)
   infer_meta :
     func : LinspaceInferMeta
+    param : [start, stop, number, dtype]
   kernel :
     func : linspace
+    param : [start, stop, number, dtype]
     data_type : dtype
+    backend : place

 - op : log
   args : (Tensor x)
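A hedged reading of what this registration change means for the generated API, sketched as plain Python data (not generator output):

    linspace_op = {
        # the C++ API now takes an explicit Place as its last argument
        'args': ['start', 'stop', 'number', 'dtype', 'place'],
        # infer_meta and the kernel still only consume the first four
        'infer_meta': {'func': 'LinspaceInferMeta',
                       'param': ['start', 'stop', 'number', 'dtype']},
        'kernel': {'func': 'linspace',
                   'param': ['start', 'stop', 'number', 'dtype'],
                   'data_type': 'dtype',    # kernel dtype follows the dtype attribute
                   'backend': 'place'},     # kernel device follows the new place argument
    }
    print(linspace_op['kernel']['backend'])  # -> place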
python/paddle/fluid/layers/tensor.py

@@ -1606,7 +1606,8 @@ def linspace(start, stop, num, dtype=None, name=None):
         with device_guard("cpu"):
             tensor_num = fill_constant([1], 'int32', num)
     if in_dygraph_mode():
-        return _C_ops.linspace(tensor_start, tensor_stop, tensor_num, dtype)
+        return _C_ops.linspace(tensor_start, tensor_stop, tensor_num, dtype,
+                               _current_expected_place())
     if _in_legacy_dygraph():
         return _legacy_C_ops.linspace(tensor_start, tensor_stop, tensor_num,
                                       'dtype', dtype)
python/paddle/tensor/creation.py

@@ -100,7 +100,8 @@ def linspace(start, stop, num, dtype=None, name=None):
         with device_guard("cpu"):
             tensor_num = fill_constant([1], 'int32', num, force_cpu=True)
     if in_dygraph_mode():
-        return _C_ops.linspace(tensor_start, tensor_stop, tensor_num, dtype)
+        return _C_ops.linspace(tensor_start, tensor_stop, tensor_num, dtype,
+                               _current_expected_place())
     if _in_legacy_dygraph():
         return _legacy_C_ops.linspace(tensor_start, tensor_stop, tensor_num,
                                       'dtype', dtype)
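Taken together, a hedged end-to-end check of the fix (assumes a GPU build of Paddle; the exact pre-fix symptoms are inferred from the commit title):

    import paddle

    paddle.set_device('gpu')            # _current_expected_place() -> CUDAPlace(0)
    with paddle.amp.auto_cast():
        x = paddle.linspace(0, 10, 5)   # place is now forwarded to the linspace kernel
    print(x.dtype, x.place)             # float32 output, placed on the GPU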