BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle) — commit 5e878ecc
Commit 5e878ecc (unverified)

optimize log (#46349)

Authored by Jiabin Yang on Sep 23, 2022; committed via GitHub on Sep 23, 2022.
Parent commit: 812e4b47
Showing 4 changed files with 13 additions and 7 deletions (+13, -7):

  paddle/fluid/eager/accumulation/accumulation_node.cc             +4  -0
  paddle/fluid/eager/auto_code_generator/generator/eager_gen.py    +5  -5
  paddle/fluid/eager/backward.cc                                    +1  -2
  paddle/fluid/eager/general_grad.h                                 +3  -0
paddle/fluid/eager/accumulation/accumulation_node.cc
@@ -31,12 +31,16 @@ static void CopyOrAddTensor(paddle::experimental::Tensor* tensor,
                             const paddle::experimental::Tensor& t,
                             bool is_fake_empty) {
   if (is_fake_empty) {
+    VLOG(3) << "Move Tensor ptr: " << t.impl();
     *tensor = t;
   } else {
     if (!tensor->defined() || !tensor->initialized()) {
       // Simply copy tensor->impl
+      VLOG(3) << "Move Tensor ptr: " << t.impl();
       *tensor = t;
     } else {
+      VLOG(3) << "Add Tensor ptr: " << t.impl()
+              << " with Tensor ptr: " << tensor->impl();
       // Accumulation
       if (LIKELY(t.is_dense_tensor())) {
         if (LIKELY(tensor->is_dense_tensor())) {
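The new messages use VLOG(3), glog's verbosity-gated logging: the statements are always compiled in but only emitted when the active verbose level is at least 3 (for Paddle this is typically raised via the GLOG_v environment variable), so the extra tensor-pointer logs are silent in normal runs. Below is a minimal, self-contained sketch of that gating pattern only; DEMO_VLOG and DEMO_VLOG_LEVEL are made-up names for illustration, not Paddle's or glog's API.

#include <cstdlib>
#include <iostream>

// Hypothetical helper: read the desired verbosity once from an environment
// variable (a stand-in for how GLOG_v raises glog's verbose level).
static int VerbosityLevel() {
  static const int level = [] {
    const char* v = std::getenv("DEMO_VLOG_LEVEL");
    return v ? std::atoi(v) : 0;
  }();
  return level;
}

// A message at level N is built and printed only when verbosity >= N, which is
// why level-3 logs like the ones added here cost nothing by default.
#define DEMO_VLOG(N) \
  if (VerbosityLevel() < (N)) {} else std::cerr

int main() {
  const void* fake_impl = reinterpret_cast<void*>(0x1234);  // stands in for t.impl()
  DEMO_VLOG(3) << "Move Tensor ptr: " << fake_impl << "\n";
  DEMO_VLOG(1) << "printed once DEMO_VLOG_LEVEL is at least 1\n";
  return 0;
}

Run with DEMO_VLOG_LEVEL=3 to see both messages; with the variable unset, nothing is printed, mirroring why such logs are safe on the accumulation path.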
paddle/fluid/eager/auto_code_generator/generator/eager_gen.py
@@ -1414,13 +1414,13 @@ class DygraphForwardFunctionGenerator(DygraphFunctionGeneratorBase):
         var_str = f"\n{indent}  std::string input_str = \"\";"
         var_str += f"\n{indent}  std::string output_str = \"\";"
         for name, (ttype, pos) in forward_inputs_position_map.items():
-            var_str += f"\n{indent}  const char* TENSOR_{name.upper()}_TEMPLATE = \"({name}, [%s]), \";"
+            var_str += f"\n{indent}  const char* TENSOR_{name.upper()}_TEMPLATE = \"\\n({name}, [%s]), \";"
             var_str += f"\n{indent}  std::string input_{name}_str = paddle::string::Sprintf(TENSOR_{name.upper()}_TEMPLATE, egr::EagerUtils::TensorStr({name}));"
             var_str += f"\n{indent}  input_str += input_{name}_str; "

         before_log_str = BEFORE_LOG_PRINT_TEMPLATE.format(var_str)

         for name, (ttype, pos) in forward_outputs_position_map.items():
-            var_str += f"\n{indent}  const char* TENSOR_{name.upper()}_TEMPLATE = \"({name}, [%s]), \";"
+            var_str += f"\n{indent}  const char* TENSOR_{name.upper()}_TEMPLATE = \"\\n({name}, [%s]), \";"
             var_str += f"\n{indent}  std::string output_{name}_str = paddle::string::Sprintf(TENSOR_{name.upper()}_TEMPLATE, egr::EagerUtils::TensorStr({name}));"
             var_str += f"\n{indent}  output_str += output_{name}_str; "
@@ -1930,14 +1930,14 @@ class DygraphNodeGenerator(DygraphFunctionGeneratorBase):
         for name, (ttype, fwd_position, grad_api_position) in backward_grad_inputs_map.items():
             new_name = self.TransformToNextGradName(name)
-            var_str += f"\n{indent}  const char* TENSOR_{new_name.upper()}_TEMPLATE = \"({new_name}, [%s]), \";"
+            var_str += f"\n{indent}  const char* TENSOR_{new_name.upper()}_TEMPLATE = \"\\n({new_name}, [%s]), \";"
             var_str += f"\n{indent}  std::string input_{new_name}_str = paddle::string::Sprintf(TENSOR_{new_name.upper()}_TEMPLATE, egr::EagerUtils::TensorStr({new_name}));"
             var_str += f"\n{indent}  input_str += input_{new_name}_str; "

         for name, (backward_input_type, is_fwd_input, grad_api_position), in backward_forward_inputs_map.items():
             new_name = self.TransformToNextGradName(name)
-            var_str += f"\n{indent}  const char* TENSOR_{new_name.upper()}_TEMPLATE = \"({new_name}, [%s]), \";"
+            var_str += f"\n{indent}  const char* TENSOR_{new_name.upper()}_TEMPLATE = \"\\n({new_name}, [%s]), \";"
             var_str += f"\n{indent}  std::string input_{new_name}_str = paddle::string::Sprintf(TENSOR_{new_name.upper()}_TEMPLATE, egr::EagerUtils::TensorStr({new_name}));"
             var_str += f"\n{indent}  input_str += input_{new_name}_str; "
@@ -1946,7 +1946,7 @@ class DygraphNodeGenerator(DygraphFunctionGeneratorBase):
         for name, (ttype, fwd_position, grad_api_position) in backward_grad_outputs_map.items():
             new_name = self.TransformToNextGradName(name)
-            var_str += f"\n{indent}  const char* TENSOR_{new_name.upper()}_TEMPLATE = \"({new_name}, [%s]), \";"
+            var_str += f"\n{indent}  const char* TENSOR_{new_name.upper()}_TEMPLATE = \"\\n ({new_name}, [%s]), \";"
             var_str += f"\n{indent}  std::string output_{new_name}_str = paddle::string::Sprintf(TENSOR_{new_name.upper()}_TEMPLATE, egr::EagerUtils::TensorStr({new_name}));"
             var_str += f"\n{indent}  output_str += output_{new_name}_str; "
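All five eager_gen.py edits make the same change: the per-tensor template emitted by the code generator gains a leading "\n" (written "\\n" in the Python source so the escape survives into the generated C++ string literal). When the generated code concatenates one "(name, [%s]), " record per input or output tensor, each record now starts on its own line instead of running together. The sketch below shows only that formatting effect; the tensor names and bracketed descriptions are hypothetical, and plain std::string splicing stands in for the generated paddle::string::Sprintf / egr::EagerUtils::TensorStr calls.

#include <iostream>
#include <string>

// Fill the "%s" slot of a per-tensor template with a tensor description,
// roughly what the generated Sprintf call does for each input/output.
static std::string Render(const char* tmpl, const std::string& tensor_info) {
  std::string out(tmpl);
  const auto pos = out.find("%s");
  if (pos != std::string::npos) out.replace(pos, 2, tensor_info);
  return out;
}

int main() {
  // Old-style templates: "(x, [%s]), " — records run together on one line.
  std::string old_style = Render("(x, [%s]), ", "shape=[2, 3]") +
                          Render("(y, [%s]), ", "shape=[3, 4]");
  // New-style templates: "\n(x, [%s]), " — each record starts a new line.
  std::string new_style = Render("\n(x, [%s]), ", "shape=[2, 3]") +
                          Render("\n(y, [%s]), ", "shape=[3, 4]");

  std::cout << "old:" << old_style << "\n";
  std::cout << "new:" << new_style << "\n";
  return 0;
}

The old-style string prints both records on a single line, while the new-style string prints one record per line, which is easier to scan when an operator has many tensors.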
paddle/fluid/eager/backward.cc
@@ -226,7 +226,6 @@ std::vector<paddle::experimental::Tensor> RunBackward(
   while (!queue.empty()) {
     GradNodeBase* node = queue.front();
     VLOG(3) << "Preparing GradNode:" << node->name() << " addr:" << node;
-    VLOG(4) << EagerUtils::GradNodeStr(*node);
     paddle::platform::RecordEvent node_record_event(
         std::string((*node).name()),
         paddle::platform::TracerEventType::Operator,
@@ -338,7 +337,7 @@ std::vector<paddle::experimental::Tensor> RunBackward(
        node_input_buffers_dict[next_node] = std::move(grad_tensor_holder);
      }
-     VLOG(3) << "Sum grad inputs for edge slot: " << edge_rank.first
+     VLOG(3) << "Sum or Move grad inputs for edge slot: " << edge_rank.first
              << ", rank: " << edge_rank.second;
      node_input_buffers_dict[next_node]->add(edge_rank.first,
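The reworded message appears to reflect that a gradient buffer slot does not always sum: the first gradient that arrives can simply be moved in, and only later arrivals are accumulated, the same move-or-add behaviour seen in CopyOrAddTensor above. The following toy sketch illustrates that pattern only; GradSlot is a placeholder, not Paddle's GradTensorHolder.

#include <cstddef>
#include <iostream>
#include <vector>

struct GradSlot {
  std::vector<float> value;
  bool initialized = false;

  // First contribution is moved in; later contributions are accumulated.
  void Add(std::vector<float>&& grad) {
    if (!initialized) {
      value = std::move(grad);   // "Move grad inputs"
      initialized = true;
    } else {
      for (std::size_t i = 0; i < value.size() && i < grad.size(); ++i) {
        value[i] += grad[i];     // "Sum grad inputs"
      }
    }
  }
};

int main() {
  GradSlot slot;
  slot.Add({1.f, 2.f});
  slot.Add({0.5f, 0.5f});
  for (float v : slot.value) std::cout << v << ' ';  // prints "1.5 2.5"
  std::cout << '\n';
  return 0;
}

Moving the first contribution avoids a copy, which is presumably why the log line now names both cases.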
paddle/fluid/eager/general_grad.h
@@ -557,6 +557,9 @@ class GeneralGrad {
     } else {
       copied_next_node = orig_next_node->Copy();
       orig_to_copied_node_map_[orig_next_node.get()] = copied_next_node;
+      VLOG(3) << "Copied Node: " << orig_next_node->name()
+              << " ptr: " << orig_next_node
+              << " to ptr: " << copied_next_node;
       copied_grad_nodes_.push_back(copied_next_node);
     }
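The added log records which original grad node was duplicated into which copy. The surrounding code keys orig_to_copied_node_map_ by the original node's raw pointer, so each node is copied at most once and later lookups reuse the existing copy. Below is a minimal sketch of such a copy-once map; the Node type, Copy() method, and names are placeholders, not Paddle's GradNodeBase API.

#include <iostream>
#include <memory>
#include <string>
#include <unordered_map>

struct Node {
  std::string name;
  std::shared_ptr<Node> Copy() const { return std::make_shared<Node>(*this); }
};

class CopyMap {
 public:
  // Returns the existing copy if the node was seen before; otherwise copies it
  // once, logs the original-to-copy mapping, and remembers it.
  std::shared_ptr<Node> GetOrCopy(const std::shared_ptr<Node>& orig) {
    auto it = map_.find(orig.get());
    if (it != map_.end()) return it->second;
    auto copied = orig->Copy();
    map_[orig.get()] = copied;
    std::cout << "Copied Node: " << orig->name << " ptr: " << orig.get()
              << " to ptr: " << copied.get() << "\n";
    return copied;
  }

 private:
  std::unordered_map<const Node*, std::shared_ptr<Node>> map_;
};

int main() {
  auto node = std::make_shared<Node>(Node{"matmul_grad"});
  CopyMap copies;
  auto c1 = copies.GetOrCopy(node);
  auto c2 = copies.GetOrCopy(node);  // reuses the first copy, no second log line
  std::cout << (c1 == c2 ? "same copy\n" : "different copies\n");
  return 0;
}

Calling GetOrCopy twice with the same node prints the mapping only once and returns the same shared pointer both times.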