Commit 2998a7d2 (unverified)
Author:    Weilong Wu
Authored:  Apr 26, 2022
Committer: GitHub
Committed: Apr 26, 2022

[Eager] Remove retain_grad_flag in accumulation_nade, add is_new_grad args in operator (#42240)

Parent: 12311ddc

Showing 15 changed files with 20 additions and 21 deletions (+20 −21)
paddle/fluid/eager/accumulation/accumulation_node.cc                         +3 −3
paddle/fluid/eager/accumulation/accumulation_node.h                          +1 −1
paddle/fluid/eager/api/generated/eager_generated/backwards/scale_node.cc     +1 −1
paddle/fluid/eager/api/generated/eager_generated/backwards/scale_node.h      +1 −1
paddle/fluid/eager/auto_code_generator/eager_generator.cc                    +2 −2
paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py    +2 −2
paddle/fluid/eager/backward.cc                                               +1 −1
paddle/fluid/eager/custom_operator/custom_operator_node.cc                   +3 −2
paddle/fluid/eager/custom_operator/custom_operator_node.h                    +1 −1
paddle/fluid/eager/grad_node_info.h                                          +1 −1
paddle/fluid/eager/pylayer/py_layer_node.cc                                  +1 −1
paddle/fluid/eager/pylayer/py_layer_node.h                                   +1 −1
paddle/fluid/eager/tests/data_structure_tests/grad_node_test.h               +1 −1
paddle/fluid/eager/to_static/run_program_op_node.h                           +1 −1
python/paddle/fluid/tests/unittests/test_tensor_register_hook.py             +0 −2
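
The gist of the change, before the per-file diffs: every grad node's operator() gains a bool is_new_grad parameter, and GradNodeAccumulation consults that per-call flag instead of the global FLAGS_retain_grad_for_all_tensor flag when deciding whether to write the incoming gradient into the leaf tensor's stored grad. The following is a minimal standalone C++17 sketch of that idea only; ToyAccumulationNode and the Tensor alias are illustrative stand-ins, not Paddle's real GradNodeAccumulation or paddle::experimental::Tensor.

#include <iostream>
#include <memory>
#include <vector>

// Toy stand-ins; Paddle's real types are paddle::experimental::Tensor and
// egr::GradNodeAccumulation.
using Tensor = std::vector<float>;

struct ToyAccumulationNode {
  // Mirrors the new interface: operator()(grads, create_graph, is_new_grad).
  std::vector<std::vector<Tensor>> operator()(
      std::vector<std::vector<Tensor>>& grads, bool create_graph = false,
      bool is_new_grad = false) {
    (void)create_graph;
    Tensor grad_out = grads[0][0];
    // Before this commit the guard was
    //   if (!weak_grad_.expired() && FLAGS_retain_grad_for_all_tensor)
    // Now the per-call is_new_grad flag decides whether the leaf grad is
    // updated, so no global flag is consulted.
    if (auto grad = weak_grad_.lock(); grad && !is_new_grad) {
      for (size_t i = 0; i < grad->size(); ++i) {
        (*grad)[i] += grad_out[i];  // CopyOrAddTensor, simplified
      }
    }
    return {{grad_out}};
  }
  std::weak_ptr<Tensor> weak_grad_;  // weak reference to the leaf's grad
};

int main() {
  auto leaf_grad = std::make_shared<Tensor>(Tensor{0.f, 0.f});
  ToyAccumulationNode node;
  node.weak_grad_ = leaf_grad;

  std::vector<std::vector<Tensor>> grads = {{{1.f, 2.f}}};
  node(grads, /*create_graph=*/false, /*is_new_grad=*/false);  // accumulates
  node(grads, /*create_graph=*/false, /*is_new_grad=*/true);   // skipped
  std::cout << (*leaf_grad)[0] << " " << (*leaf_grad)[1] << "\n";  // prints: 1 2
  return 0;
}

Built with -std=c++17, the second call leaves the leaf grad untouched, which is the behavior the removed FLAGS_retain_grad_for_all_tensor toggle previously controlled globally.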
paddle/fluid/eager/accumulation/accumulation_node.cc
@@ -24,7 +24,7 @@
 #include "paddle/fluid/platform/errors.h"
 #include "glog/logging.h"
-DECLARE_bool(retain_grad_for_all_tensor);
 namespace egr {
 static void CopyOrAddTensor(paddle::experimental::Tensor* tensor,
@@ -41,7 +41,7 @@ static void CopyOrAddTensor(paddle::experimental::Tensor* tensor,
 std::vector<std::vector<paddle::experimental::Tensor>> GradNodeAccumulation::
 operator()(std::vector<std::vector<paddle::experimental::Tensor>>& grads,  // NOLINT
-           bool create_graph) {
+           bool create_graph, bool is_new_grad) {
   VLOG(3) << "Running Eager Backward Node: GradNodeAccumulation";
   PADDLE_ENFORCE(grads.size() == 1,
                  paddle::platform::errors::Fatal(
@@ -63,7 +63,7 @@ operator()(
     grad_out = grads[0][0];
   }
-  if (!weak_grad_.expired() && FLAGS_retain_grad_for_all_tensor) {
+  if (!weak_grad_.expired() && !is_new_grad) {
     auto grad = weak_grad_.lock();
     CopyOrAddTensor(grad.get(), grad_out);
   }

paddle/fluid/eager/accumulation/accumulation_node.h
@@ -39,7 +39,7 @@ class GradNodeAccumulation : public GradNodeBase {
   // Functor: perform backward computations
   virtual std::vector<std::vector<paddle::experimental::Tensor>> operator()(
       std::vector<std::vector<paddle::experimental::Tensor>>& grads,  // NOLINT
-      bool create_graph = false) override;
+      bool create_graph = false, bool is_new_grad = false) override;
   void ClearTensorWrappers() override { VLOG(6) << "Do nothing here now"; }

paddle/fluid/eager/api/generated/eager_generated/backwards/scale_node.cc
@@ -147,7 +147,7 @@ void GradNodeScale::SetAttributes_scale(float scale) { scale_ = scale; }
 std::vector<std::vector<paddle::experimental::Tensor>> GradNodeScale::
 operator()(std::vector<std::vector<paddle::experimental::Tensor>>& grads,  // NOLINT
-           bool create_graph) {
+           bool create_graph, bool is_new_grad) {
   // 1. Check Output Size
   PADDLE_ENFORCE(
       ((grads.size() == 1) && (grads[0].size() == 1)),

paddle/fluid/eager/api/generated/eager_generated/backwards/scale_node.h
@@ -40,7 +40,7 @@ class GradNodeScale : public GradNodeBase {
   // Functor: perform backward computations
   virtual std::vector<std::vector<paddle::experimental::Tensor>> operator()(
       std::vector<std::vector<paddle::experimental::Tensor>>& grads,  // NOLINT
-      bool create_graph = false) override;
+      bool create_graph = false, bool is_new_grad = false) override;
   void ClearTensorWrappers() override { VLOG(6) << "Do nothing here now"; }

paddle/fluid/eager/auto_code_generator/eager_generator.cc
@@ -2444,7 +2444,7 @@ static std::string GenerateGradNodeCCContents(
       "std::vector<std::vector<paddle::experimental::Tensor>> "
       "GradNode%s::operator()("
       "std::vector<std::vector<paddle::experimental::Tensor>>& grads, bool "
-      "create_graph) {\n"
+      "create_graph, bool is_new_grad) {\n"
       "%s"
       "%s"
       "\n}";
@@ -2490,7 +2490,7 @@ static std::string GenerateGradNodeHeaderContents(
       " virtual std::vector<std::vector<paddle::experimental::Tensor>> "
       "operator()("
       "std::vector<std::vector<paddle::experimental::Tensor>>& grads, bool "
-      "create_graph = false) "
+      "create_graph = false, bool is_new_grad = false) "
       "override;\n"
       "\n"
       " void ClearTensorWrappers() override {\n"

paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py
@@ -119,7 +119,7 @@ class {} : public egr::GradNodeBase {{
   ~{}() override = default;
   virtual std::vector<std::vector<paddle::experimental::Tensor>> operator()(
-      std::vector<std::vector<paddle::experimental::Tensor>>& grads, bool create_graph = false) override;
+      std::vector<std::vector<paddle::experimental::Tensor>>& grads, bool create_graph = false, bool is_new_grad = false) override;
   std::string name() override {{ return \"{}\"; }}
   void ClearTensorWrappers() override {{
@@ -149,7 +149,7 @@ class {} : public egr::GradNodeBase {{
 GRAD_FUNCTION_TEMPLATE = \
 """
-std::vector<std::vector<paddle::experimental::Tensor>> {}::operator()(std::vector<std::vector<paddle::experimental::Tensor>>& grads, bool create_graph) {{
+std::vector<std::vector<paddle::experimental::Tensor>> {}::operator()(std::vector<std::vector<paddle::experimental::Tensor>>& grads, bool create_graph, bool is_new_grad) {{
   // Fill Zero For GradIn Tensors
 {}

paddle/fluid/eager/backward.cc
@@ -690,7 +690,7 @@ std::vector<paddle::experimental::Tensor> RunBackward(
     VLOG(6) << "Run Backward Kernel with GradTensorHolder.";
     // Run Pre Backward Node and get outputs
     std::vector<std::vector<paddle::experimental::Tensor>> grad_output_tensors =
-        (*node)(node_input_buffer->Buffers(), create_graph);
+        (*node)(node_input_buffer->Buffers(), create_graph, is_general_grad);
     // retain_grad or not
     if (!retain_graph) {

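
On the caller side, the only engine change is that RunBackward now forwards its is_general_grad flag, which appears to correspond to the general_grad (paddle.grad-style) code path, as the node's is_new_grad argument. A toy sketch of that forwarding follows; RunNode, GradNode, and the Tensor alias are illustrative stand-ins, not Paddle's real engine types.

#include <functional>
#include <iostream>
#include <vector>

// Toy stand-ins for the call site in RunBackward; names are illustrative only.
using Tensor = std::vector<float>;
using Grads = std::vector<std::vector<Tensor>>;
using GradNode = std::function<Grads(Grads&, bool, bool)>;

Grads RunNode(const GradNode& node, Grads& input_buffer, bool create_graph,
              bool is_general_grad) {
  // Old call: node(input_buffer, create_graph);
  // New call: the engine forwards is_general_grad as the node's is_new_grad
  // argument, so a general-grad pass does not write into leaf .grad.
  return node(input_buffer, create_graph, /*is_new_grad=*/is_general_grad);
}

int main() {
  GradNode node = [](Grads& g, bool /*create_graph*/, bool is_new_grad) {
    std::cout << "is_new_grad=" << std::boolalpha << is_new_grad << "\n";
    return g;
  };
  Grads buffer = {{{1.f}}};
  RunNode(node, buffer, /*create_graph=*/true, /*is_general_grad=*/true);
  return 0;
}

Combined with the accumulation guard above, gradients computed in such a pass are returned to the caller without being accumulated into leaf tensors' .grad, which matches the updated unit test at the end of this diff where x.grad stays None after paddle.grad without any flag toggling.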
paddle/fluid/eager/custom_operator/custom_operator_node.cc
@@ -20,8 +20,9 @@
 namespace egr {
 std::vector<std::vector<paddle::experimental::Tensor>> RunCustomOpNode::
-operator()(std::vector<std::vector<paddle::experimental::Tensor>>& grads, bool create_graph) {  // NOLINT
+operator()(std::vector<std::vector<paddle::experimental::Tensor>>& grads,  // NOLINT
+           bool create_graph, bool is_new_grad) {
   paddle::CustomOpKernelContext ctx;
   auto grad_inputs_name = paddle::framework::OpMetaInfoHelper::GetInputs(
       egr::Controller::Instance().GetOpMetaInfoMap().at(op_type_)[1]);

paddle/fluid/eager/custom_operator/custom_operator_node.h
@@ -39,7 +39,7 @@ class RunCustomOpNode : public GradNodeBase {
   virtual std::vector<std::vector<paddle::experimental::Tensor>> operator()(  // NOLINT
       std::vector<std::vector<paddle::experimental::Tensor>>& grads,  // NOLINT
-      bool create_graph = false)  // NOLINT
+      bool create_graph = false, bool is_new_grad = false)  // NOLINT
       override;
   std::string name() {

paddle/fluid/eager/grad_node_info.h
@@ -109,7 +109,7 @@ class GradNodeBase {
    * **/
   virtual std::vector<std::vector<paddle::experimental::Tensor>> operator()(
       std::vector<std::vector<paddle::experimental::Tensor>>& grads,  // NOLINT
-      bool create_graph = false) = 0;
+      bool create_graph = false, bool is_new_grad = false) = 0;
   virtual void ClearTensorWrappers() = 0;

paddle/fluid/eager/pylayer/py_layer_node.cc
@@ -32,7 +32,7 @@ namespace egr {
 std::vector<std::vector<paddle::experimental::Tensor>> GradNodePyLayer::
 operator()(std::vector<std::vector<paddle::experimental::Tensor>>& grads,  // NOLINT
-           bool create_graph) {
+           bool create_graph, bool is_new_grad) {
   VLOG(3) << "Running Eager Backward Node: " << name();
   std::vector<std::vector<paddle::experimental::Tensor>> hooked_grads =

paddle/fluid/eager/pylayer/py_layer_node.h
@@ -36,7 +36,7 @@ class GradNodePyLayer : public GradNodeBase {
   virtual std::vector<std::vector<paddle::experimental::Tensor>> operator()(
       std::vector<std::vector<paddle::experimental::Tensor>>& grads,  // NOLINT
-      bool create_graph = false) override;
+      bool create_graph = false, bool is_new_grad = false) override;
   void ClearTensorWrappers() override { VLOG(6) << "Do nothing here now"; }

paddle/fluid/eager/tests/data_structure_tests/grad_node_test.h
@@ -33,7 +33,7 @@ class GradTestNode : public egr::GradNodeBase {
   std::string name() override { return "GradTestNode"; }
   std::vector<std::vector<paddle::experimental::Tensor>> operator()(
       std::vector<std::vector<paddle::experimental::Tensor>>& grads,  // NOLINT
-      bool create_graph = false) override {
+      bool create_graph = false, bool is_new_grad = false) override {
     val_ = std::dynamic_pointer_cast<phi::DenseTensor>(grads[0][0].impl())
                ->data<float>()[0];
     phi::DenseTensorMeta meta =

paddle/fluid/eager/to_static/run_program_op_node.h
@@ -366,7 +366,7 @@ class GradNodeRunProgram : public egr::GradNodeBase {
   // Functor: perform backward computations
   virtual std::vector<std::vector<paddle::experimental::Tensor>> operator()(
       std::vector<std::vector<paddle::experimental::Tensor>> &grads,  // NOLINT
-      bool create_graph) override {
+      bool create_graph, bool is_new_grad) override {
     VLOG(3) << "Running Eager Backward Node: GradNodeRunProgram";
     std::vector<std::vector<paddle::experimental::Tensor>> hooked_grads =
         GradNodeRunProgram::ApplyGradientHooks(grads);

python/paddle/fluid/tests/unittests/test_tensor_register_hook.py
@@ -462,11 +462,9 @@ class TestTensorRegisterHook(unittest.TestCase):
         x.register_hook(double_print_hook)
         y = x * x
-        fluid.set_flags({'FLAGS_retain_grad_for_all_tensor': False})
         # Since y = x * x, dx = 2 * x
         dx = paddle.grad(
             outputs=[y], inputs=[x], create_graph=True, retain_graph=True)[0]
-        fluid.set_flags({'FLAGS_retain_grad_for_all_tensor': True})
         z = y + dx
         self.assertTrue(x.grad is None)