Commit 10188e8f (unverified)
Authored Jun 13, 2023 by xiaoguoguo626807; committed via GitHub on Jun 13, 2023
Parent commit: 60e3e350

【prim】delete multiply_triple_grad dygraph path (#54558)

* mutiply_triple delete
* add case
* add timeout
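For context, the path removed here is the handwritten dygraph (eager) triple-gradient node for multiply. The sketch below is illustrative only and is not part of the commit: it nests paddle.grad calls with create_graph=True so that a third differentiation through paddle.multiply is requested, which is the kind of request that previously reached MultiplyTripleGradNode and, after this change, is expected to go through the composite (prim) double-grad instead or hit the "doesn't have any grad op" error. The tensor values, variable names, and the prim-enabling call named in the comment are assumptions, not taken from the diff.

# Illustrative sketch (not from this commit); assumes Paddle dygraph mode and
# the public paddle.grad API with create_graph=True.
import paddle

x = paddle.to_tensor(2.0, stop_gradient=False)
y = paddle.to_tensor(3.0, stop_gradient=False)

z = paddle.multiply(x, y)   # z = x * y
w = paddle.multiply(z, z)   # w = (x * y) ** 2

# Keep the graph so each gradient can be differentiated again.
(dw_dx,) = paddle.grad(w, x, create_graph=True)        # 2 * x * y**2
(d2w_dx2,) = paddle.grad(dw_dx, x, create_graph=True)  # 2 * y**2
# The third differentiation is the step that used to route through the
# deleted MultiplyTripleGradNode in the non-composite eager path.
# If the composite path is needed, tests of this era enable it via
# something like paddle.fluid.core.set_prim_eager_enabled(True) (assumption).
(d3w_dx2dy,) = paddle.grad(d2w_dx2, y)
print(float(d3w_dx2dy))  # expected 4 * y = 12.0 for x=2, y=3 when the higher-order path is available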
Showing 4 changed files with 17 additions and 449 deletions (+17 -449)
paddle/fluid/eager/api/manual/eager_manual/nodes/multiply_node.cc   +16 -382
paddle/fluid/eager/api/manual/eager_manual/nodes/nodes.h            +0  -63
test/prim/prim/vjp/CMakeLists.txt                                   +1  -1
test/prim/prim/vjp/test_comp_high_grad.py                           +0  -3
paddle/fluid/eager/api/manual/eager_manual/nodes/multiply_node.cc
@@ -349,35 +349,22 @@ MultiplyDoubleGradNode::operator()(
  // Call grad_api function
  if (paddle::prim::PrimCommonUtils::IsEagerPrimEnabled()) {
    bool original_global_grad = egr::Controller::Instance().HasGrad();
    if (!create_graph) {
      egr::Controller::Instance().SetHasGrad(create_graph);
    }
    paddle::prim::multiply_double_grad<paddle::Tensor>(x, y, fwd_grad_out, fwd_grad_grad_x_optional, fwd_grad_grad_y_optional, axis, api_output_0, api_output_1, api_output_2);
    VLOG(4) << "Composite api multiply_double_grad is called ";
    if (!create_graph) {
      egr::Controller::Instance().SetHasGrad(original_global_grad);
    }
  } else {
    paddle::experimental::multiply_double_grad(x, y, fwd_grad_out, fwd_grad_grad_x_optional, fwd_grad_grad_y_optional, axis, api_output_0, api_output_1, api_output_2);
    VLOG(4) << "Fused api multiply_double_grad is called ";
  bool original_global_grad = egr::Controller::Instance().HasGrad();
  if (!create_graph) {
    egr::Controller::Instance().SetHasGrad(create_graph);
  }
  paddle::prim::multiply_double_grad<paddle::Tensor>(x, y, fwd_grad_out, fwd_grad_grad_x_optional, fwd_grad_grad_y_optional, axis, api_output_0, api_output_1, api_output_2);
  VLOG(4) << "Composite api multiply_double_grad is called ";
  if (!create_graph) {
    egr::Controller::Instance().SetHasGrad(original_global_grad);
  }
  // Check NaN and Inf id needed
@@ -417,56 +404,6 @@ MultiplyDoubleGradNode::operator()(
  // Create Grad Node
  if (!paddle::prim::PrimCommonUtils::IsEagerPrimEnabled()) {
    if (trace_backward) {
      paddle::platform::RecordEvent node_creation_record_event("multiply_double_grad node_creation", paddle::platform::TracerEventType::OperatorInner, 1);
      // Node Construction
      auto grad_node = std::shared_ptr<MultiplyTripleGradNode>(new MultiplyTripleGradNode(3, 5));
      // SetAttributes if needed
      grad_node->SetAttributeaxis(-1);
      // Set TensorWrappers for Forward Inputs if needed
      grad_node->SetTensorWrapperx(x);
      grad_node->SetTensorWrappery(y);
      grad_node->SetTensorWrapperfwd_grad_out(fwd_grad_out);
      grad_node->SetTensorWrapperfwd_grad_grad_x(fwd_grad_grad_x);
      grad_node->SetTensorWrapperfwd_grad_grad_y(fwd_grad_grad_y);
      // SetGradOutMeta & SetEdges
      grad_node->SetGradOutMeta(x, 0);
      grad_node->SetGradOutMeta(y, 1);
      grad_node->SetGradOutMeta(fwd_grad_out, 2);
      grad_node->SetGradOutMeta(fwd_grad_grad_x, 3);
      grad_node->SetGradOutMeta(fwd_grad_grad_y, 4);
      // SetOutRank & SetHistory & SetGradInMeta
      if (grad_x_autograd_meta) {
        egr::EagerUtils::SetOutRankWithSlot(grad_x_autograd_meta, 0);
      }
      if (grad_y_autograd_meta) {
        egr::EagerUtils::SetOutRankWithSlot(grad_y_autograd_meta, 1);
      }
      if (grad_grad_out_autograd_meta) {
        egr::EagerUtils::SetOutRankWithSlot(grad_grad_out_autograd_meta, 2);
      }
      if (grad_x_autograd_meta) {
        egr::EagerUtils::SetHistory(grad_x_autograd_meta, grad_node);
      }
      if (grad_y_autograd_meta) {
        egr::EagerUtils::SetHistory(grad_y_autograd_meta, grad_node);
      }
      if (grad_grad_out_autograd_meta) {
        egr::EagerUtils::SetHistory(grad_grad_out_autograd_meta, grad_node);
      }
      grad_node->SetGradInMeta(grad_x, 0);
      grad_node->SetGradInMeta(grad_y, 1);
      grad_node->SetGradInMeta(grad_grad_out, 2);
      // Set TensorWrappers for Forward Outputs if needed
    }
  }

  VLOG(4) << "Finish AD API GRAD: multiply_double_grad";
  // LOG IF DEBUG
@@ -521,309 +458,6 @@ MultiplyDoubleGradNode::operator()(
  return returns;
}

paddle::small_vector<std::vector<paddle::Tensor>, egr::kSlotSmallVectorSize>
MultiplyTripleGradNode::operator()(
    paddle::small_vector<std::vector<paddle::Tensor>, egr::kSlotSmallVectorSize>& grads,
    bool create_graph,
    bool is_new_grad) {
  VLOG(3) << "Running AD API GRAD: "
          << "multiply_triple_grad";
  // Fill Zero For GradIn Tensors
  const auto& input_metas = this->InputMeta();
  egr::EagerUtils::FillZeroForEmptyOptionalGradInput(&grads[0][0], input_metas[0][0]);
  egr::EagerUtils::FillZeroForEmptyOptionalGradInput(&grads[1][0], input_metas[1][0]);
  egr::EagerUtils::FillZeroForEmptyOptionalGradInput(&grads[2][0], input_metas[2][0]);

  // Apply Gradient Hooks
  auto hooked_grads = ApplyGradientHooks(grads);

  // Collect GradIn Tensors, Attrs and Recovered TensorWrappers
  auto x = egr::EagerUtils::RecoverTensorWrapper(&this->x_);
  auto y = egr::EagerUtils::RecoverTensorWrapper(&this->y_);
  auto fwd_grad_out = egr::EagerUtils::RecoverTensorWrapper(&this->fwd_grad_out_);
  auto fwd_grad_grad_x = egr::EagerUtils::RecoverTensorWrapper(&this->fwd_grad_grad_x_);
  paddle::optional<paddle::Tensor> fwd_grad_grad_x_optional;
  if (fwd_grad_grad_x.impl())
    fwd_grad_grad_x_optional = paddle::make_optional<paddle::Tensor>(fwd_grad_grad_x);
  auto fwd_grad_grad_y = egr::EagerUtils::RecoverTensorWrapper(&this->fwd_grad_grad_y_);
  paddle::optional<paddle::Tensor> fwd_grad_grad_y_optional;
  if (fwd_grad_grad_y.impl())
    fwd_grad_grad_y_optional = paddle::make_optional<paddle::Tensor>(fwd_grad_grad_y);
  auto& grad_x_grad = hooked_grads[0][0];
  paddle::optional<paddle::Tensor> grad_x_grad_optional;
  if (grad_x_grad.initialized())
    grad_x_grad_optional = paddle::make_optional<paddle::Tensor>(grad_x_grad);
  auto& grad_y_grad = hooked_grads[1][0];
  paddle::optional<paddle::Tensor> grad_y_grad_optional;
  if (grad_y_grad.initialized())
    grad_y_grad_optional = paddle::make_optional<paddle::Tensor>(grad_y_grad);
  auto& grad_grad_out_grad = hooked_grads[2][0];
  paddle::optional<paddle::Tensor> grad_grad_out_grad_optional;
  if (grad_grad_out_grad.initialized())
    grad_grad_out_grad_optional = paddle::make_optional<paddle::Tensor>(grad_grad_out_grad);
  auto& axis = this->axis_;
  // Prepare Grad function call
  const auto& out_metas = OutputMeta();
  paddle::small_vector<std::vector<paddle::Tensor>, egr::kSlotSmallVectorSize> returns(5);
  for (int i = 0; i < 5; ++i) {
    out_metas[i].size() == 0 ? returns[i].resize(1) : returns[i].resize(out_metas[i].size());
  }

  auto* api_output_0 = (out_metas[0].empty() || out_metas[0][0].IsStopGradient()) ? nullptr : &returns[0][0];
  auto* api_output_1 = (out_metas[1].empty() || out_metas[1][0].IsStopGradient()) ? nullptr : &returns[1][0];
  auto* api_output_2 = (out_metas[2].empty() || out_metas[2][0].IsStopGradient()) ? nullptr : &returns[2][0];
  auto* api_output_3 = (out_metas[3].empty() || out_metas[3][0].IsStopGradient()) ? nullptr : &returns[3][0];
  auto* api_output_4 = (out_metas[4].empty() || out_metas[4][0].IsStopGradient()) ? nullptr : &returns[4][0];

  // Runtime check if we need next grad
  bool trace_backward = egr::Controller::Instance().HasGrad() && create_graph;
  // Inplace Check
  // Inplace Strategy

  VLOG(5) << "Running C++ API: "
          << "multiply_triple_grad";
  // Before log info
  if (VLOG_IS_ON(3)) {
    const char* INPUT_PRINT_TEMPLATE = "{ Input: [%s]} ";
    std::string input_str = "";
    std::string output_str = "";

    const char* TENSOR_GRAD_X_GRAD_TEMPLATE = " \n( grad_x_grad , [%s]), ";
    std::string input_grad_x_grad_str = paddle::string::Sprintf(TENSOR_GRAD_X_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(grad_x_grad));
    input_str += input_grad_x_grad_str;
    const char* TENSOR_GRAD_Y_GRAD_TEMPLATE = " \n( grad_y_grad , [%s]), ";
    std::string input_grad_y_grad_str = paddle::string::Sprintf(TENSOR_GRAD_Y_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(grad_y_grad));
    input_str += input_grad_y_grad_str;
    const char* TENSOR_GRAD_GRAD_OUT_GRAD_TEMPLATE = " \n( grad_grad_out_grad , [%s]), ";
    std::string input_grad_grad_out_grad_str = paddle::string::Sprintf(TENSOR_GRAD_GRAD_OUT_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(grad_grad_out_grad));
    input_str += input_grad_grad_out_grad_str;
    const char* TENSOR_X_TEMPLATE = " \n( x , [%s]), ";
    std::string input_x_str = paddle::string::Sprintf(TENSOR_X_TEMPLATE, egr::EagerUtils::TensorStr(x));
    input_str += input_x_str;
    const char* TENSOR_Y_TEMPLATE = " \n( y , [%s]), ";
    std::string input_y_str = paddle::string::Sprintf(TENSOR_Y_TEMPLATE, egr::EagerUtils::TensorStr(y));
    input_str += input_y_str;
    const char* TENSOR_FWD_GRAD_OUT_TEMPLATE = " \n( fwd_grad_out , [%s]), ";
    std::string input_fwd_grad_out_str = paddle::string::Sprintf(TENSOR_FWD_GRAD_OUT_TEMPLATE, egr::EagerUtils::TensorStr(fwd_grad_out));
    input_str += input_fwd_grad_out_str;
    const char* TENSOR_FWD_GRAD_GRAD_X_TEMPLATE = " \n( fwd_grad_grad_x , [%s]), ";
    std::string input_fwd_grad_grad_x_str = paddle::string::Sprintf(TENSOR_FWD_GRAD_GRAD_X_TEMPLATE, egr::EagerUtils::TensorStr(fwd_grad_grad_x));
    input_str += input_fwd_grad_grad_x_str;
    const char* TENSOR_FWD_GRAD_GRAD_Y_TEMPLATE = " \n( fwd_grad_grad_y , [%s]), ";
    std::string input_fwd_grad_grad_y_str = paddle::string::Sprintf(TENSOR_FWD_GRAD_GRAD_Y_TEMPLATE, egr::EagerUtils::TensorStr(fwd_grad_grad_y));
    input_str += input_fwd_grad_grad_y_str;
    VLOG(3) << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str);
  }
  // Call grad_api function
  paddle::experimental::multiply_triple_grad(x, y, fwd_grad_out, fwd_grad_grad_x_optional, fwd_grad_grad_y_optional, grad_x_grad_optional, grad_y_grad_optional, grad_grad_out_grad_optional, axis, api_output_0, api_output_1, api_output_2, api_output_3, api_output_4);
  // Check NaN and Inf id needed
  if (FLAGS_check_nan_inf) {
    try {
      egr::CheckTensorHasNanOrInf("multiply_triple_grad", returns);
    } catch (...) {
      LOG(WARNING) << "There are nan/inf in (multiply_triple_grad)";
      auto forward_trace = GetForwardTrace();
      std::cout << forward_trace << std::endl;
      std::rethrow_exception(std::current_exception());
    }
  }
  // Get GradOut autograd_meta
  auto& x_grad = returns[0][0];
  egr::AutogradMeta* x_grad_autograd_meta = returns[0][0].initialized() ? egr::EagerUtils::autograd_meta(&x_grad) : nullptr;
  if (x_grad_autograd_meta) x_grad_autograd_meta->SetStopGradient(false);

  auto& y_grad = returns[1][0];
  egr::AutogradMeta* y_grad_autograd_meta = returns[1][0].initialized() ? egr::EagerUtils::autograd_meta(&y_grad) : nullptr;
  if (y_grad_autograd_meta) y_grad_autograd_meta->SetStopGradient(false);

  auto& fwd_grad_out_grad = returns[2][0];
  egr::AutogradMeta* fwd_grad_out_grad_autograd_meta = returns[2][0].initialized() ? egr::EagerUtils::autograd_meta(&fwd_grad_out_grad) : nullptr;
  if (fwd_grad_out_grad_autograd_meta) fwd_grad_out_grad_autograd_meta->SetStopGradient(false);

  auto& fwd_grad_grad_x_grad = returns[3][0];
  egr::AutogradMeta* fwd_grad_grad_x_grad_autograd_meta = returns[3][0].initialized() ? egr::EagerUtils::autograd_meta(&fwd_grad_grad_x_grad) : nullptr;
  if (fwd_grad_grad_x_grad_autograd_meta) fwd_grad_grad_x_grad_autograd_meta->SetStopGradient(false);

  auto& fwd_grad_grad_y_grad = returns[4][0];
  egr::AutogradMeta* fwd_grad_grad_y_grad_autograd_meta = returns[4][0].initialized() ? egr::EagerUtils::autograd_meta(&fwd_grad_grad_y_grad) : nullptr;
  if (fwd_grad_grad_y_grad_autograd_meta) fwd_grad_grad_y_grad_autograd_meta->SetStopGradient(false);
  // Create Grad Node
  if (trace_backward) {
    PADDLE_THROW(phi::errors::Unavailable(
        "The Op multiply_triple_grad doesn't have any grad"
        "op. If you don't intend calculating higher order"
        "derivatives, please set `create_graph`to False."));
  }

  VLOG(4) << "Finish AD API GRAD: multiply_triple_grad";
  // LOG IF DEBUG
  if (VLOG_IS_ON(4)) {
    const char* INPUT_PRINT_TEMPLATE = "{ Input: [%s], \n Output: [%s] } ";
    std::string input_str = "";
    std::string output_str = "";

    const char* TENSOR_GRAD_X_GRAD_TEMPLATE = " \n( grad_x_grad , [%s]), ";
    std::string input_grad_x_grad_str = paddle::string::Sprintf(TENSOR_GRAD_X_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(grad_x_grad));
    input_str += input_grad_x_grad_str;
    const char* TENSOR_GRAD_Y_GRAD_TEMPLATE = " \n( grad_y_grad , [%s]), ";
    std::string input_grad_y_grad_str = paddle::string::Sprintf(TENSOR_GRAD_Y_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(grad_y_grad));
    input_str += input_grad_y_grad_str;
    const char* TENSOR_GRAD_GRAD_OUT_GRAD_TEMPLATE = " \n( grad_grad_out_grad , [%s]), ";
    std::string input_grad_grad_out_grad_str = paddle::string::Sprintf(TENSOR_GRAD_GRAD_OUT_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(grad_grad_out_grad));
    input_str += input_grad_grad_out_grad_str;
    const char* TENSOR_X_TEMPLATE = " \n( x , [%s]), ";
    std::string input_x_str = paddle::string::Sprintf(TENSOR_X_TEMPLATE, egr::EagerUtils::TensorStr(x));
    input_str += input_x_str;
    const char* TENSOR_Y_TEMPLATE = " \n( y , [%s]), ";
    std::string input_y_str = paddle::string::Sprintf(TENSOR_Y_TEMPLATE, egr::EagerUtils::TensorStr(y));
    input_str += input_y_str;
    const char* TENSOR_FWD_GRAD_OUT_TEMPLATE = " \n( fwd_grad_out , [%s]), ";
    std::string input_fwd_grad_out_str = paddle::string::Sprintf(TENSOR_FWD_GRAD_OUT_TEMPLATE, egr::EagerUtils::TensorStr(fwd_grad_out));
    input_str += input_fwd_grad_out_str;
    const char* TENSOR_FWD_GRAD_GRAD_X_TEMPLATE = " \n( fwd_grad_grad_x , [%s]), ";
    std::string input_fwd_grad_grad_x_str = paddle::string::Sprintf(TENSOR_FWD_GRAD_GRAD_X_TEMPLATE, egr::EagerUtils::TensorStr(fwd_grad_grad_x));
    input_str += input_fwd_grad_grad_x_str;
    const char* TENSOR_FWD_GRAD_GRAD_Y_TEMPLATE = " \n( fwd_grad_grad_y , [%s]), ";
    std::string input_fwd_grad_grad_y_str = paddle::string::Sprintf(TENSOR_FWD_GRAD_GRAD_Y_TEMPLATE, egr::EagerUtils::TensorStr(fwd_grad_grad_y));
    input_str += input_fwd_grad_grad_y_str;
    const char* TENSOR_X_GRAD_TEMPLATE = " \n( x_grad , [%s]), ";
    std::string output_x_grad_str = paddle::string::Sprintf(TENSOR_X_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(x_grad));
    output_str += output_x_grad_str;
    const char* TENSOR_Y_GRAD_TEMPLATE = " \n( y_grad , [%s]), ";
    std::string output_y_grad_str = paddle::string::Sprintf(TENSOR_Y_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(y_grad));
    output_str += output_y_grad_str;
    const char* TENSOR_FWD_GRAD_OUT_GRAD_TEMPLATE = " \n( fwd_grad_out_grad , [%s]), ";
    std::string output_fwd_grad_out_grad_str = paddle::string::Sprintf(TENSOR_FWD_GRAD_OUT_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(fwd_grad_out_grad));
    output_str += output_fwd_grad_out_grad_str;
    const char* TENSOR_FWD_GRAD_GRAD_X_GRAD_TEMPLATE = " \n( fwd_grad_grad_x_grad , [%s]), ";
    std::string output_fwd_grad_grad_x_grad_str = paddle::string::Sprintf(TENSOR_FWD_GRAD_GRAD_X_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(fwd_grad_grad_x_grad));
    output_str += output_fwd_grad_grad_x_grad_str;
    const char* TENSOR_FWD_GRAD_GRAD_Y_GRAD_TEMPLATE = " \n( fwd_grad_grad_y_grad , [%s]), ";
    std::string output_fwd_grad_grad_y_grad_str = paddle::string::Sprintf(TENSOR_FWD_GRAD_GRAD_Y_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(fwd_grad_grad_y_grad));
    output_str += output_fwd_grad_grad_y_grad_str;
    VLOG(4) << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str, output_str);
  }
  // Return
  if (NeedComplexToRealConversion()) HandleComplexGradToRealGrad(&returns);
  return returns;
}

namespace sparse {
paddle::small_vector<std::vector<paddle::Tensor>, egr::kSlotSmallVectorSize>
MultiplyGradNode::operator()(
paddle/fluid/eager/api/manual/eager_manual/nodes/nodes.h
@@ -312,69 +312,6 @@ class MultiplyDoubleGradNode : public egr::GradNodeBase {
  int axis_ = -1;
};

class MultiplyTripleGradNode : public egr::GradNodeBase {
 public:
  MultiplyTripleGradNode() : egr::GradNodeBase() {}
  MultiplyTripleGradNode(size_t bwd_in_slot_num, size_t bwd_out_slot_num)
      : egr::GradNodeBase(bwd_in_slot_num, bwd_out_slot_num) {}
  ~MultiplyTripleGradNode() override = default;

  virtual paddle::small_vector<std::vector<paddle::Tensor>, egr::kSlotSmallVectorSize>
  operator()(paddle::small_vector<std::vector<paddle::Tensor>,  // NOLINT
                                  egr::kSlotSmallVectorSize>& grads,  // NOLINT
             bool create_graph = false,
             bool is_new_grad = false) override;

  std::string name() override { return "MultiplyTripleGradNode"; }

  void ClearTensorWrappers() override {
    x_.clear();
    y_.clear();
    fwd_grad_out_.clear();
    fwd_grad_grad_x_.clear();
    fwd_grad_grad_y_.clear();
    SetIsTensorWrappersCleared(true);
  }

  std::shared_ptr<GradNodeBase> Copy() const override {
    auto copied_node = std::shared_ptr<MultiplyTripleGradNode>(new MultiplyTripleGradNode(*this));
    return copied_node;
  }

  // SetTensorWrapperX, SetTensorWrapperY, ...
  void SetTensorWrapperx(const paddle::Tensor& x) { x_ = egr::TensorWrapper(x, false); }
  void SetTensorWrappery(const paddle::Tensor& y) { y_ = egr::TensorWrapper(y, false); }
  void SetTensorWrapperfwd_grad_out(const paddle::Tensor& fwd_grad_out) { fwd_grad_out_ = egr::TensorWrapper(fwd_grad_out, false); }
  void SetTensorWrapperfwd_grad_grad_x(const paddle::Tensor& fwd_grad_grad_x) { fwd_grad_grad_x_ = egr::TensorWrapper(fwd_grad_grad_x, false); }
  void SetTensorWrapperfwd_grad_grad_y(const paddle::Tensor& fwd_grad_grad_y) { fwd_grad_grad_y_ = egr::TensorWrapper(fwd_grad_grad_y, false); }

  // SetAttributes
  void SetAttributeaxis(const int& axis) { axis_ = axis; }

 private:
  // TensorWrappers
  egr::TensorWrapper x_;
  egr::TensorWrapper y_;
  egr::TensorWrapper fwd_grad_out_;
  egr::TensorWrapper fwd_grad_grad_x_;
  egr::TensorWrapper fwd_grad_grad_y_;

  // Attributes
  int axis_ = -1;
};

class SyncBatchNormGradNode : public egr::GradNodeBase {
 public:
  SyncBatchNormGradNode() : egr::GradNodeBase() {}
test/prim/prim/vjp/CMakeLists.txt
@@ -8,7 +8,7 @@ foreach(TEST_OP ${TEST_OPS})
  py_test_modules(${TEST_OP} MODULES ${TEST_OP} ENVS ${GC_ENVS})
endforeach()

set_tests_properties(test_comp_high_grad PROPERTIES TIMEOUT 50)
set_tests_properties(test_comp_high_grad PROPERTIES TIMEOUT 100)

add_subdirectory(eager)
add_subdirectory(static)
test/prim/prim/vjp/test_comp_high_grad.py
@@ -226,7 +226,6 @@ class TestSubtractHighGradCheck(unittest.TestCase):
        self.func_triple(p)


'''
@param.parameterized_class(
    ('shape1', 'shape2'),
    [
@@ -330,8 +329,6 @@ class TestMultiplyHighGradCheck(unittest.TestCase):
        self.func_double(p)
        self.func_triple(p)


'''
@param.parameterized_class(
    ('shape1'),