Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
BaiXuePrincess
Paddle
提交
bb2733fa
P
Paddle
项目概览
BaiXuePrincess
/
Paddle
与 Fork 源项目一致
Fork自
PaddlePaddle / Paddle
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
未验证
提交
bb2733fa
编写于
11月 19, 2021
作者:
W
Weilong Wu
提交者:
GitHub
11月 19, 2021
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
Add dygraph triple grad test, broadcast case (#37377)
上级
b505ff96
变更
1
隐藏空白更改
内联
并排
Showing
1 changed file
with
82 additions
and
0 deletions
+82
-0
python/paddle/fluid/tests/unittests/test_imperative_triple_grad.py
...ddle/fluid/tests/unittests/test_imperative_triple_grad.py
+82
-0
未找到文件。
python/paddle/fluid/tests/unittests/test_imperative_triple_grad.py
浏览文件 @
bb2733fa
...
...
@@ -146,5 +146,87 @@ class TestDygraphTripleGrad(TestCase):
self
.
assertTrue
(
np
.
allclose
(
dddx_grad_actual
,
dddx_expected
))
class TestDygraphTripleGradBradcastCase(TestCase):
    # NOTE(review): class name misspells "Broadcast"; kept as-is because
    # renaming would change test discovery / the public interface.
    """Triple-grad test for the broadcast case.

    Checks first-, second- and third-order gradients of
    ``sigmoid(matmul(x, y) + z)`` where ``y`` (shape [1, 2, 2]) broadcasts
    against ``x`` (shape [3, 2, 2]) and ``z`` (shape [2, 2]) broadcasts
    against the matmul result.
    """

    def setUp(self):
        """Configure gradient-accumulation mode and the broadcasting shapes."""
        self.sort_sum_gradient = False
        self.x_shape = [3, 2, 2]
        self.y_shape = [1, 2, 2]
        self.z_shape = [2, 2]

    def grad(self,
             outputs,
             inputs,
             grad_outputs=None,
             no_grad_vars=None,
             retain_graph=None,
             create_graph=False,
             allow_unused=False):
        """Thin wrapper over ``fluid.dygraph.grad``.

        Sets ``FLAGS_sort_sum_gradient`` from ``self.sort_sum_gradient``
        before delegating, so every grad call in this test uses the same
        accumulation order.
        """
        fluid.set_flags({'FLAGS_sort_sum_gradient': self.sort_sum_gradient})
        return fluid.dygraph.grad(
            outputs=outputs,
            inputs=inputs,
            grad_outputs=grad_outputs,
            no_grad_vars=no_grad_vars,
            retain_graph=retain_graph,
            create_graph=create_graph,
            allow_unused=allow_unused)

    @dygraph_guard
    def test_example_with_gradient_and_create_graph(self):
        """Compare autograd's 1st/2nd/3rd-order grads w.r.t. x against
        hand-derived NumPy formulas for sigmoid(matmul(x, y) + z)."""
        x = random_var(self.x_shape)
        x_np = x.numpy()
        x.stop_gradient = False

        y = random_var(self.y_shape)
        y_np = y.numpy()
        y.stop_gradient = False

        z = random_var(self.z_shape)
        z_np = z.numpy()
        # (removed unused local: `numel = z_np.size` was never read)
        z.stop_gradient = False

        out = fluid.layers.sigmoid(paddle.matmul(x, y) + z)
        out_np = out.numpy()

        dx_actual, = self.grad([out], [x], create_graph=True)
        # Theoretical result based on math calculation:
        # d(sigmoid)/dx = dout * s * (1 - s) @ y^T  (batch transpose of y)
        dout = np.ones(self.x_shape).astype('float32')
        dx_expected = np.matmul(dout * out_np * (1 - out_np),
                                np.transpose(y_np, axes=(0, 2, 1)))
        self.assertTrue(np.allclose(dx_actual.numpy(), dx_expected))

        ddx_actual, = self.grad([dx_actual], [x], create_graph=True)
        # Theoretical result based on math calculation.
        # DDX/DDY are the (implicit) grad_outputs of the second grad call:
        # ones for x, zeros for y.
        DDY = np.zeros(self.y_shape).astype('float32')
        DDX = np.ones(self.x_shape).astype('float32')
        double_grad_tmp1 = np.matmul(dout * out_np * (1 - out_np),
                                     np.transpose(DDY, axes=(0, 2, 1)))
        double_grad_tmp2 = np.matmul(DDX, y_np) + np.matmul(x_np, DDY)
        double_grad_tmp3 = (1 - 2 * out_np) * dout \
            * double_grad_tmp2 * out_np * (1 - out_np)
        ddx_expected = double_grad_tmp1 + np.matmul(
            double_grad_tmp3, np.transpose(y_np, axes=(0, 2, 1)))
        self.assertTrue(np.allclose(ddx_actual.numpy(), ddx_expected))

        # Theoretical result based on math calculation.
        # d_ddout is zero here, so the first term of tmp2 vanishes; it is
        # kept to mirror the full third-order formula.
        d_ddout = np.zeros(self.x_shape).astype('float32')
        tmp0 = np.matmul(DDX, y_np) + np.matmul(x_np, DDY)
        tmp1 = (1 - 2 * out_np) * ((1 - 2 * out_np) * dout * tmp0 * tmp0)
        tmp2 = tmp0 * (1 - 2 * out_np) * d_ddout \
            - 2 * dout * (1 - out_np) * out_np * tmp0 * tmp0
        dddx_expected = np.matmul(
            ((tmp1 + tmp2) * out_np * (1 - out_np)),
            np.transpose(y_np, axes=(0, 2, 1)))
        ddx_actual.backward()
        dddx_grad_actual = x.gradient()
        self.assertTrue(np.allclose(dddx_grad_actual, dddx_expected))
# Run all TestCase classes in this file when executed as a script.
if __name__ == '__main__':
    unittest.main()
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录