Commit b927ce81 (unverified) in BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Authored Jan 17, 2023 by Jiabin Yang; committed via GitHub on Jan 17, 2023.

add test for composite with dy2st (#49873)

Parent: 791637cf
Showing 8 changed files with 269 additions and 28 deletions (+269 −28).
paddle/fluid/prim/utils/static/composite_grad_desc_maker.h                           +3  −0
python/paddle/fluid/backward.py                                                      +8  −4
python/paddle/fluid/tests/unittests/prim/prim/vjp/static/test_comp_add_grad.py       +44 −7
python/paddle/fluid/tests/unittests/prim/prim/vjp/static/test_comp_add_tanh_grad.py  +50 −5
python/paddle/fluid/tests/unittests/prim/prim/vjp/static/test_comp_div_grad.py       +42 −3
python/paddle/fluid/tests/unittests/prim/prim/vjp/static/test_comp_sqrt_grad.py      +40 −3
python/paddle/fluid/tests/unittests/prim/prim/vjp/static/test_comp_sub_grad.py       +42 −3
python/paddle/fluid/tests/unittests/prim/prim/vjp/static/test_comp_tanh_grad.py      +40 −3
paddle/fluid/prim/utils/static/composite_grad_desc_maker.h

@@ -477,6 +477,9 @@ class GradCompositeOpMakerBase {
  void RecoverOutputName(const paddle::experimental::Tensor& output,
                         const std::string& origin_name) {
    if (origin_name == framework::kEmptyVarName) return;
    VLOG(4) << "Recover: "
            << static_cast<prim::DescTensor*>(output.impl().get())->Name()
            << " To: " << origin_name;
    prim::StaticCompositeContext::Instance().GetBlock()->RenameVar(
        static_cast<prim::DescTensor*>(output.impl().get())->Name(),
        origin_name);
  }
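For orientation: RecoverOutputName renames the temporary variable created during composite grad expansion back to the name the framework originally assigned, skipping empty placeholder names. A minimal sketch of that idea in Python, with hypothetical names (the real logic lives in the C++ block desc, via RenameVar):

# Hypothetical sketch only; not Paddle API.
K_EMPTY_VAR_NAME = "@EMPTY@"  # stand-in for framework::kEmptyVarName

def recover_output_name(block_vars, temp_name, origin_name):
    # nothing to recover for unnamed (empty) outputs
    if origin_name == K_EMPTY_VAR_NAME:
        return
    # rename the freshly created var back to its original name
    block_vars[origin_name] = block_vars.pop(temp_name)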
python/paddle/fluid/backward.py

@@ -1492,11 +1492,15 @@ def _append_backward_ops_(
     )
     # remove some backward ops
-    not_need_ops = _find_not_need_ops(grad_op_descs, ops, input_grad_names_set)
-    grad_op_descs = [
-        op_desc for op_desc in grad_op_descs if op_desc not in not_need_ops
-    ]
+    # TODO(Jiabin): Support this in prime later, it will prune add_grad, fix this problem
+    if not core.is_prim_enabled():
+        not_need_ops = _find_not_need_ops(
+            grad_op_descs, ops, input_grad_names_set
+        )
+        grad_op_descs = [
+            op_desc for op_desc in grad_op_descs if op_desc not in not_need_ops
+        ]

     # append op_desc in grad_op_descs to target_block
     op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName()
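The effect of this hunk: dead-op pruning of the generated backward ops now runs only when composite (prim) lowering is disabled, since the pruner would otherwise drop ops (per the TODO, pieces of add_grad) that the composite expansion still needs. A minimal sketch of the resulting control flow, with a hypothetical helper name:

# Hypothetical condensation of the gated pruning above; not Paddle's code.
def maybe_prune(grad_op_descs, ops, input_grad_names_set, prim_enabled, find_not_need_ops):
    if prim_enabled:
        # composite mode: keep every generated backward op for now
        return grad_op_descs
    not_need_ops = find_not_need_ops(grad_op_descs, ops, input_grad_names_set)
    return [op for op in grad_op_descs if op not in not_need_ops]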
python/paddle/fluid/tests/unittests/prim/prim/vjp/static/test_comp_add_grad.py

@@ -21,6 +21,23 @@ import paddle
 from paddle.fluid import core


+def apply_to_static(net, use_cinn):
+    build_strategy = paddle.static.BuildStrategy()
+    build_strategy.build_cinn_pass = use_cinn
+    return paddle.jit.to_static(net, build_strategy=build_strategy)
+
+
+class PrimeNet(paddle.nn.Layer):
+    def __init__(self):
+        super(PrimeNet, self).__init__()
+        self.fc = paddle.nn.Linear(4, 4)
+
+    def forward(self, x, y):
+        tmp = self.fc(x)
+        out = paddle.add(tmp, y)
+        return out
+
+
 @param.parameterized_class(
     ('primal0', 'primal1', 'dtype'),
     [

@@ -57,11 +74,33 @@ class TestAddGradComp(unittest.TestCase):
         cls.primal0 = cls.primal0.astype(cls.dtype)
         cls.primal1 = cls.primal1.astype(cls.dtype)

     def setUp(self):
         paddle.enable_static()

+    def train(self, use_prim, use_cinn):
+        paddle.seed(2022)
+        self.x = paddle.randn([2, 4])
+        self.y = paddle.randn([2, 4])
+        self.x.stop_gradient = False
+        self.y.stop_gradient = False
+        net = PrimeNet()
+        core.set_prim_enabled(use_prim)
+        net = apply_to_static(net, use_cinn)
+        out = net(self.x, self.y)
+        res = paddle.autograd.grad(out, [self.x, self.y])
+        return res
+
-    def tearDown(self):
+    def test_cinn(self):
         paddle.disable_static()
+        dy_res = self.train(use_prim=False, use_cinn=False)
+        comp_st_cinn_res = self.train(use_prim=True, use_cinn=False)
+
+        for i in range(len(dy_res)):
+            np.testing.assert_allclose(
+                comp_st_cinn_res[i].numpy(),
+                dy_res[i].numpy(),
+                rtol=1e-7,
+                atol=1e-7,
+            )
+        paddle.enable_static()

     def test_tanh_grad_comp(self):
         def actual(primal0, primal1):

@@ -73,8 +112,7 @@ class TestAddGradComp(unittest.TestCase):
             x.stop_gradient = False
             y.stop_gradient = False
             z = paddle.add(x, y)
-            out = paddle.tanh(z)
-            res = paddle.static.gradients([out], [x, y])
+            res = paddle.static.gradients([z], [x, y])
             exe = paddle.static.Executor()
             exe.run(sp)
             out = exe.run(

@@ -100,8 +138,7 @@ class TestAddGradComp(unittest.TestCase):
             x.stop_gradient = False
             y.stop_gradient = False
             z = paddle.add(x, y)
-            out = paddle.tanh(z)
-            res = paddle.static.gradients([out], [x, y])
+            res = paddle.static.gradients([z], [x, y])
             exe = paddle.static.Executor()
             exe.run(sp)
             out = exe.run(
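All of the new test files share one recipe: build a small net, convert it with paddle.jit.to_static under a BuildStrategy (dy2st), run it once with composite (prim) backward disabled and once enabled, and assert the gradients agree. A condensed, hedged sketch of that recipe (grads_for is a hypothetical helper; the real tests inline it as train):

import numpy as np
import paddle
from paddle.fluid import core

def apply_to_static(net, use_cinn):
    build_strategy = paddle.static.BuildStrategy()
    build_strategy.build_cinn_pass = use_cinn
    return paddle.jit.to_static(net, build_strategy=build_strategy)

def grads_for(make_net, use_prim):
    paddle.seed(2022)                # same seed => same weights and inputs per run
    x = paddle.randn([2, 4])
    x.stop_gradient = False
    core.set_prim_enabled(use_prim)  # toggle composite backward expansion
    net = apply_to_static(make_net(), use_cinn=False)
    out = net(x)
    return paddle.autograd.grad(out, [x])

# usage: compare the two modes elementwise
# dy = grads_for(MyNet, use_prim=False)
# st = grads_for(MyNet, use_prim=True)
# np.testing.assert_allclose(st[0].numpy(), dy[0].numpy(), rtol=1e-7, atol=1e-7)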
python/paddle/fluid/tests/unittests/prim/prim/vjp/static/test_comp_add_tanh_grad.py

@@ -21,6 +21,24 @@ import paddle
 from paddle.fluid import core


+def apply_to_static(net, use_cinn):
+    build_strategy = paddle.static.BuildStrategy()
+    build_strategy.build_cinn_pass = use_cinn
+    return paddle.jit.to_static(net, build_strategy=build_strategy)
+
+
+class PrimeNet(paddle.nn.Layer):
+    def __init__(self):
+        super(PrimeNet, self).__init__()
+        self.fc = paddle.nn.Linear(4, 4)
+
+    def forward(self, x, y):
+        tmp = self.fc(x)
+        out = paddle.add(tmp, y)
+        res = paddle.tanh(out)
+        return res
+
+
 @param.parameterized_class(
     ('primal0', 'primal1', 'dtype'),
     [

@@ -57,13 +75,37 @@ class TestDivGradComp(unittest.TestCase):
         cls.primal0 = cls.primal0.astype(cls.dtype)
         cls.primal1 = cls.primal1.astype(cls.dtype)

     def setUp(self):
         paddle.enable_static()

+    def train(self, use_prim, use_cinn):
+        paddle.seed(2022)
+        self.x = paddle.randn([2, 4])
+        self.y = paddle.randn([2, 4])
+        self.x.stop_gradient = False
+        self.y.stop_gradient = False
+        net = PrimeNet()
+        core.set_prim_enabled(use_prim)
+        net = apply_to_static(net, use_cinn)
+        out = net(self.x, self.y)
+        res = paddle.autograd.grad(out, [self.x, self.y])
+        return res
+
-    def tearDown(self):
+    def test_cinn(self):
         paddle.disable_static()
+        dy_res = self.train(use_prim=False, use_cinn=False)
+        comp_st_cinn_res = self.train(use_prim=True, use_cinn=False)
+
+        for i in range(len(dy_res)):
+            np.testing.assert_allclose(
+                comp_st_cinn_res[i].numpy(),
+                dy_res[i].numpy(),
+                rtol=1e-7,
+                atol=1e-7,
+            )
+        paddle.enable_static()

     def test_tanh_grad_comp(self):
+        paddle.enable_static()
+
         def actual(primal0, primal1):
             core.set_prim_enabled(True)
             mp, sp = paddle.static.Program(), paddle.static.Program()

@@ -73,7 +115,8 @@ class TestDivGradComp(unittest.TestCase):
             x.stop_gradient = False
             y.stop_gradient = False
             z = paddle.add(x, y)
-            res = paddle.static.gradients([z], [x, y])
+            out = paddle.tanh(z)
+            res = paddle.static.gradients([out], [x, y])
             exe = paddle.static.Executor()
             exe.run(sp)
             out = exe.run(

@@ -99,7 +142,8 @@ class TestDivGradComp(unittest.TestCase):
             x.stop_gradient = False
             y.stop_gradient = False
             z = paddle.add(x, y)
-            res = paddle.static.gradients([z], [x, y])
+            out = paddle.tanh(z)
+            res = paddle.static.gradients([out], [x, y])
             exe = paddle.static.Executor()
             exe.run(sp)
             out = exe.run(

@@ -129,6 +173,7 @@ class TestDivGradComp(unittest.TestCase):
                 atol=0,
             )
         core.set_prim_enabled(False)
+        paddle.disable_static()


 if __name__ == '__main__':
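The middle hunks move the gradient target from the intermediate z to the tanh output, so the composite tanh_grad rule is exercised on top of add_grad. For reference, a minimal hedged sketch of the static-graph API in play (program and data names are illustrative):

import paddle

paddle.enable_static()
mp, sp = paddle.static.Program(), paddle.static.Program()
with paddle.static.program_guard(mp, sp):
    x = paddle.static.data('x', shape=[2, 4], dtype='float32')
    y = paddle.static.data('y', shape=[2, 4], dtype='float32')
    x.stop_gradient = False
    y.stop_gradient = False
    z = paddle.add(x, y)
    out = paddle.tanh(z)
    # differentiating out (not z) pulls tanh's backward into the graph
    res = paddle.static.gradients([out], [x, y])
paddle.disable_static()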
python/paddle/fluid/tests/unittests/prim/prim/vjp/static/test_comp_div_grad.py

@@ -21,6 +21,23 @@ import paddle
 from paddle.fluid import core


+def apply_to_static(net, use_cinn):
+    build_strategy = paddle.static.BuildStrategy()
+    build_strategy.build_cinn_pass = use_cinn
+    return paddle.jit.to_static(net, build_strategy=build_strategy)
+
+
+class PrimeNet(paddle.nn.Layer):
+    def __init__(self):
+        super(PrimeNet, self).__init__()
+        self.fc = paddle.nn.Linear(4, 4)
+
+    def forward(self, x, y):
+        tmp = self.fc(x)
+        out = paddle.divide(tmp, y)
+        return out
+
+
 @param.parameterized_class(
     ('primal0', 'primal1', 'dtype'),
     [

@@ -57,11 +74,33 @@ class TestDivGradComp(unittest.TestCase):
         cls.primal0 = cls.primal0.astype(cls.dtype)
         cls.primal1 = cls.primal1.astype(cls.dtype)

     def setUp(self):
         paddle.enable_static()

+    def train(self, use_prim, use_cinn):
+        paddle.seed(2022)
+        self.x = paddle.randn([2, 4])
+        self.y = paddle.randn([2, 4])
+        self.x.stop_gradient = False
+        self.y.stop_gradient = False
+        net = PrimeNet()
+        core.set_prim_enabled(use_prim)
+        net = apply_to_static(net, use_cinn)
+        out = net(self.x, self.y)
+        res = paddle.autograd.grad(out, [self.x, self.y])
+        return res
+
-    def tearDown(self):
+    def test_cinn(self):
         paddle.disable_static()
+        dy_res = self.train(use_prim=False, use_cinn=False)
+        comp_st_cinn_res = self.train(use_prim=True, use_cinn=False)
+
+        for i in range(len(dy_res)):
+            np.testing.assert_allclose(
+                comp_st_cinn_res[i].numpy(),
+                dy_res[i].numpy(),
+                rtol=1e-6,
+                atol=1e-6,
+            )
+        paddle.enable_static()

     def test_tanh_grad_comp(self):
         def actual(primal0, primal1):
python/paddle/fluid/tests/unittests/prim/prim/vjp/static/test_comp_sqrt_grad.py

@@ -26,6 +26,23 @@ import parameterized as param
 import paddle


+def apply_to_static(net, use_cinn):
+    build_strategy = paddle.static.BuildStrategy()
+    build_strategy.build_cinn_pass = use_cinn
+    return paddle.jit.to_static(net, build_strategy=build_strategy)
+
+
+class PrimeNet(paddle.nn.Layer):
+    def __init__(self):
+        super(PrimeNet, self).__init__()
+        self.fc = paddle.nn.Linear(4, 4)
+
+    def forward(self, x):
+        tmp = self.fc(x)
+        out = paddle.sqrt(tmp)
+        return out
+
+
 @param.parameterized_class(
     ('primal', 'cotangent', 'dtype'),
     [

@@ -38,11 +55,31 @@ class TestSqrtGradComp(unittest.TestCase):
         cls.primal = cls.primal.astype(cls.dtype)
         cls.cotangent = cls.cotangent.astype(cls.dtype)

     def setUp(self):
         paddle.enable_static()

+    def train(self, use_prim, use_cinn):
+        paddle.seed(2022)
+        self.x = paddle.randn([2, 4])
+        self.x.stop_gradient = False
+        net = PrimeNet()
+        core.set_prim_enabled(use_prim)
+        net = apply_to_static(net, use_cinn)
+        out = net(self.x)
+        res = paddle.autograd.grad(out, [self.x])
+        return res
+
-    def tearDown(self):
+    def test_cinn(self):
         paddle.disable_static()
+        dy_res = self.train(use_prim=False, use_cinn=False)
+        comp_st_cinn_res = self.train(use_prim=True, use_cinn=False)
+
+        for i in range(len(dy_res)):
+            np.testing.assert_allclose(
+                comp_st_cinn_res[i].numpy(),
+                dy_res[i].numpy(),
+                rtol=1e-7,
+                atol=1e-7,
+            )
+        paddle.enable_static()

     def test_sqrt_grad_comp(self):
         def actual(primal, cotangent):
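The @param.parameterized_class decorator seen above generates one test class per tuple, exposing the tuple fields as class attributes. A self-contained hedged example of that mechanism (data values are made up):

import unittest
import numpy as np
import parameterized as param

@param.parameterized_class(
    ('primal', 'cotangent', 'dtype'),
    [
        (np.random.rand(2, 4), np.random.rand(2, 4), np.float32),  # hypothetical case
    ],
)
class TestExample(unittest.TestCase):
    def test_attrs(self):
        # each generated class carries its own primal/cotangent/dtype
        self.assertEqual(self.primal.shape, self.cotangent.shape)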
python/paddle/fluid/tests/unittests/prim/prim/vjp/static/test_comp_sub_grad.py

@@ -21,6 +21,23 @@ import paddle
 from paddle.fluid import core


+def apply_to_static(net, use_cinn):
+    build_strategy = paddle.static.BuildStrategy()
+    build_strategy.build_cinn_pass = use_cinn
+    return paddle.jit.to_static(net, build_strategy=build_strategy)
+
+
+class PrimeNet(paddle.nn.Layer):
+    def __init__(self):
+        super(PrimeNet, self).__init__()
+        self.fc = paddle.nn.Linear(4, 4)
+
+    def forward(self, x, y):
+        tmp = self.fc(x)
+        out = paddle.subtract(tmp, y)
+        return out
+
+
 @param.parameterized_class(
     ('primal0', 'primal1', 'dtype'),
     [

@@ -58,11 +75,33 @@ class TestDivGradComp(unittest.TestCase):
         cls.primal0 = cls.primal0.astype(cls.dtype)
         cls.primal1 = cls.primal1.astype(cls.dtype)

     def setUp(self):
         paddle.enable_static()

+    def train(self, use_prim, use_cinn):
+        paddle.seed(2022)
+        self.x = paddle.randn([2, 4])
+        self.y = paddle.randn([2, 4])
+        self.x.stop_gradient = False
+        self.y.stop_gradient = False
+        net = PrimeNet()
+        core.set_prim_enabled(use_prim)
+        net = apply_to_static(net, use_cinn)
+        out = net(self.x, self.y)
+        res = paddle.autograd.grad(out, [self.x, self.y])
+        return res
+
-    def tearDown(self):
+    def test_cinn(self):
         paddle.disable_static()
+        dy_res = self.train(use_prim=False, use_cinn=False)
+        comp_st_cinn_res = self.train(use_prim=True, use_cinn=False)
+
+        for i in range(len(dy_res)):
+            np.testing.assert_allclose(
+                comp_st_cinn_res[i].numpy(),
+                dy_res[i].numpy(),
+                rtol=1e-7,
+                atol=1e-7,
+            )
+        paddle.enable_static()

     def test_tanh_grad_comp(self):
         def actual(primal0, primal1):
python/paddle/fluid/tests/unittests/prim/prim/vjp/static/test_comp_tanh_grad.py

@@ -26,6 +26,23 @@ import parameterized as param
 import paddle


+def apply_to_static(net, use_cinn):
+    build_strategy = paddle.static.BuildStrategy()
+    build_strategy.build_cinn_pass = use_cinn
+    return paddle.jit.to_static(net, build_strategy=build_strategy)
+
+
+class PrimeNet(paddle.nn.Layer):
+    def __init__(self):
+        super(PrimeNet, self).__init__()
+        self.fc = paddle.nn.Linear(4, 4)
+
+    def forward(self, x):
+        tmp = self.fc(x)
+        out = paddle.tanh(tmp)
+        return out
+
+
 @param.parameterized_class(
     ('primal', 'cotangent', 'dtype'),
     [

@@ -38,11 +55,31 @@ class TestTanhGradComp(unittest.TestCase):
         cls.primal = cls.primal.astype(cls.dtype)
         cls.cotangent = cls.cotangent.astype(cls.dtype)

     def setUp(self):
         paddle.enable_static()

+    def train(self, use_prim, use_cinn):
+        paddle.seed(2022)
+        self.x = paddle.randn([2, 4])
+        self.x.stop_gradient = False
+        net = PrimeNet()
+        core.set_prim_enabled(use_prim)
+        net = apply_to_static(net, use_cinn)
+        out = net(self.x)
+        res = paddle.autograd.grad(out, [self.x])
+        return res
+
-    def tearDown(self):
+    def test_cinn(self):
         paddle.disable_static()
+        dy_res = self.train(use_prim=False, use_cinn=False)
+        comp_st_cinn_res = self.train(use_prim=True, use_cinn=False)
+
+        for i in range(len(dy_res)):
+            np.testing.assert_allclose(
+                comp_st_cinn_res[i].numpy(),
+                dy_res[i].numpy(),
+                rtol=1e-7,
+                atol=1e-7,
+            )
+        paddle.enable_static()

     def test_tanh_grad_comp(self):
         def actual(primal, cotangent):