机器未来 / Paddle (forked from PaddlePaddle / Paddle)
Commit d6b5d91c (unverified): support cumsum flip reverse backward refuse forward (#45892)
Authored Sep 09, 2022 by Charles-hit; committed via GitHub on Sep 09, 2022
Parent: bc2265f8
Showing 4 changed files with 232 additions and 10 deletions (+232 −10)
paddle/phi/api/yaml/legacy_backward.yaml                  +1   −10
python/paddle/fluid/tests/unittests/test_cumsum_op.py     +77  −0
python/paddle/fluid/tests/unittests/test_flip.py          +77  −0
python/paddle/fluid/tests/unittests/test_reverse_op.py    +77  −0
paddle/phi/api/yaml/legacy_backward.yaml

@@ -592,9 +592,6 @@
- backward_api : cumsum_grad
  forward : cumsum(Tensor x, Scalar axis, bool flatten, bool exclusive, bool reverse) -> Tensor(out)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  args : (Tensor out_grad, Scalar axis, bool flatten, bool exclusive, bool reverse)
  output : Tensor(x_grad)
  invoke : cumsum(out_grad, axis, flatten, exclusive, !reverse)

@@ -884,11 +881,7 @@
  forward : flip (Tensor x, int[] axis) -> Tensor(out)
  args : (Tensor out_grad, int[] axis)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [out_grad]
  kernel :
    func : flip
  invoke : flip(out_grad, axis)

- backward_api : floor_grad
  forward : floor(Tensor x) -> Tensor(out)

@@ -1971,8 +1964,6 @@
  forward : reverse (Tensor x, IntArray axis) -> Tensor(out)
  args : (Tensor out_grad, IntArray axis)
  output : Tensor(x_grad)
  infer_meta :
    func : ReverseInferMeta
  invoke : reverse(out_grad, axis)

- backward_api : roi_align_grad
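The cumsum hunk is the least obvious of the three backward-reuses-forward changes: the vector-Jacobian product of an inclusive, non-reversed cumulative sum is a cumulative sum of the upstream gradient taken in the opposite direction, which is exactly what `cumsum(out_grad, axis, flatten, exclusive, !reverse)` expresses. A minimal NumPy sketch of that identity (illustrative only, not part of the commit; all variable names are placeholders):

import numpy as np

# Forward: y_i = sum_{j <= i} x_j  (inclusive cumsum along axis 0).
# VJP:     dL/dx_j = sum_{i >= j} g_i, i.e. a cumsum of g in the reverse direction.
x = np.random.uniform(-1.0, 1.0, (3, 4))
g = np.random.uniform(-1.0, 1.0, (3, 4))       # upstream gradient dL/dy

# Analytic backward, written the way the yaml invokes it: cumsum with `reverse` toggled.
x_grad = np.flip(np.cumsum(np.flip(g, axis=0), axis=0), axis=0)

# Cross-check against the Jacobian built explicitly along axis 0.
n = x.shape[0]
J = np.tril(np.ones((n, n)))                   # y = J @ x along axis 0
np.testing.assert_allclose(x_grad, J.T @ g)    # dL/dx = J^T @ g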
python/paddle/fluid/tests/unittests/test_cumsum_op.py

@@ -24,6 +24,9 @@ import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
import paddle.inference as paddle_infer
import gradient_checker
from decorator_helper import prog_scope
import paddle.fluid.layers as layers


class TestCumsumOp(unittest.TestCase):

@@ -380,5 +383,79 @@ class TestTensorAxis(unittest.TestCase):
        np.testing.assert_allclose(static_out[0], infer_out)


class TestCumsumDoubleGradCheck(unittest.TestCase):

    def cumsum_wrapper(self, x):
        return paddle.cumsum(x[0], 0)

    @prog_scope()
    def func(self, place):
        # the shape of input variable should be clearly specified, not inlcude -1.
        eps = 0.005
        dtype = np.float64

        data = layers.data('data', [3, 4], False, dtype)
        data.persistable = True
        out = paddle.cumsum(data, 0)
        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)

        gradient_checker.double_grad_check([data],
                                           out,
                                           x_init=[data_arr],
                                           place=place,
                                           eps=eps)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        gradient_checker.double_grad_check_for_dygraph(self.cumsum_wrapper,
                                                       [data],
                                                       out,
                                                       x_init=[data_arr],
                                                       place=place)

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestCumsumTripleGradCheck(unittest.TestCase):

    def cumsum_wrapper(self, x):
        return paddle.cumsum(x[0], 0)

    @prog_scope()
    def func(self, place):
        # the shape of input variable should be clearly specified, not inlcude -1.
        eps = 0.005
        dtype = np.float32

        data = layers.data('data', [2, 3], False, dtype)
        data.persistable = True
        out = paddle.cumsum(data, 0)
        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)

        gradient_checker.triple_grad_check([data],
                                           out,
                                           x_init=[data_arr],
                                           place=place,
                                           eps=eps)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        gradient_checker.triple_grad_check_for_dygraph(self.cumsum_wrapper,
                                                       [data],
                                                       out,
                                                       x_init=[data_arr],
                                                       place=place)

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


if __name__ == '__main__':
    unittest.main()
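For context on what these new gradient-checker classes exercise, the same first-order identity can be checked directly with paddle.grad in the default dynamic-graph (eager) mode. This is an illustrative sketch, not part of the commit; it assumes a build in which cumsum's backward is wired up as above, and it expresses the "reversed cumsum" with two flips because the public Python API of paddle.cumsum has no reverse flag:

import numpy as np
import paddle

# Run in eager mode (the default); the commit's tests instead use static graphs.
x = paddle.to_tensor(np.random.uniform(-1, 1, (3, 4)), dtype='float64', stop_gradient=False)
y = paddle.cumsum(x, axis=0)
g = paddle.to_tensor(np.random.uniform(-1, 1, (3, 4)), dtype='float64')

# Autograd gradient of cumsum with upstream gradient g ...
(dx,) = paddle.grad(y, x, grad_outputs=g)

# ... should equal a cumsum of g taken in the opposite direction along the same axis.
expected = paddle.flip(paddle.cumsum(paddle.flip(g, axis=[0]), axis=0), axis=[0])
np.testing.assert_allclose(dx.numpy(), expected.numpy())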
python/paddle/fluid/tests/unittests/test_flip.py

@@ -21,6 +21,9 @@ import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid import Program, program_guard
from op_test import OpTest
import gradient_checker
from decorator_helper import prog_scope
import paddle.fluid.layers as layers


class TestFlipOp_API(unittest.TestCase):

@@ -137,6 +140,80 @@ class TestFlipOpNegAxis(TestFlipOp):
        self.axis = [-1]


class TestFlipDoubleGradCheck(unittest.TestCase):

    def flip_wrapper(self, x):
        return paddle.flip(x[0], [0, 1])

    @prog_scope()
    def func(self, place):
        # the shape of input variable should be clearly specified, not inlcude -1.
        eps = 0.005
        dtype = np.float32

        data = layers.data('data', [3, 2, 2], False, dtype)
        data.persistable = True
        out = paddle.flip(data, [0, 1])
        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)

        gradient_checker.double_grad_check([data],
                                           out,
                                           x_init=[data_arr],
                                           place=place,
                                           eps=eps)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        gradient_checker.double_grad_check_for_dygraph(self.flip_wrapper,
                                                       [data],
                                                       out,
                                                       x_init=[data_arr],
                                                       place=place)

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestFlipTripleGradCheck(unittest.TestCase):

    def flip_wrapper(self, x):
        return paddle.flip(x[0], [0, 1])

    @prog_scope()
    def func(self, place):
        # the shape of input variable should be clearly specified, not inlcude -1.
        eps = 0.005
        dtype = np.float32

        data = layers.data('data', [3, 2, 2], False, dtype)
        data.persistable = True
        out = paddle.flip(data, [0, 1])
        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)

        gradient_checker.triple_grad_check([data],
                                           out,
                                           x_init=[data_arr],
                                           place=place,
                                           eps=eps)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        gradient_checker.triple_grad_check_for_dygraph(self.flip_wrapper,
                                                       [data],
                                                       out,
                                                       x_init=[data_arr],
                                                       place=place)

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


if __name__ == "__main__":
    paddle.enable_static()
    unittest.main()
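The double-gradient check for flip is easy to reason about because flip is linear and self-adjoint: the first gradient is flip(out_grad, axis), and differentiating that result again with respect to out_grad gives flip once more. A hedged eager-mode sketch of that second-order behaviour (illustrative only, not part of the commit; it assumes a build where flip's backward is itself differentiable, which is what this change enables):

import numpy as np
import paddle

x = paddle.to_tensor(np.random.uniform(-1, 1, (3, 2, 2)).astype('float32'), stop_gradient=False)
g = paddle.to_tensor(np.random.uniform(-1, 1, (3, 2, 2)).astype('float32'), stop_gradient=False)

y = paddle.flip(x, axis=[0, 1])
# First-order VJP: since flip is its own adjoint, dx == flip(g, [0, 1]).
(dx,) = paddle.grad(y, x, grad_outputs=g, create_graph=True)

# Second-order: dx is linear in g, and its VJP with respect to g is flip again.
v = paddle.ones_like(dx)
(dg,) = paddle.grad(dx, g, grad_outputs=v)
np.testing.assert_allclose(dg.numpy(), paddle.flip(v, axis=[0, 1]).numpy(), rtol=1e-6)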
python/paddle/fluid/tests/unittests/test_reverse_op.py

@@ -21,6 +21,9 @@ from op_test import OpTest
import paddle
import paddle.fluid as fluid
from paddle.fluid import core
import gradient_checker
from decorator_helper import prog_scope
import paddle.fluid.layers as layers
from paddle.fluid.framework import program_guard, Program
from test_attribute_var import UnittestBase

@@ -267,6 +270,80 @@ class TestReverseAxisListTensor(TestReverseAxisTensor):
        return out


class TestReverseDoubleGradCheck(unittest.TestCase):

    def reverse_wrapper(self, x):
        return fluid.layers.reverse(x[0], [0, 1])

    @prog_scope()
    def func(self, place):
        # the shape of input variable should be clearly specified, not inlcude -1.
        eps = 0.005
        dtype = np.float64

        data = layers.data('data', [3, 4], False, dtype)
        data.persistable = True
        out = fluid.layers.reverse(data, [0, 1])
        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)

        gradient_checker.double_grad_check([data],
                                           out,
                                           x_init=[data_arr],
                                           place=place,
                                           eps=eps)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        gradient_checker.double_grad_check_for_dygraph(self.reverse_wrapper,
                                                       [data],
                                                       out,
                                                       x_init=[data_arr],
                                                       place=place)

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestReverseTripleGradCheck(unittest.TestCase):

    def reverse_wrapper(self, x):
        return fluid.layers.reverse(x[0], [0, 1])

    @prog_scope()
    def func(self, place):
        # the shape of input variable should be clearly specified, not inlcude -1.
        eps = 0.005
        dtype = np.float32

        data = layers.data('data', [2, 3], False, dtype)
        data.persistable = True
        out = fluid.layers.reverse(data, [0, 1])
        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)

        gradient_checker.triple_grad_check([data],
                                           out,
                                           x_init=[data_arr],
                                           place=place,
                                           eps=eps)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        gradient_checker.triple_grad_check_for_dygraph(self.reverse_wrapper,
                                                       [data],
                                                       out,
                                                       x_init=[data_arr],
                                                       place=place)

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()
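As with flip, invoking reverse(out_grad, axis) as the backward is justified because reversing along a set of axes is a linear, self-adjoint permutation of the input. A tiny NumPy adjointness check of that fact (illustrative only, not part of the commit; np.flip stands in for the reverse op):

import numpy as np

# For a linear map A, the backward of y = A(x) is x_grad = A^T(g).
# Reverse along a set of axes satisfies A^T == A, so
# <reverse(x), g> must equal <x, reverse(g)> for any x and g.
rng = np.random.default_rng(0)
x = rng.uniform(-1, 1, (2, 3))
g = rng.uniform(-1, 1, (2, 3))
axes = (0, 1)
assert np.isclose(np.sum(np.flip(x, axes) * g), np.sum(x * np.flip(g, axes)))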