Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
Crayon鑫
Paddle
提交
ff1da188
P
Paddle
项目概览
Crayon鑫
/
Paddle
与 Fork 源项目一致
Fork自
PaddlePaddle / Paddle
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
1
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
1
Issue
1
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
未验证
提交
ff1da188
编写于
9月 13, 2022
作者:
C
Charles-hit
提交者:
GitHub
9月 13, 2022
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
support concat backward refuse forward (#45940)
上级
c6f173b0
变更
2
显示空白变更内容
内联
并排
Showing
2 changed files
with
82 additions
and
5 deletions
+82
-5
paddle/phi/api/yaml/legacy_backward.yaml
paddle/phi/api/yaml/legacy_backward.yaml
+1
-5
python/paddle/fluid/tests/unittests/test_concat_op.py
python/paddle/fluid/tests/unittests/test_concat_op.py
+81
-0
未找到文件。
paddle/phi/api/yaml/legacy_backward.yaml
浏览文件 @
ff1da188
...
@@ -430,11 +430,7 @@
...
@@ -430,11 +430,7 @@
forward
:
concat_grad (Tensor[] x, Tensor grad_out, Scalar axis) -> Tensor[](grad_x)
forward
:
concat_grad (Tensor[] x, Tensor grad_out, Scalar axis) -> Tensor[](grad_x)
args
:
(Tensor[] grad_x_grad, Scalar axis = 0)
args
:
(Tensor[] grad_x_grad, Scalar axis = 0)
output
:
Tensor(grad_out_grad)
output
:
Tensor(grad_out_grad)
infer_meta
:
invoke
:
concat(grad_x_grad, axis)
func
:
ConcatInferMeta
param
:
[
grad_x_grad
,
axis
]
kernel
:
func
:
concat
-
backward_api
:
concat_grad
-
backward_api
:
concat_grad
forward
:
concat (Tensor[] x, Scalar axis) -> Tensor(out)
forward
:
concat (Tensor[] x, Scalar axis) -> Tensor(out)
...
...
python/paddle/fluid/tests/unittests/test_concat_op.py
浏览文件 @
ff1da188
...
@@ -21,6 +21,9 @@ import paddle.fluid as fluid
...
@@ -21,6 +21,9 @@ import paddle.fluid as fluid
from
paddle.fluid
import
compiler
,
Program
,
program_guard
,
core
from
paddle.fluid
import
compiler
,
Program
,
program_guard
,
core
from
paddle.fluid.framework
import
_test_eager_guard
from
paddle.fluid.framework
import
_test_eager_guard
import
paddle
import
paddle
import
gradient_checker
from
decorator_helper
import
prog_scope
import
paddle.fluid.layers
as
layers
class
TestConcatOp
(
OpTest
):
class
TestConcatOp
(
OpTest
):
...
@@ -451,5 +454,83 @@ class TestConcatAPIWithLoDTensorArray(unittest.TestCase):
...
@@ -451,5 +454,83 @@ class TestConcatAPIWithLoDTensorArray(unittest.TestCase):
res
[
0
],
np
.
concatenate
([
self
.
x
]
*
self
.
iter_num
,
axis
=
self
.
axis
))
res
[
0
],
np
.
concatenate
([
self
.
x
]
*
self
.
iter_num
,
axis
=
self
.
axis
))
class TestConcatDoubleGradCheck(unittest.TestCase):
    """Second-order (double) gradient check for ``paddle.concat`` along the
    default axis, exercised in both static-graph and dygraph modes."""

    def concat_wrapper(self, x):
        # Dygraph-mode callable handed to double_grad_check_for_dygraph.
        return paddle.concat(x)

    @prog_scope()
    def func(self, place):
        # The input shapes must be fully specified (no -1 dimensions)
        # for the finite-difference gradient checker to work.
        eps = 0.005
        dtype = np.float32

        data1 = layers.data('data1', [2, 3], False, dtype)
        data2 = layers.data('data2', [2, 3], False, dtype)
        for var in (data1, data2):
            # Keep the inputs alive so their gradients can be fetched.
            var.persistable = True
        out = paddle.concat([data1, data2])

        init_vals = [
            np.random.uniform(-1, 1, v.shape).astype(dtype)
            for v in (data1, data2)
        ]

        gradient_checker.double_grad_check([data1, data2],
                                           out,
                                           x_init=init_vals,
                                           place=place,
                                           eps=eps)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        gradient_checker.double_grad_check_for_dygraph(self.concat_wrapper,
                                                       [data1, data2],
                                                       out,
                                                       x_init=init_vals,
                                                       place=place)

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for place in places:
            self.func(place)
class TestConcatTripleGradCheck(unittest.TestCase):
    """Higher-order gradient check for ``paddle.concat`` along axis 1.

    NOTE(review): the class name says "triple" but the body invokes
    ``double_grad_check`` / ``double_grad_check_for_dygraph`` — confirm
    whether a triple-grad checker was intended.
    """

    def concat_wrapper(self, x):
        # Dygraph-mode callable handed to double_grad_check_for_dygraph.
        return paddle.concat(x, 1)

    @prog_scope()
    def func(self, place):
        # The input shapes must be fully specified (no -1 dimensions)
        # for the finite-difference gradient checker to work.
        eps = 0.005
        dtype = np.float32

        data1 = layers.data('data1', [2, 3, 4], False, dtype)
        data2 = layers.data('data2', [2, 3, 4], False, dtype)
        for var in (data1, data2):
            # Keep the inputs alive so their gradients can be fetched.
            var.persistable = True
        out = paddle.concat([data1, data2], 1)

        init_vals = [
            np.random.uniform(-1, 1, v.shape).astype(dtype)
            for v in (data1, data2)
        ]

        gradient_checker.double_grad_check([data1, data2],
                                           out,
                                           x_init=init_vals,
                                           place=place,
                                           eps=eps)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        gradient_checker.double_grad_check_for_dygraph(self.concat_wrapper,
                                                       [data1, data2],
                                                       out,
                                                       x_init=init_vals,
                                                       place=place)

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for place in places:
            self.func(place)
if __name__ == '__main__':
    # Script entry point: run every test case in this module.
    unittest.main()
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录