Commit 1f93de31 (unverified)

rm unittest eager guard tests part20 sparse_mv2split (#48879)

Authored by 姜永久 on Dec 14, 2022; committed via GitHub on Dec 15, 2022.
Parent commit: eb322853
Showing 6 changed files with 625 additions and 695 deletions (+625, -695).
python/paddle/fluid/tests/unittests/test_sparse_mv_op.py          +62   -65
python/paddle/fluid/tests/unittests/test_sparse_pooling_op.py     +37   -40
python/paddle/fluid/tests/unittests/test_sparse_softmax_op.py     +81   -88
python/paddle/fluid/tests/unittests/test_sparse_transpose_op.py   +23   -25
python/paddle/fluid/tests/unittests/test_sparse_utils_op.py       +371  -410
python/paddle/fluid/tests/unittests/test_split_op.py              +51   -67
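
All six files follow the same mechanical pattern: test bodies that used to run inside `with _test_eager_guard():` (which forced eager/dygraph mode while the legacy static-graph path was still the default) are de-indented and the guard is removed, since eager mode is now Paddle's default. A minimal runnable sketch of the before/after shape, with a stubbed guard and a hypothetical `run_checks()` standing in for the real test bodies:

import contextlib
import unittest


@contextlib.contextmanager
def _test_eager_guard():
    # Stub so this sketch runs stand-alone; the real helper (whose import
    # this commit deletes) switched Paddle into eager mode for the block.
    yield


class BeforeThisCommit(unittest.TestCase):
    def test_op(self):
        with _test_eager_guard():  # explicitly force eager mode
            self.run_checks()

    def run_checks(self):
        pass  # stand-in for the real assertions


class AfterThisCommit(unittest.TestCase):
    def test_op(self):
        # Eager mode is the default, so the guard goes away and the
        # body moves up one indentation level.
        self.run_checks()

    def run_checks(self):
        pass


if __name__ == "__main__":
    unittest.main()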
python/paddle/fluid/tests/unittests/test_sparse_mv_op.py

@@ -19,7 +19,6 @@ import unittest
 import numpy as np

 import paddle
-from paddle.fluid.framework import _test_eager_guard

 paddle.seed(100)

@@ -43,38 +42,37 @@ def get_cuda_version():
 class TestCsrMv(unittest.TestCase):
     # x: csr-matrix, y: dense-vec, out: dense-vec
     def test_mv(self):
-        with _test_eager_guard():
-            paddle.set_default_dtype('float64')
-            origin_x = paddle.rand([64, 32])
-            mask = paddle.randint(0, 2, [64, 32])
-            origin_x = origin_x * mask
-            origin_vec = paddle.rand([32])
-
-            dense_x = origin_x.detach()
-            dense_x.stop_gradient = False
-            dense_vec = origin_vec.detach()
-            dense_vec.stop_gradient = False
-            dense_out = paddle.mv(dense_x, dense_vec)
-            dense_out.backward()
-
-            sp_x = origin_x.detach().to_sparse_csr()
-            sp_x.stop_gradient = False
-            sp_vec = origin_vec.detach()
-            sp_vec.stop_gradient = False
-            sp_out = paddle.sparse.mv(sp_x, sp_vec)
-            sp_out.backward()
-
-            np.testing.assert_allclose(
-                sp_out.numpy(), dense_out.numpy(), rtol=1e-05
-            )
-            np.testing.assert_allclose(
-                sp_x.grad.to_dense().numpy(),
-                (dense_x.grad * mask).numpy(),
-                rtol=1e-05,
-            )
-            np.testing.assert_allclose(
-                sp_vec.grad.numpy(), dense_vec.grad.numpy(), rtol=1e-05
-            )
+        paddle.set_default_dtype('float64')
+        origin_x = paddle.rand([64, 32])
+        mask = paddle.randint(0, 2, [64, 32])
+        origin_x = origin_x * mask
+        origin_vec = paddle.rand([32])
+
+        dense_x = origin_x.detach()
+        dense_x.stop_gradient = False
+        dense_vec = origin_vec.detach()
+        dense_vec.stop_gradient = False
+        dense_out = paddle.mv(dense_x, dense_vec)
+        dense_out.backward()
+
+        sp_x = origin_x.detach().to_sparse_csr()
+        sp_x.stop_gradient = False
+        sp_vec = origin_vec.detach()
+        sp_vec.stop_gradient = False
+        sp_out = paddle.sparse.mv(sp_x, sp_vec)
+        sp_out.backward()
+
+        np.testing.assert_allclose(sp_out.numpy(), dense_out.numpy(), rtol=1e-05)
+        np.testing.assert_allclose(
+            sp_x.grad.to_dense().numpy(),
+            (dense_x.grad * mask).numpy(),
+            rtol=1e-05,
+        )
+        np.testing.assert_allclose(
+            sp_vec.grad.numpy(), dense_vec.grad.numpy(), rtol=1e-05
+        )


 @unittest.skipIf(

@@ -84,38 +82,37 @@ class TestCsrMv(unittest.TestCase):
 class TestCooMv(unittest.TestCase):
     # x: csr-matrix, y: dense-vec, out: dense-vec
     def test_mv(self):
-        with _test_eager_guard():
-            paddle.set_default_dtype('float64')
-            origin_x = paddle.rand([64, 32])
-            mask = paddle.randint(0, 2, [64, 32])
-            origin_x = origin_x * mask
-            origin_vec = paddle.rand([32])
-
-            dense_x = origin_x.detach()
-            dense_x.stop_gradient = False
-            dense_vec = origin_vec.detach()
-            dense_vec.stop_gradient = False
-            dense_out = paddle.mv(dense_x, dense_vec)
-            dense_out.backward()
-
-            sp_x = origin_x.detach().to_sparse_coo(sparse_dim=2)
-            sp_x.stop_gradient = False
-            sp_vec = origin_vec.detach()
-            sp_vec.stop_gradient = False
-            sp_out = paddle.sparse.mv(sp_x, sp_vec)
-            sp_out.backward()
-
-            np.testing.assert_allclose(
-                sp_out.numpy(), dense_out.numpy(), rtol=1e-05
-            )
-            np.testing.assert_allclose(
-                sp_x.grad.to_dense().numpy(),
-                (dense_x.grad * mask).numpy(),
-                rtol=1e-05,
-            )
-            np.testing.assert_allclose(
-                sp_vec.grad.numpy(), dense_vec.grad.numpy(), rtol=1e-05
-            )
+        paddle.set_default_dtype('float64')
+        origin_x = paddle.rand([64, 32])
+        mask = paddle.randint(0, 2, [64, 32])
+        origin_x = origin_x * mask
+        origin_vec = paddle.rand([32])
+
+        dense_x = origin_x.detach()
+        dense_x.stop_gradient = False
+        dense_vec = origin_vec.detach()
+        dense_vec.stop_gradient = False
+        dense_out = paddle.mv(dense_x, dense_vec)
+        dense_out.backward()
+
+        sp_x = origin_x.detach().to_sparse_coo(sparse_dim=2)
+        sp_x.stop_gradient = False
+        sp_vec = origin_vec.detach()
+        sp_vec.stop_gradient = False
+        sp_out = paddle.sparse.mv(sp_x, sp_vec)
+        sp_out.backward()
+
+        np.testing.assert_allclose(sp_out.numpy(), dense_out.numpy(), rtol=1e-05)
+        np.testing.assert_allclose(
+            sp_x.grad.to_dense().numpy(),
+            (dense_x.grad * mask).numpy(),
+            rtol=1e-05,
+        )
+        np.testing.assert_allclose(
+            sp_vec.grad.numpy(), dense_vec.grad.numpy(), rtol=1e-05
+        )


 if __name__ == "__main__":
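A note on the gradient check above: `sp_x` stores only the entries where `mask` is nonzero, so its gradient is defined only on that sparsity pattern, which is why the dense reference gradient is multiplied by `mask` before comparison. A NumPy-only sketch (not Paddle code) of the identity being exercised for L = sum(x @ v); shapes and seed are illustrative:

import numpy as np

np.random.seed(0)
mask = np.random.randint(0, 2, (4, 3)).astype("float64")
x = np.random.rand(4, 3) * mask
v = np.random.rand(3)

# d(sum(x @ v))/dx is the outer product of ones (the seeded output grad)
# with v -- this is what dense_x.grad holds after dense_out.backward().
analytic = np.outer(np.ones(4), v)

# Finite-difference spot check of one entry of that gradient.
eps = 1e-6
x2 = x.copy()
x2[1, 2] += eps
fd = ((x2 @ v).sum() - (x @ v).sum()) / eps
assert np.isclose(fd, analytic[1, 2], atol=1e-4)

# The sparse gradient exists only on stored entries, i.e. analytic * mask,
# matching the `(dense_x.grad * mask)` term in the test.
sparse_grad = analytic * mask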
python/paddle/fluid/tests/unittests/test_sparse_pooling_op.py

@@ -18,7 +18,6 @@ import unittest
 import numpy as np

 import paddle
-from paddle.fluid.framework import _test_eager_guard


 class TestMaxPool3DFunc(unittest.TestCase):

@@ -42,32 +41,31 @@ class TestMaxPool3DFunc(unittest.TestCase):
         self.setPadding()

     def test(self):
-        with _test_eager_guard():
-            self.setUp()
-            self.dense_x.stop_gradient = False
-            sparse_x = self.dense_x.to_sparse_coo(4)
-            sparse_out = paddle.sparse.nn.functional.max_pool3d(
-                sparse_x,
-                self.kernel_sizes,
-                stride=self.strides,
-                padding=self.paddings,
-            )
-            out = sparse_out.to_dense()
-            out.backward(out)
-
-            dense_x = copy.deepcopy(self.dense_x)
-            dense_out = paddle.nn.functional.max_pool3d(
-                dense_x,
-                self.kernel_sizes,
-                stride=self.strides,
-                padding=self.paddings,
-                data_format='NDHWC',
-            )
-            dense_out.backward(dense_out)
-
-            # compare with dense
-            assert np.allclose(dense_out.numpy(), out.numpy())
-            assert np.allclose(dense_x.grad.numpy(), self.dense_x.grad.numpy())
+        self.setUp()
+        self.dense_x.stop_gradient = False
+        sparse_x = self.dense_x.to_sparse_coo(4)
+        sparse_out = paddle.sparse.nn.functional.max_pool3d(
+            sparse_x,
+            self.kernel_sizes,
+            stride=self.strides,
+            padding=self.paddings,
+        )
+        out = sparse_out.to_dense()
+        out.backward(out)
+
+        dense_x = copy.deepcopy(self.dense_x)
+        dense_out = paddle.nn.functional.max_pool3d(
+            dense_x,
+            self.kernel_sizes,
+            stride=self.strides,
+            padding=self.paddings,
+            data_format='NDHWC',
+        )
+        dense_out.backward(dense_out)
+
+        # compare with dense
+        assert np.allclose(dense_out.numpy(), out.numpy())
+        assert np.allclose(dense_x.grad.numpy(), self.dense_x.grad.numpy())


 class TestStride(TestMaxPool3DFunc):

@@ -102,19 +100,18 @@ class TestInput(TestMaxPool3DFunc):
 class TestMaxPool3DAPI(unittest.TestCase):
     def test(self):
-        with _test_eager_guard():
-            dense_x = paddle.randn((2, 3, 6, 6, 3))
-            sparse_x = dense_x.to_sparse_coo(4)
-            max_pool3d = paddle.sparse.nn.MaxPool3D(
-                kernel_size=3, data_format='NDHWC'
-            )
-            out = max_pool3d(sparse_x)
-            out = out.to_dense()
-
-            dense_out = paddle.nn.functional.max_pool3d(
-                dense_x, 3, data_format='NDHWC'
-            )
-            assert np.allclose(dense_out.numpy(), out.numpy())
+        dense_x = paddle.randn((2, 3, 6, 6, 3))
+        sparse_x = dense_x.to_sparse_coo(4)
+        max_pool3d = paddle.sparse.nn.MaxPool3D(kernel_size=3, data_format='NDHWC')
+        out = max_pool3d(sparse_x)
+        out = out.to_dense()
+
+        dense_out = paddle.nn.functional.max_pool3d(dense_x, 3, data_format='NDHWC')
+        assert np.allclose(dense_out.numpy(), out.numpy())


 if __name__ == "__main__":
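One detail worth flagging in the pooling test: `out.backward(out)` seeds the vector-Jacobian product with the output tensor itself rather than with ones, so the gradients being compared are nontrivial. A small sketch of that seeding behavior, assuming a Paddle 2.x build where `Tensor.backward(grad_tensor)` accepts a seed tensor:

import paddle

x = paddle.to_tensor([1.0, 2.0, 3.0], stop_gradient=False)
y = x * 2
# backward(grad_tensor) uses `y` itself as the incoming output gradient,
# so dL/dx = grad_out * dy/dx = y * 2 = [4., 8., 12.]
y.backward(y)
print(x.grad.numpy())  # [ 4.  8. 12.]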
python/paddle/fluid/tests/unittests/test_sparse_softmax_op.py

@@ -18,117 +18,110 @@ import numpy as np
 import scipy.sparse as sp

 import paddle
-from paddle.fluid.framework import _test_eager_guard

 np.random.seed(2022)


 class TestCsrSoftmax(unittest.TestCase):
     def test_softmax2d(self):
-        with _test_eager_guard():
-            mask = np.random.rand(16, 128) < 0.5
-            np_x = np.random.rand(16, 128) * mask
-            np_csr = sp.csr_matrix(np_x)
-
-            row_number = np_csr.shape[0]
-            np_out = np.array([])
-            for i in range(row_number):
-                start = np_csr.indptr[i]
-                end = np_csr.indptr[i + 1]
-                if start == end:
-                    continue
-                x = np_csr.data[start:end]
-                x_max = np.max(x, keepdims=True)
-                x_exp = np.exp(x - x_max)
-                x_exp_sum = np.sum(x_exp, keepdims=True)
-                np_out = np.concatenate([np_out, x_exp / x_exp_sum])
-
-            csr = paddle.to_tensor(np_x, stop_gradient=False).to_sparse_csr()
-            m = paddle.sparse.nn.Softmax()
-            out = m(csr)
-            np.testing.assert_allclose(
-                out.crows().numpy(), np_csr.indptr, rtol=1e-05
-            )
-            np.testing.assert_allclose(
-                out.cols().numpy(), np_csr.indices, rtol=1e-05
-            )
-            np.testing.assert_allclose(out.values().numpy(), np_out, rtol=1e-05)
-
-            # dx = (dout - sum(dout * out)) * out, dout=rand_x
-            out.backward(csr.detach())
-            dx = np.array([])
-            for i in range(row_number):
-                start = np_csr.indptr[i]
-                end = np_csr.indptr[i + 1]
-                if start == end:
-                    continue
-                out = np_out[start:end]
-                dout = np_csr.data[start:end]
-                sum = np.sum(dout * out, keepdims=True)
-                dx = np.concatenate([dx, (dout - sum) * out])
-
-            np.testing.assert_allclose(
-                csr.grad.crows().numpy(), np_csr.indptr, rtol=1e-05
-            )
-            np.testing.assert_allclose(
-                csr.grad.cols().numpy(), np_csr.indices, rtol=1e-05
-            )
-            np.testing.assert_allclose(csr.grad.values().numpy(), dx, rtol=1e-05)
+        mask = np.random.rand(16, 128) < 0.5
+        np_x = np.random.rand(16, 128) * mask
+        np_csr = sp.csr_matrix(np_x)
+
+        row_number = np_csr.shape[0]
+        np_out = np.array([])
+        for i in range(row_number):
+            start = np_csr.indptr[i]
+            end = np_csr.indptr[i + 1]
+            if start == end:
+                continue
+            x = np_csr.data[start:end]
+            x_max = np.max(x, keepdims=True)
+            x_exp = np.exp(x - x_max)
+            x_exp_sum = np.sum(x_exp, keepdims=True)
+            np_out = np.concatenate([np_out, x_exp / x_exp_sum])
+
+        csr = paddle.to_tensor(np_x, stop_gradient=False).to_sparse_csr()
+        m = paddle.sparse.nn.Softmax()
+        out = m(csr)
+        np.testing.assert_allclose(out.crows().numpy(), np_csr.indptr, rtol=1e-05)
+        np.testing.assert_allclose(out.cols().numpy(), np_csr.indices, rtol=1e-05)
+        np.testing.assert_allclose(out.values().numpy(), np_out, rtol=1e-05)
+
+        # dx = (dout - sum(dout * out)) * out, dout=rand_x
+        out.backward(csr.detach())
+        dx = np.array([])
+        for i in range(row_number):
+            start = np_csr.indptr[i]
+            end = np_csr.indptr[i + 1]
+            if start == end:
+                continue
+            out = np_out[start:end]
+            dout = np_csr.data[start:end]
+            sum = np.sum(dout * out, keepdims=True)
+            dx = np.concatenate([dx, (dout - sum) * out])
+
+        np.testing.assert_allclose(csr.grad.crows().numpy(), np_csr.indptr, rtol=1e-05)
+        np.testing.assert_allclose(csr.grad.cols().numpy(), np_csr.indices, rtol=1e-05)
+        np.testing.assert_allclose(csr.grad.values().numpy(), dx, rtol=1e-05)

     def test_softmax3d(self):
-        with _test_eager_guard():
-            batchNum = 16
-            mask = np.random.rand(batchNum, 16, 128) < 0.5
-            np_x = np.random.rand(batchNum, 16, 128) * mask
-
-            np_out_list = []
-            np_out = np.array([])
-            for i in range(batchNum):
-                np_csr = sp.csr_matrix(np_x[i, :, :])
-                row_number = np_csr.shape[0]
-                for j in range(
-                    row_number,
-                ):
-                    start = np_csr.indptr[j]
-                    end = np_csr.indptr[j + 1]
-                    if start == end:
-                        continue
-                    x = np_csr.data[start:end]
-                    x_max = np.max(x, keepdims=True)
-                    x_exp = np.exp(x - x_max)
-                    x_exp_sum = np.sum(x_exp, keepdims=True)
-                    np_out_list.append(x_exp / x_exp_sum)
-                    np_out = np.concatenate([np_out, x_exp / x_exp_sum])
-
-            csr = paddle.to_tensor(np_x, stop_gradient=False).to_sparse_csr()
-            m = paddle.sparse.nn.Softmax()
-            out = m(csr)
-            np.testing.assert_allclose(out.values().numpy(), np_out, rtol=1e-05)
-
-            # dx = (dout - sum(dout * out)) * out, dout=rand_x
-            out.backward(csr.detach())
-            dx = np.array([])
-            batch_offset = 0
-            for i in range(batchNum):
-                np_csr = sp.csr_matrix(np_x[i, :, :])
-                row_number = np_csr.shape[0]
-                for j in range(row_number):
-                    start = np_csr.indptr[j]
-                    end = np_csr.indptr[j + 1]
-                    if start == end:
-                        continue
-                    dout = np_csr.data[start:end]
-                    out = np_out[batch_offset + start : batch_offset + end]
-                    sum = np.sum(dout * out, keepdims=True)
-                    dx = np.concatenate([dx, (dout - sum) * out])
-                batch_offset += np_csr.nnz
-
-            np.testing.assert_allclose(csr.grad.values().numpy(), dx, rtol=1e-05)
+        batchNum = 16
+        mask = np.random.rand(batchNum, 16, 128) < 0.5
+        np_x = np.random.rand(batchNum, 16, 128) * mask
+
+        np_out_list = []
+        np_out = np.array([])
+        for i in range(batchNum):
+            np_csr = sp.csr_matrix(np_x[i, :, :])
+            row_number = np_csr.shape[0]
+            for j in range(
+                row_number,
+            ):
+                start = np_csr.indptr[j]
+                end = np_csr.indptr[j + 1]
+                if start == end:
+                    continue
+                x = np_csr.data[start:end]
+                x_max = np.max(x, keepdims=True)
+                x_exp = np.exp(x - x_max)
+                x_exp_sum = np.sum(x_exp, keepdims=True)
+                np_out_list.append(x_exp / x_exp_sum)
+                np_out = np.concatenate([np_out, x_exp / x_exp_sum])
+
+        csr = paddle.to_tensor(np_x, stop_gradient=False).to_sparse_csr()
+        m = paddle.sparse.nn.Softmax()
+        out = m(csr)
+        np.testing.assert_allclose(out.values().numpy(), np_out, rtol=1e-05)
+
+        # dx = (dout - sum(dout * out)) * out, dout=rand_x
+        out.backward(csr.detach())
+        dx = np.array([])
+        batch_offset = 0
+        for i in range(batchNum):
+            np_csr = sp.csr_matrix(np_x[i, :, :])
+            row_number = np_csr.shape[0]
+            for j in range(row_number):
+                start = np_csr.indptr[j]
+                end = np_csr.indptr[j + 1]
+                if start == end:
+                    continue
+                dout = np_csr.data[start:end]
+                out = np_out[batch_offset + start : batch_offset + end]
+                sum = np.sum(dout * out, keepdims=True)
+                dx = np.concatenate([dx, (dout - sum) * out])
+            batch_offset += np_csr.nnz

+        np.testing.assert_allclose(csr.grad.values().numpy(), dx, rtol=1e-05)


 if __name__ == "__main__":
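The comment `dx = (dout - sum(dout * out)) * out` in this file is the closed-form vector-Jacobian product of softmax, applied row by row over the CSR values. A NumPy-only check of that formula against finite differences (vector length and seed are arbitrary):

import numpy as np

np.random.seed(2022)
x = np.random.rand(5)
x_exp = np.exp(x - np.max(x))
out = x_exp / np.sum(x_exp)
dout = np.random.rand(5)

# Closed form used by the test: dx = (dout - sum(dout * out)) * out
dx = (dout - np.sum(dout * out)) * out

# Finite-difference approximation of the same vector-Jacobian product.
eps = 1e-6
fd = np.empty_like(x)
for k in range(x.size):
    xp = x.copy()
    xp[k] += eps
    e = np.exp(xp - np.max(xp))
    fd[k] = dout @ (e / e.sum() - out) / eps

np.testing.assert_allclose(dx, fd, rtol=1e-3, atol=1e-4)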
python/paddle/fluid/tests/unittests/test_sparse_transpose_op.py

@@ -17,38 +17,36 @@ import unittest
 import numpy as np

 import paddle
-from paddle.fluid.framework import _test_eager_guard


 class TestTranspose(unittest.TestCase):
     # x: sparse, out: sparse
     def check_result(self, x_shape, dims, format):
-        with _test_eager_guard():
-            mask = paddle.randint(0, 2, x_shape).astype("float32")
-            # "+ 1" to make sure that all zero elements in "origin_x" is caused by multiplying by "mask",
-            # or the backward checks may fail.
-            origin_x = (paddle.rand(x_shape, dtype='float32') + 1) * mask
-            dense_x = origin_x.detach()
-            dense_x.stop_gradient = False
-            dense_out = paddle.transpose(dense_x, dims)
-            if format == "coo":
-                sp_x = origin_x.detach().to_sparse_coo(len(x_shape))
-            else:
-                sp_x = origin_x.detach().to_sparse_csr()
-            sp_x.stop_gradient = False
-            sp_out = paddle.sparse.transpose(sp_x, dims)
-            np.testing.assert_allclose(
-                sp_out.to_dense().numpy(), dense_out.numpy(), rtol=1e-05
-            )
-            dense_out.backward()
-            sp_out.backward()
-            np.testing.assert_allclose(
-                sp_x.grad.to_dense().numpy(),
-                (dense_x.grad * mask).numpy(),
-                rtol=1e-05,
-            )
+        mask = paddle.randint(0, 2, x_shape).astype("float32")
+        # "+ 1" to make sure that all zero elements in "origin_x" is caused by multiplying by "mask",
+        # or the backward checks may fail.
+        origin_x = (paddle.rand(x_shape, dtype='float32') + 1) * mask
+        dense_x = origin_x.detach()
+        dense_x.stop_gradient = False
+        dense_out = paddle.transpose(dense_x, dims)
+        if format == "coo":
+            sp_x = origin_x.detach().to_sparse_coo(len(x_shape))
+        else:
+            sp_x = origin_x.detach().to_sparse_csr()
+        sp_x.stop_gradient = False
+        sp_out = paddle.sparse.transpose(sp_x, dims)
+        np.testing.assert_allclose(
+            sp_out.to_dense().numpy(), dense_out.numpy(), rtol=1e-05
+        )
+        dense_out.backward()
+        sp_out.backward()
+        np.testing.assert_allclose(
+            sp_x.grad.to_dense().numpy(),
+            (dense_x.grad * mask).numpy(),
+            rtol=1e-05,
+        )

     def test_transpose_2d(self):
         self.check_result([2, 5], [0, 1], 'coo')
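The `+ 1` called out in the code comment above matters because the sparsity pattern is rebuilt from the values: if `paddle.rand` itself produced an exact zero at a position where `mask` is 1, `to_sparse_coo`/`to_sparse_csr` would drop that entry and the masked-gradient comparison would see a spurious mismatch. A NumPy-only illustration of the invariant the trick guarantees:

import numpy as np

np.random.seed(0)
mask = np.random.randint(0, 2, (3, 4)).astype("float32")

# rand() + 1 lies in (1, 2), so it can never contribute a zero; every
# zero in x is therefore attributable to the mask alone.
x = (np.random.rand(3, 4).astype("float32") + 1) * mask
assert np.array_equal(x != 0, mask.astype(bool))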
python/paddle/fluid/tests/unittests/test_sparse_utils_op.py

(This diff is collapsed and is not shown here: +371 additions, -410 deletions.)
python/paddle/fluid/tests/unittests/test_split_op.py

@@ -20,7 +20,6 @@ from op_test import OpTest, convert_float_to_uint16
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid import Program, core, program_guard
-from paddle.fluid.framework import _test_eager_guard


 class TestSplitOp(OpTest):

@@ -453,24 +452,21 @@ class API_TestDygraphFluidSplit(unittest.TestCase):
         x1_out = x1.numpy()
         x2_out = x2.numpy()
         ex_x0, ex_x1, ex_x2 = np.split(input_1, 3, axis=1)
-        with _test_eager_guard():
-            # input is a variable which shape is [4, 6, 6]
-            input = paddle.to_tensor(input_1)
-            input.stop_gradient = False
-            x0, x1, x2 = fluid.layers.split(input, num_or_sections=3, dim=1)
-            eager_x0_out = x0.numpy()
-            eager_x1_out = x1.numpy()
-            eager_x2_out = x2.numpy()
-            loss = x0.sum()
-            loss.backward()
-            manul_grad = np.zeros_like(input_1)
-            manul_grad[:, :2, :] = 1
-            np.testing.assert_allclose(input.gradient(), manul_grad, rtol=1e-05)
-            np.testing.assert_allclose(ex_x0, eager_x0_out, rtol=1e-05)
-            np.testing.assert_allclose(ex_x1, eager_x1_out, rtol=1e-05)
-            np.testing.assert_allclose(ex_x2, eager_x2_out, rtol=1e-05)
+        # input is a variable which shape is [4, 6, 6]
+        input = paddle.to_tensor(input_1)
+        input.stop_gradient = False
+        x0, x1, x2 = fluid.layers.split(input, num_or_sections=3, dim=1)
+        eager_x0_out = x0.numpy()
+        eager_x1_out = x1.numpy()
+        eager_x2_out = x2.numpy()
+        loss = x0.sum()
+        loss.backward()
+        manul_grad = np.zeros_like(input_1)
+        manul_grad[:, :2, :] = 1
+        np.testing.assert_allclose(input.gradient(), manul_grad, rtol=1e-05)
+        np.testing.assert_allclose(ex_x0, eager_x0_out, rtol=1e-05)
+        np.testing.assert_allclose(ex_x1, eager_x1_out, rtol=1e-05)
+        np.testing.assert_allclose(ex_x2, eager_x2_out, rtol=1e-05)

         np.testing.assert_allclose(ex_x0, x0_out, rtol=1e-05)
         np.testing.assert_allclose(ex_x1, x1_out, rtol=1e-05)

@@ -486,24 +482,21 @@ class API_TestDygraphFluidSplit(unittest.TestCase):
         x1_out = x1.numpy()
         x2_out = x2.numpy()
         ex_x0, ex_x1, ex_x2 = np.split(input_1, 3, axis=1)
-        with _test_eager_guard():
-            # input is a variable which shape is [4, 6, 6]
-            input = paddle.to_tensor(input_1)
-            input.stop_gradient = False
-            x0, x1, x2 = fluid.layers.split(input, [2, 2, 2], dim=1)
-            eager_x0_out = x0.numpy()
-            eager_x1_out = x1.numpy()
-            eager_x2_out = x2.numpy()
-            loss = x0.sum()
-            loss.backward()
-            manul_grad = np.zeros_like(input_1)
-            manul_grad[:, :2, :] = 1
-            np.testing.assert_allclose(input.gradient(), manul_grad, rtol=1e-05)
-            np.testing.assert_allclose(ex_x0, eager_x0_out, rtol=1e-05)
-            np.testing.assert_allclose(ex_x1, eager_x1_out, rtol=1e-05)
-            np.testing.assert_allclose(ex_x2, eager_x2_out, rtol=1e-05)
+        # input is a variable which shape is [4, 6, 6]
+        input = paddle.to_tensor(input_1)
+        input.stop_gradient = False
+        x0, x1, x2 = fluid.layers.split(input, [2, 2, 2], dim=1)
+        eager_x0_out = x0.numpy()
+        eager_x1_out = x1.numpy()
+        eager_x2_out = x2.numpy()
+        loss = x0.sum()
+        loss.backward()
+        manul_grad = np.zeros_like(input_1)
+        manul_grad[:, :2, :] = 1
+        np.testing.assert_allclose(input.gradient(), manul_grad, rtol=1e-05)
+        np.testing.assert_allclose(ex_x0, eager_x0_out, rtol=1e-05)
+        np.testing.assert_allclose(ex_x1, eager_x1_out, rtol=1e-05)
+        np.testing.assert_allclose(ex_x2, eager_x2_out, rtol=1e-05)

         np.testing.assert_allclose(ex_x0, x0_out, rtol=1e-05)
         np.testing.assert_allclose(ex_x1, x1_out, rtol=1e-05)

@@ -522,24 +515,21 @@ class API_TestDygraphSplit(unittest.TestCase):
         x2_out = x2.numpy()
         ex_x0, ex_x1, ex_x2 = np.split(input_1, 3, axis=1)
-        with _test_eager_guard():
-            # input is a variable which shape is [4, 6, 6]
-            input = paddle.to_tensor(input_1)
-            input.stop_gradient = False
-            x0, x1, x2 = paddle.split(input, num_or_sections=3, axis=1)
-            eager_x0_out = x0.numpy()
-            eager_x1_out = x1.numpy()
-            eager_x2_out = x2.numpy()
-            loss = x0.sum()
-            loss.backward()
-            manul_grad = np.zeros_like(input_1)
-            manul_grad[:, :2, :] = 1
-            np.testing.assert_allclose(input.gradient(), manul_grad, rtol=1e-05)
-            np.testing.assert_allclose(ex_x0, eager_x0_out, rtol=1e-05)
-            np.testing.assert_allclose(ex_x1, eager_x1_out, rtol=1e-05)
-            np.testing.assert_allclose(ex_x2, eager_x2_out, rtol=1e-05)
+        # input is a variable which shape is [4, 6, 6]
+        input = paddle.to_tensor(input_1)
+        input.stop_gradient = False
+        x0, x1, x2 = paddle.split(input, num_or_sections=3, axis=1)
+        eager_x0_out = x0.numpy()
+        eager_x1_out = x1.numpy()
+        eager_x2_out = x2.numpy()
+        loss = x0.sum()
+        loss.backward()
+        manul_grad = np.zeros_like(input_1)
+        manul_grad[:, :2, :] = 1
+        np.testing.assert_allclose(input.gradient(), manul_grad, rtol=1e-05)
+        np.testing.assert_allclose(ex_x0, eager_x0_out, rtol=1e-05)
+        np.testing.assert_allclose(ex_x1, eager_x1_out, rtol=1e-05)
+        np.testing.assert_allclose(ex_x2, eager_x2_out, rtol=1e-05)

         np.testing.assert_allclose(ex_x0, x0_out, rtol=1e-05)
         np.testing.assert_allclose(ex_x1, x1_out, rtol=1e-05)

@@ -570,12 +560,11 @@ class API_TestDygraphSplit(unittest.TestCase):
         out_dy_np = out_dy.numpy()
         ex_out = np.split(input_1, [6], axis=1)
         ex_out = ex_out[0]
-        with _test_eager_guard():
-            input = paddle.to_tensor(input_1)
-            out_eager = paddle.split(input, [6], axis=1)
-            out_eager = out_eager[0]
-            out_eager_np = out_dy.numpy()
-            np.testing.assert_allclose(ex_out, out_eager_np, rtol=1e-05)
+        input = paddle.to_tensor(input_1)
+        out_eager = paddle.split(input, [6], axis=1)
+        out_eager = out_eager[0]
+        out_eager_np = out_dy.numpy()
+        np.testing.assert_allclose(ex_out, out_eager_np, rtol=1e-05)
         np.testing.assert_allclose(ex_out, out_dy_np, rtol=1e-05)

     def test_out_tensor_input(self):

@@ -612,7 +601,7 @@ class API_TestDygraphSplit(unittest.TestCase):
         np.testing.assert_allclose(ex_x1, x1_out, rtol=1e-05)
         np.testing.assert_allclose(ex_x2, x2_out, rtol=1e-05)

-    def func_negative_one_section(self):
+    def test_negative_one_section(self):
         with fluid.dygraph.guard():
             input_1 = np.random.random([4, 6, 6]).astype("int32")
             # input is a variable which shape is [4, 6, 6]

@@ -622,11 +611,6 @@ class API_TestDygraphSplit(unittest.TestCase):
             x0_out = x0[0].numpy()
             np.testing.assert_array_equal(x0_out, input.numpy())

-    def test_negative_one_section(self):
-        with _test_eager_guard():
-            self.func_negative_one_section()
-        self.func_negative_one_section()
-

 class API_TestEmptySplit(unittest.TestCase):
     def test_axis_input_empty_section(self):
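The hand-built `manul_grad` in the split tests follows directly from the loss: `x0` is the first of three equal splits along axis 1 of a `[4, 6, 6]` input, so `loss = x0.sum()` has gradient 1 on the slices `[:, :2, :]` and 0 elsewhere. A NumPy-only sanity check of that bookkeeping:

import numpy as np

input_1 = np.random.random([4, 6, 6]).astype("float64")
ex_x0, ex_x1, ex_x2 = np.split(input_1, 3, axis=1)

# loss = x0.sum() touches only the first 6 // 3 == 2 slices along axis 1,
# so d(loss)/d(input) is an indicator of that region.
manul_grad = np.zeros_like(input_1)
manul_grad[:, :2, :] = 1

assert ex_x0.shape == (4, 2, 6)
assert manul_grad.sum() == ex_x0.size  # 4 * 2 * 6 == 48 ones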