PaddlePaddle / Paddle · commit 1f93de31 (unverified signature)

rm unittest eager guard tests part20 sparse_mv2split (#48879)

Authored on Dec 14, 2022 by 姜永久 (Jiang Yongjiu); committed via GitHub on Dec 15, 2022.
Parent: eb322853
Showing 6 changed files, with 625 additions and 695 deletions (+625 -695).
python/paddle/fluid/tests/unittests/test_sparse_mv_op.py        +62  -65
python/paddle/fluid/tests/unittests/test_sparse_pooling_op.py   +37  -40
python/paddle/fluid/tests/unittests/test_sparse_softmax_op.py   +81  -88
python/paddle/fluid/tests/unittests/test_sparse_transpose_op.py +23  -25
python/paddle/fluid/tests/unittests/test_sparse_utils_op.py     +371 -410
python/paddle/fluid/tests/unittests/test_split_op.py            +51  -67
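All six files receive the same mechanical change. `_test_eager_guard` was a transitional context manager in `paddle.fluid.framework` that ran the enclosed block under eager dygraph mode; with eager mode now the default execution mode, the wrappers are dead weight, and this PR series ("part20") deletes them file by file. A minimal runnable sketch of the transformation, where the guard and `run_checks` below are hypothetical stand-ins and not Paddle code:

```python
import contextlib


@contextlib.contextmanager
def _test_eager_guard():
    # Hypothetical stand-in: the real guard switched dygraph execution
    # into eager mode for the enclosed block during Paddle's transition.
    yield


def run_checks():
    # Hypothetical stand-in for a real test body.
    assert 1 + 1 == 2


# Before: the test body ran under the guard.
def test_something_before():
    with _test_eager_guard():
        run_checks()


# After: eager mode is the default, so the guard and its import are
# deleted and the body is dedented one level.
def test_something_after():
    run_checks()


test_something_before()
test_something_after()
```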
python/paddle/fluid/tests/unittests/test_sparse_mv_op.py @ 1f93de31

```diff
@@ -19,7 +19,6 @@ import unittest
 import numpy as np
 
 import paddle
-from paddle.fluid.framework import _test_eager_guard
 
 paddle.seed(100)
```

In `TestCsrMv.test_mv` (hunk `@@ -43,38 +42,37 @@ def get_cuda_version():`) the `with _test_eager_guard():` wrapper is removed and the body dedented one level; the same edit is applied to the `@unittest.skipIf(...)`-decorated `TestCooMv.test_mv` (hunk `@@ -84,38 +82,37 @@`), which differs only in building `sp_x` with `to_sparse_coo(sparse_dim=2)` instead of `to_sparse_csr()`. After the change:

```python
class TestCsrMv(unittest.TestCase):
    # x: csr-matrix, y: dense-vec, out: dense-vec
    def test_mv(self):
        paddle.set_default_dtype('float64')
        origin_x = paddle.rand([64, 32])
        mask = paddle.randint(0, 2, [64, 32])
        origin_x = origin_x * mask
        origin_vec = paddle.rand([32])

        dense_x = origin_x.detach()
        dense_x.stop_gradient = False
        dense_vec = origin_vec.detach()
        dense_vec.stop_gradient = False
        dense_out = paddle.mv(dense_x, dense_vec)
        dense_out.backward()

        sp_x = origin_x.detach().to_sparse_csr()
        sp_x.stop_gradient = False
        sp_vec = origin_vec.detach()
        sp_vec.stop_gradient = False
        sp_out = paddle.sparse.mv(sp_x, sp_vec)
        sp_out.backward()

        np.testing.assert_allclose(
            sp_out.numpy(), dense_out.numpy(), rtol=1e-05
        )
        np.testing.assert_allclose(
            sp_x.grad.to_dense().numpy(),
            (dense_x.grad * mask).numpy(),
            rtol=1e-05,
        )
        np.testing.assert_allclose(
            sp_vec.grad.numpy(), dense_vec.grad.numpy(), rtol=1e-05
        )
```
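The gradient check compares against `dense_x.grad * mask` because a sparse tensor only stores, and therefore only receives gradient at, the positions that were nonzero when it was created. A minimal numpy illustration of that reference value (not part of the commit): for `out = x @ v` with upstream gradient `g`, the dense gradient of `x` is the outer product `g v^T`, and masking keeps only the stored entries.

```python
import numpy as np

# d(x @ v)/dx contracted with upstream gradient g is the outer product g v^T.
g = np.ones(64)                        # upstream gradient of the 64-dim output
v = np.random.rand(32)
grad_x_dense = np.outer(g, v)          # gradient at every position (i, j)

mask = np.random.randint(0, 2, (64, 32))
grad_x_sparse = grad_x_dense * mask    # only the positions the CSR tensor stores
```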
python/paddle/fluid/tests/unittests/test_sparse_pooling_op.py @ 1f93de31

Same change: the `_test_eager_guard` import (hunk `@@ -18,7 +18,6 @@ import unittest`) and the wrappers in `TestMaxPool3DFunc.test` (hunk `@@ -42,32 +41,31 @@`) and `TestMaxPool3DAPI.test` (hunk `@@ -102,19 +100,18 @@ class TestInput(TestMaxPool3DFunc):`) are removed; the subclasses in between (`TestStride`, `TestInput`, etc.) are unchanged context. Note that `out.backward(out)` and `dense_out.backward(dense_out)` seed the vector-Jacobian product with the output tensor itself, so the sparse and dense paths are differentiated under the same upstream gradient. The resulting tests:

```python
class TestMaxPool3DFunc(unittest.TestCase):
    # ... setUp()/setPadding() context unchanged ...

    def test(self):
        self.setUp()
        self.dense_x.stop_gradient = False
        sparse_x = self.dense_x.to_sparse_coo(4)
        sparse_out = paddle.sparse.nn.functional.max_pool3d(
            sparse_x,
            self.kernel_sizes,
            stride=self.strides,
            padding=self.paddings,
        )
        out = sparse_out.to_dense()
        out.backward(out)

        dense_x = copy.deepcopy(self.dense_x)
        dense_out = paddle.nn.functional.max_pool3d(
            dense_x,
            self.kernel_sizes,
            stride=self.strides,
            padding=self.paddings,
            data_format='NDHWC',
        )
        dense_out.backward(dense_out)

        # compare with dense
        assert np.allclose(dense_out.numpy(), out.numpy())
        assert np.allclose(dense_x.grad.numpy(), self.dense_x.grad.numpy())


# ... TestStride, TestInput, and other subclasses unchanged ...


class TestMaxPool3DAPI(unittest.TestCase):
    def test(self):
        dense_x = paddle.randn((2, 3, 6, 6, 3))
        sparse_x = dense_x.to_sparse_coo(4)
        max_pool3d = paddle.sparse.nn.MaxPool3D(
            kernel_size=3, data_format='NDHWC'
        )
        out = max_pool3d(sparse_x)
        out = out.to_dense()
        dense_out = paddle.nn.functional.max_pool3d(
            dense_x, 3, data_format='NDHWC'
        )
        assert np.allclose(dense_out.numpy(), out.numpy())
```
python/paddle/fluid/tests/unittests/test_sparse_softmax_op.py @ 1f93de31

Same change (one hunk, `@@ -18,117 +18,110 @@ import numpy as np`): the import and the `with _test_eager_guard():` wrappers in `TestCsrSoftmax.test_softmax2d` and `TestCsrSoftmax.test_softmax3d` are removed, and the bodies are dedented.

```diff
@@ -18,117 +18,110 @@ import numpy as np
 import scipy.sparse as sp
 
 import paddle
-from paddle.fluid.framework import _test_eager_guard
 
 np.random.seed(2022)
```

The resulting class:

```python
class TestCsrSoftmax(unittest.TestCase):
    def test_softmax2d(self):
        mask = np.random.rand(16, 128) < 0.5
        np_x = np.random.rand(16, 128) * mask
        np_csr = sp.csr_matrix(np_x)
        row_number = np_csr.shape[0]
        np_out = np.array([])
        for i in range(row_number):
            start = np_csr.indptr[i]
            end = np_csr.indptr[i + 1]
            if start == end:
                continue
            x = np_csr.data[start:end]
            x_max = np.max(x, keepdims=True)
            x_exp = np.exp(x - x_max)
            x_exp_sum = np.sum(x_exp, keepdims=True)
            np_out = np.concatenate([np_out, x_exp / x_exp_sum])

        csr = paddle.to_tensor(np_x, stop_gradient=False).to_sparse_csr()
        m = paddle.sparse.nn.Softmax()
        out = m(csr)
        np.testing.assert_allclose(out.crows().numpy(), np_csr.indptr, rtol=1e-05)
        np.testing.assert_allclose(out.cols().numpy(), np_csr.indices, rtol=1e-05)
        np.testing.assert_allclose(out.values().numpy(), np_out, rtol=1e-05)

        # dx = (dout - sum(dout * out)) * out, dout=rand_x
        out.backward(csr.detach())
        dx = np.array([])
        for i in range(row_number):
            start = np_csr.indptr[i]
            end = np_csr.indptr[i + 1]
            if start == end:
                continue
            out = np_out[start:end]
            dout = np_csr.data[start:end]
            sum = np.sum(dout * out, keepdims=True)
            dx = np.concatenate([dx, (dout - sum) * out])

        np.testing.assert_allclose(csr.grad.crows().numpy(), np_csr.indptr, rtol=1e-05)
        np.testing.assert_allclose(csr.grad.cols().numpy(), np_csr.indices, rtol=1e-05)
        np.testing.assert_allclose(csr.grad.values().numpy(), dx, rtol=1e-05)

    def test_softmax3d(self):
        batchNum = 16
        mask = np.random.rand(batchNum, 16, 128) < 0.5
        np_x = np.random.rand(batchNum, 16, 128) * mask

        np_out_list = []
        np_out = np.array([])
        for i in range(batchNum):
            np_csr = sp.csr_matrix(np_x[i, :, :])
            row_number = np_csr.shape[0]
            for j in range(row_number):
                start = np_csr.indptr[j]
                end = np_csr.indptr[j + 1]
                if start == end:
                    continue
                x = np_csr.data[start:end]
                x_max = np.max(x, keepdims=True)
                x_exp = np.exp(x - x_max)
                x_exp_sum = np.sum(x_exp, keepdims=True)
                np_out_list.append(x_exp / x_exp_sum)
                np_out = np.concatenate([np_out, x_exp / x_exp_sum])

        csr = paddle.to_tensor(np_x, stop_gradient=False).to_sparse_csr()
        m = paddle.sparse.nn.Softmax()
        out = m(csr)
        np.testing.assert_allclose(out.values().numpy(), np_out, rtol=1e-05)

        # dx = (dout - sum(dout * out)) * out, dout=rand_x
        out.backward(csr.detach())
        dx = np.array([])
        batch_offset = 0
        for i in range(batchNum):
            np_csr = sp.csr_matrix(np_x[i, :, :])
            row_number = np_csr.shape[0]
            for j in range(row_number):
                start = np_csr.indptr[j]
                end = np_csr.indptr[j + 1]
                if start == end:
                    continue
                dout = np_csr.data[start:end]
                out = np_out[batch_offset + start : batch_offset + end]
                sum = np.sum(dout * out, keepdims=True)
                dx = np.concatenate([dx, (dout - sum) * out])
            batch_offset += np_csr.nnz

        np.testing.assert_allclose(csr.grad.values().numpy(), dx, rtol=1e-05)
```
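The reference loop's comment, `dx = (dout - sum(dout * out)) * out`, is the row-wise vector-Jacobian product of softmax. For one CSR row with softmax output $o$ and upstream gradient $g$, the Jacobian is $\partial o_j/\partial x_k = o_j(\delta_{jk} - o_k)$, so

$$
\frac{\partial L}{\partial x_j} \;=\; \sum_k g_k\, o_k\,(\delta_{kj} - o_j) \;=\; \Big(g_j - \sum_k g_k\, o_k\Big)\, o_j,
$$

which is exactly what the numpy loop computes per row: empty rows are skipped, and in the batched case each row's slice of `np_out` is offset by the running `nnz` count of the preceding batches.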
python/paddle/fluid/tests/unittests/test_sparse_transpose_op.py @ 1f93de31

Same change (hunk `@@ -17,38 +17,36 @@ import unittest`): the `_test_eager_guard` import and the wrapper in `TestTranspose.check_result` are removed. The resulting code:

```python
class TestTranspose(unittest.TestCase):
    # x: sparse, out: sparse
    def check_result(self, x_shape, dims, format):
        mask = paddle.randint(0, 2, x_shape).astype("float32")
        # "+ 1" to make sure that all zero elements in "origin_x" is caused by multiplying by "mask",
        # or the backward checks may fail.
        origin_x = (paddle.rand(x_shape, dtype='float32') + 1) * mask
        dense_x = origin_x.detach()
        dense_x.stop_gradient = False
        dense_out = paddle.transpose(dense_x, dims)

        if format == "coo":
            sp_x = origin_x.detach().to_sparse_coo(len(x_shape))
        else:
            sp_x = origin_x.detach().to_sparse_csr()
        sp_x.stop_gradient = False
        sp_out = paddle.sparse.transpose(sp_x, dims)
        np.testing.assert_allclose(
            sp_out.to_dense().numpy(), dense_out.numpy(), rtol=1e-05
        )

        dense_out.backward()
        sp_out.backward()
        np.testing.assert_allclose(
            sp_x.grad.to_dense().numpy(),
            (dense_x.grad * mask).numpy(),
            rtol=1e-05,
        )

    def test_transpose_2d(self):
        self.check_result([2, 5], [0, 1], 'coo')
```
python/paddle/fluid/tests/unittests/test_sparse_utils_op.py @ 1f93de31 (+371 -410)

This diff is collapsed on the page; its contents are not shown here.
python/paddle/fluid/tests/unittests/test_split_op.py @ 1f93de31

```diff
@@ -20,7 +20,6 @@ from op_test import OpTest, convert_float_to_uint16
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid import Program, core, program_guard
-from paddle.fluid.framework import _test_eager_guard
 
 
 class TestSplitOp(OpTest):
```

Three hunks in `API_TestDygraphFluidSplit` and `API_TestDygraphSplit` (`@@ -453,24 +452,21 @@`, `@@ -486,24 +482,21 @@`, and `@@ -522,24 +515,21 @@`) drop the `with _test_eager_guard():` wrapper around near-identical bodies; they differ only in the split call: `fluid.layers.split(input, num_or_sections=3, dim=1)`, `fluid.layers.split(input, [2, 2, 2], dim=1)`, and `paddle.split(input, num_or_sections=3, axis=1)` respectively. The first, after the change:

```python
x0_out = x0.numpy()
x1_out = x1.numpy()
x2_out = x2.numpy()
ex_x0, ex_x1, ex_x2 = np.split(input_1, 3, axis=1)

# input is a variable which shape is [4, 6, 6]
input = paddle.to_tensor(input_1)
input.stop_gradient = False
x0, x1, x2 = fluid.layers.split(input, num_or_sections=3, dim=1)
eager_x0_out = x0.numpy()
eager_x1_out = x1.numpy()
eager_x2_out = x2.numpy()
loss = x0.sum()
loss.backward()
manul_grad = np.zeros_like(input_1)
manul_grad[:, :2, :] = 1
np.testing.assert_allclose(input.gradient(), manul_grad, rtol=1e-05)
np.testing.assert_allclose(ex_x0, eager_x0_out, rtol=1e-05)
np.testing.assert_allclose(ex_x1, eager_x1_out, rtol=1e-05)
np.testing.assert_allclose(ex_x2, eager_x2_out, rtol=1e-05)

np.testing.assert_allclose(ex_x0, x0_out, rtol=1e-05)
np.testing.assert_allclose(ex_x1, x1_out, rtol=1e-05)
```

In hunk `@@ -570,12 +560,11 @@` the eager re-check of a `[6]`-section split loses its wrapper as well (note that `out_eager_np` is read from `out_dy`, exactly as in the source before and after this commit):

```python
out_dy_np = out_dy.numpy()
ex_out = np.split(input_1, [6], axis=1)
ex_out = ex_out[0]
input = paddle.to_tensor(input_1)
out_eager = paddle.split(input, [6], axis=1)
out_eager = out_eager[0]
out_eager_np = out_dy.numpy()
np.testing.assert_allclose(ex_out, out_eager_np, rtol=1e-05)
np.testing.assert_allclose(ex_out, out_dy_np, rtol=1e-05)

def test_out_tensor_input(self):
```

Finally, `func_negative_one_section` becomes a test in its own right and its eager-guard wrapper method is deleted:

```diff
@@ -612,7 +601,7 @@ class API_TestDygraphSplit(unittest.TestCase):
         np.testing.assert_allclose(ex_x1, x1_out, rtol=1e-05)
         np.testing.assert_allclose(ex_x2, x2_out, rtol=1e-05)
 
-    def func_negative_one_section(self):
+    def test_negative_one_section(self):
         with fluid.dygraph.guard():
             input_1 = np.random.random([4, 6, 6]).astype("int32")
             # input is a variable which shape is [4, 6, 6]
@@ -622,11 +611,6 @@ class API_TestDygraphSplit(unittest.TestCase):
         x0_out = x0[0].numpy()
         np.testing.assert_array_equal(x0_out, input.numpy())
 
-    def test_negative_one_section(self):
-        with _test_eager_guard():
-            self.func_negative_one_section()
-        self.func_negative_one_section()
-
 
 class API_TestEmptySplit(unittest.TestCase):
     def test_axis_input_empty_section(self):
```
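The `manul_grad` reference in these split tests follows from `loss = x0.sum()`: only the first axis-1 section of the `[4, 6, 6]` input contributes to the loss, so the gradient is one over that section and zero elsewhere. A standalone numpy sketch of that reference (not part of the commit):

```python
import numpy as np

input_1 = np.random.random([4, 6, 6]).astype("float32")
x0, x1, x2 = np.split(input_1, 3, axis=1)  # three [4, 2, 6] sections

# loss = x0.sum() depends only on the first section, so d loss / d input
# is 1 over columns 0..1 of axis 1 and 0 elsewhere.
manul_grad = np.zeros_like(input_1)
manul_grad[:, :2, :] = 1
```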