PaddlePaddle / Paddle — commit 1f93de31 (unverified)

Authored on December 14, 2022 by 姜永久; committed via GitHub on December 15, 2022.
rm unittest eager guard tests part20 sparse_mv2split (#48879)
Parent commit: eb322853

Showing 6 changed files with 625 additions and 695 deletions (+625, -695)
python/paddle/fluid/tests/unittests/test_sparse_mv_op.py         +62   -65
python/paddle/fluid/tests/unittests/test_sparse_pooling_op.py    +37   -40
python/paddle/fluid/tests/unittests/test_sparse_softmax_op.py    +81   -88
python/paddle/fluid/tests/unittests/test_sparse_transpose_op.py  +23   -25
python/paddle/fluid/tests/unittests/test_sparse_utils_op.py      +371  -410
python/paddle/fluid/tests/unittests/test_split_op.py             +51   -67
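All six diffs below apply the same mechanical change: the legacy `_test_eager_guard` import and `with _test_eager_guard():` wrapper are removed and the test bodies are dedented, since eager execution is now the default. A minimal before/after sketch of the pattern (the test class below is illustrative only, not taken from the diff):

    import unittest

    import paddle


    class ExampleSparseTest(unittest.TestCase):
        # Before this commit the body was wrapped in the legacy guard:
        #     from paddle.fluid.framework import _test_eager_guard
        #     def test_to_sparse_coo(self):
        #         with _test_eager_guard():
        #             ...   # body indented one extra level
        # After the commit the guard is gone and the body runs directly.
        def test_to_sparse_coo(self):
            dense = paddle.to_tensor([[0.0, 1.0], [2.0, 0.0]])
            coo = dense.to_sparse_coo(2)   # both dims become sparse indices
            self.assertEqual(coo.values().numpy().shape[0], 2)


    if __name__ == '__main__':
        unittest.main()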
python/paddle/fluid/tests/unittests/test_sparse_mv_op.py    (view file @ 1f93de31)

@@ -19,7 +19,6 @@ import unittest
 import numpy as np
 import paddle
-from paddle.fluid.framework import _test_eager_guard

 paddle.seed(100)
 ...
@@ -43,7 +42,6 @@ def get_cuda_version():
 class TestCsrMv(unittest.TestCase):
     # x: csr-matrix, y: dense-vec, out: dense-vec
     def test_mv(self):
-        with _test_eager_guard():
         paddle.set_default_dtype('float64')
         origin_x = paddle.rand([64, 32])
         mask = paddle.randint(0, 2, [64, 32])
 ...
@@ -84,7 +82,6 @@ class TestCsrMv(unittest.TestCase):
 class TestCooMv(unittest.TestCase):
     # x: csr-matrix, y: dense-vec, out: dense-vec
     def test_mv(self):
-        with _test_eager_guard():
         paddle.set_default_dtype('float64')
         origin_x = paddle.rand([64, 32])
         mask = paddle.randint(0, 2, [64, 32])
 ...
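For orientation, the setup kept in these hunks builds the sparse operand from a masked random matrix and compares against a dense reference. A minimal sketch of that check is below; the paddle.sparse.mv call and the CUDA guard are my assumptions about the op under test, which the original file gates behind get_cuda_version():

    import numpy as np
    import paddle

    paddle.seed(100)
    paddle.set_default_dtype('float64')

    # Same construction as the test: random dense matrix times a 0/1 mask.
    origin_x = paddle.rand([64, 32])
    mask = paddle.randint(0, 2, [64, 32]).astype('float64')
    dense_x = origin_x * mask
    dense_vec = paddle.rand([32])

    dense_out = paddle.matmul(dense_x, dense_vec)        # dense reference

    if paddle.is_compiled_with_cuda():                   # assumed availability guard
        csr_x = dense_x.to_sparse_csr()
        sparse_out = paddle.sparse.mv(csr_x, dense_vec)  # assumed sparse API
        np.testing.assert_allclose(sparse_out.numpy(), dense_out.numpy(), rtol=1e-05)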
python/paddle/fluid/tests/unittests/test_sparse_pooling_op.py    (view file @ 1f93de31)

@@ -18,7 +18,6 @@ import unittest
 import numpy as np
 import paddle
-from paddle.fluid.framework import _test_eager_guard

 class TestMaxPool3DFunc(unittest.TestCase):
 ...
@@ -42,7 +41,6 @@ class TestMaxPool3DFunc(unittest.TestCase):
         self.setPadding()

     def test(self):
-        with _test_eager_guard():
         self.setUp()
         self.dense_x.stop_gradient = False
         sparse_x = self.dense_x.to_sparse_coo(4)
 ...
@@ -102,7 +100,6 @@ class TestInput(TestMaxPool3DFunc):
 class TestMaxPool3DAPI(unittest.TestCase):
     def test(self):
-        with _test_eager_guard():
         dense_x = paddle.randn((2, 3, 6, 6, 3))
         sparse_x = dense_x.to_sparse_coo(4)
         max_pool3d = paddle.sparse.nn.MaxPool3D(
 ...
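The pooling tests feed an NDHWC tensor converted to COO format into paddle.sparse.nn.MaxPool3D. A minimal sketch using the shapes from the hunk; the kernel_size and data_format arguments are my assumptions, since the diff cuts off right after the constructor is opened:

    import paddle

    dense_x = paddle.randn((2, 3, 6, 6, 3))   # N, D, H, W, C as in the test
    sparse_x = dense_x.to_sparse_coo(4)       # first four dims become sparse indices

    max_pool3d = paddle.sparse.nn.MaxPool3D(
        kernel_size=3, data_format='NDHWC'    # assumed constructor arguments
    )
    out = max_pool3d(sparse_x)
    print(out.shape)                          # output is still a sparse COO tensor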
python/paddle/fluid/tests/unittests/test_sparse_softmax_op.py    (view file @ 1f93de31)

@@ -18,14 +18,12 @@ import numpy as np
 import scipy.sparse as sp
 import paddle
-from paddle.fluid.framework import _test_eager_guard

 np.random.seed(2022)

 class TestCsrSoftmax(unittest.TestCase):
     def test_softmax2d(self):
-        with _test_eager_guard():
         mask = np.random.rand(16, 128) < 0.5
         np_x = np.random.rand(16, 128) * mask
         np_csr = sp.csr_matrix(np_x)
 ...
@@ -73,12 +71,9 @@ class TestCsrSoftmax(unittest.TestCase):
         np.testing.assert_allclose(
             csr.grad.cols().numpy(), np_csr.indices, rtol=1e-05
         )
         np.testing.assert_allclose(csr.grad.values().numpy(), dx, rtol=1e-05)

     def test_softmax3d(self):
-        with _test_eager_guard():
         batchNum = 16
         mask = np.random.rand(batchNum, 16, 128) < 0.5
         np_x = np.random.rand(batchNum, 16, 128) * mask
 ...
@@ -126,9 +121,7 @@ class TestCsrSoftmax(unittest.TestCase):
             batch_offset += np_csr.nnz
         np.testing.assert_allclose(csr.grad.values().numpy(), dx, rtol=1e-05)

 if __name__ == "__main__":
 ...
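The expected values in this file come from SciPy, with softmax taken row by row over the stored entries of the CSR matrix only. The sketch below reproduces that reference computation under the same masking setup; the max-subtraction is the usual numerical-stability trick and may differ from the exact reference code in the file:

    import numpy as np
    import scipy.sparse as sp

    np.random.seed(2022)

    mask = np.random.rand(16, 128) < 0.5
    np_x = np.random.rand(16, 128) * mask
    np_csr = sp.csr_matrix(np_x)

    # Softmax per row, restricted to the stored (non-zero) entries.
    expected_rows = []
    for i in range(np_csr.shape[0]):
        row = np_csr.data[np_csr.indptr[i]:np_csr.indptr[i + 1]]
        if row.size:
            e = np.exp(row - row.max())
            expected_rows.append(e / e.sum())
        else:
            expected_rows.append(row)

    expected = np.concatenate(expected_rows)
    print(expected.shape, np_csr.nnz)   # one softmax value per stored entry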
python/paddle/fluid/tests/unittests/test_sparse_transpose_op.py    (view file @ 1f93de31)

@@ -17,13 +17,11 @@ import unittest
 import numpy as np
 import paddle
-from paddle.fluid.framework import _test_eager_guard

 class TestTranspose(unittest.TestCase):
     # x: sparse, out: sparse
     def check_result(self, x_shape, dims, format):
-        with _test_eager_guard():
         mask = paddle.randint(0, 2, x_shape).astype("float32")
         # "+ 1" to make sure that all zero elements in "origin_x" is caused by multiplying by "mask",
         # or the backward checks may fail.
 ...
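The comment kept in this hunk is worth spelling out: adding 1 before masking guarantees that every zero in origin_x was produced by the mask, so the sparse input and its gradient share one sparsity pattern and the backward check cannot fail spuriously. A minimal sketch of that construction; the paddle.sparse.transpose call is my assumption about the op this file exercises:

    import paddle

    x_shape, dims = [6, 8], [1, 0]

    mask = paddle.randint(0, 2, x_shape).astype("float32")
    # "+ 1" ensures every zero in origin_x comes from the mask, not from rand()
    # happening to produce an exact zero.
    origin_x = (paddle.rand(x_shape) + 1) * mask

    sparse_x = origin_x.to_sparse_coo(len(x_shape))
    sparse_out = paddle.sparse.transpose(sparse_x, dims)   # assumed op under test
    dense_out = paddle.transpose(origin_x, dims)
    print(sparse_out.to_dense().shape, dense_out.shape)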
python/paddle/fluid/tests/unittests/test_sparse_utils_op.py    (view file @ 1f93de31)

@@ -19,14 +19,12 @@ import numpy as np
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
-from paddle.fluid.framework import _test_eager_guard

 devices = ['cpu', 'gpu']

 class TestSparseCreate(unittest.TestCase):
     def test_create_coo_by_tensor(self):
-        with _test_eager_guard():
         indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
         values = [1, 2, 3, 4, 5]
         dense_shape = [3, 4]
 ...
@@ -40,7 +38,6 @@ class TestSparseCreate(unittest.TestCase):
         assert np.array_equal(values, coo.values().numpy())

     def test_create_coo_by_np(self):
-        with _test_eager_guard():
         indices = [[0, 1, 2], [1, 2, 0]]
         values = [1.0, 2.0, 3.0]
         dense_shape = [3, 3]
 ...
@@ -50,7 +47,6 @@ class TestSparseCreate(unittest.TestCase):
         assert np.array_equal(values, coo.values().numpy())

     def test_create_csr_by_tensor(self):
-        with _test_eager_guard():
         crows = [0, 2, 3, 5]
         cols = [1, 3, 2, 0, 1]
         values = [1, 2, 3, 4, 5]
 ...
@@ -68,14 +64,11 @@ class TestSparseCreate(unittest.TestCase):
         )

     def test_create_csr_by_np(self):
-        with _test_eager_guard():
         crows = [0, 2, 3, 5]
         cols = [1, 3, 2, 0, 1]
         values = [1, 2, 3, 4, 5]
         dense_shape = [3, 4]
         csr = paddle.sparse.sparse_csr_tensor(
             crows, cols, values, dense_shape
         )
         # test the to_string.py
         assert np.array_equal(5, csr.nnz())
         assert np.array_equal(crows, csr.crows().numpy())
 ...
@@ -83,7 +76,6 @@ class TestSparseCreate(unittest.TestCase):
         assert np.array_equal(values, csr.values().numpy())

     def test_place(self):
-        with _test_eager_guard():
         place = core.CPUPlace()
         indices = [[0, 1], [0, 1]]
         values = [1.0, 2.0]
 ...
@@ -107,7 +99,6 @@ class TestSparseCreate(unittest.TestCase):
         assert csr.values().place.is_cpu_place()

     def test_dtype(self):
-        with _test_eager_guard():
         indices = [[0, 1], [0, 1]]
         values = [1.0, 2.0]
         dense_shape = [2, 2]
 ...
@@ -127,7 +118,6 @@ class TestSparseCreate(unittest.TestCase):
         assert csr.dtype == paddle.float16

     def test_create_coo_no_shape(self):
-        with _test_eager_guard():
         indices = [[0, 1], [0, 1]]
         values = [1.0, 2.0]
         indices = paddle.to_tensor(indices, dtype='int32')
 ...
@@ -138,7 +128,6 @@ class TestSparseCreate(unittest.TestCase):
 class TestSparseConvert(unittest.TestCase):
     def test_to_sparse_coo(self):
-        with _test_eager_guard():
         x = [[0, 1, 0, 2], [0, 0, 3, 0], [4, 5, 0, 0]]
         indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
         values = [1.0, 2.0, 3.0, 4.0, 5.0]
 ...
@@ -156,13 +145,10 @@ class TestSparseConvert(unittest.TestCase):
             stop_gradient=True,
         )
         out.backward(out_grad)
         assert np.array_equal(
             dense_x.grad.numpy(), out_grad.to_dense().numpy()
         )

     def test_coo_to_dense(self):
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
-        with _test_eager_guard():
         indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
         values = [1.0, 2.0, 3.0, 4.0, 5.0]
         indices_dtypes = ['int32', 'int64']
 ...
@@ -202,7 +188,6 @@ class TestSparseConvert(unittest.TestCase):
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})

     def test_to_sparse_csr(self):
-        with _test_eager_guard():
         x = [[0, 1, 0, 2], [0, 0, 3, 0], [4, 5, 0, 0]]
         crows = [0, 2, 3, 5]
         cols = [1, 3, 2, 0, 1]
 ...
@@ -218,7 +203,6 @@ class TestSparseConvert(unittest.TestCase):
     def test_coo_values_grad(self):
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
-        with _test_eager_guard():
         indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
         values = [1.0, 2.0, 3.0, 4.0, 5.0]
         sparse_x = paddle.sparse.sparse_coo_tensor(
 ...
@@ -260,7 +244,6 @@ class TestSparseConvert(unittest.TestCase):
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})

     def test_sparse_coo_tensor_grad(self):
-        with _test_eager_guard():
         for device in devices:
             if device == 'cpu' or (
                 device == 'gpu' and paddle.is_compiled_with_cuda()
 ...
@@ -284,9 +267,7 @@ class TestSparseConvert(unittest.TestCase):
                 )
                 sparse_x.backward(sparse_out_grad)
                 correct_values_grad = [0, 3]
                 assert np.array_equal(
                     correct_values_grad, values.grad.numpy()
                 )

                 # test the non-zero values is a vector
                 values = [[1, 1], [2, 2]]
 ...
@@ -303,12 +284,9 @@ class TestSparseConvert(unittest.TestCase):
                 )
                 sparse_x.backward(sparse_out_grad)
                 correct_values_grad = [[0, 0], [3, 3]]
                 assert np.array_equal(
                     correct_values_grad, values.grad.numpy()
                 )

     def test_sparse_coo_tensor_sorted(self):
-        with _test_eager_guard():
         for device in devices:
             if device == 'cpu' or (
                 device == 'gpu' and paddle.is_compiled_with_cuda()
 ...
@@ -326,9 +304,7 @@ class TestSparseConvert(unittest.TestCase):
                 assert np.array_equal(
                     indices_sorted, sparse_x.indices().numpy()
                 )
                 assert np.array_equal(
                     values_sorted, sparse_x.values().numpy()
                 )

                 # test the non-zero values is a vector
                 values = [[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]]
 ...
@@ -339,13 +315,9 @@ class TestSparseConvert(unittest.TestCase):
                 assert np.array_equal(
                     indices_sorted, sparse_x.indices().numpy()
                 )
                 assert np.array_equal(
                     values_sorted, sparse_x.values().numpy()
                 )

     def test_batch_csr(self):
-        with _test_eager_guard():
         def verify(dense_x):
             sparse_x = dense_x.to_sparse_csr()
             out = sparse_x.to_dense()
 ...
@@ -383,7 +355,6 @@ class TestSparseConvert(unittest.TestCase):
 class TestCooError(unittest.TestCase):
     def test_small_shape(self):
-        with _test_eager_guard():
         with self.assertRaises(ValueError):
             indices = [[2, 3], [0, 2]]
             values = [1, 2]
 ...
@@ -394,7 +365,6 @@ class TestCooError(unittest.TestCase):
             )

     def test_same_nnz(self):
-        with _test_eager_guard():
         with self.assertRaises(ValueError):
             # 2. test the nnz of indices must same as nnz of values
             indices = [[1, 2], [1, 0]]
 ...
@@ -402,7 +372,6 @@ class TestCooError(unittest.TestCase):
             sparse_x = paddle.sparse.sparse_coo_tensor(indices, values)

     def test_same_dimensions(self):
-        with _test_eager_guard():
         with self.assertRaises(ValueError):
             indices = [[1, 2], [1, 0]]
             values = [1, 2, 3]
 ...
@@ -412,7 +381,6 @@ class TestCooError(unittest.TestCase):
             )

     def test_indices_dtype(self):
-        with _test_eager_guard():
         with self.assertRaises(TypeError):
             indices = [[1.0, 2.0], [0, 1]]
             values = [1, 2]
 ...
@@ -421,7 +389,6 @@ class TestCooError(unittest.TestCase):
 class TestCsrError(unittest.TestCase):
     def test_dimension1(self):
-        with _test_eager_guard():
         with self.assertRaises(ValueError):
             crows = [0, 1, 2, 3]
             cols = [0, 1, 2]
 ...
@@ -432,7 +399,6 @@ class TestCsrError(unittest.TestCase):
             )

     def test_dimension2(self):
-        with _test_eager_guard():
         with self.assertRaises(ValueError):
             crows = [0, 1, 2, 3]
             cols = [0, 1, 2]
 ...
@@ -443,7 +409,6 @@ class TestCsrError(unittest.TestCase):
             )

     def test_same_shape1(self):
-        with _test_eager_guard():
         with self.assertRaises(ValueError):
             crows = [0, 1, 2, 3]
             cols = [0, 1, 2, 3]
 ...
@@ -454,7 +419,6 @@ class TestCsrError(unittest.TestCase):
             )

     def test_same_shape2(self):
-        with _test_eager_guard():
         with self.assertRaises(ValueError):
             crows = [0, 1, 2, 3]
             cols = [0, 1, 2, 3]
 ...
@@ -465,7 +429,6 @@ class TestCsrError(unittest.TestCase):
             )

     def test_same_shape3(self):
-        with _test_eager_guard():
         with self.assertRaises(ValueError):
             crows = [0, 1, 2, 3, 0, 1, 2]
             cols = [0, 1, 2, 3, 0, 1, 2]
 ...
@@ -476,7 +439,6 @@ class TestCsrError(unittest.TestCase):
             )

     def test_crows_first_value(self):
-        with _test_eager_guard():
         with self.assertRaises(ValueError):
             crows = [1, 1, 2, 3]
             cols = [0, 1, 2]
 ...
@@ -487,7 +449,6 @@ class TestCsrError(unittest.TestCase):
             )

     def test_dtype(self):
-        with _test_eager_guard():
         with self.assertRaises(TypeError):
             crows = [0, 1, 2, 3.0]
             cols = [0, 1, 2]
 ...
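Nearly every hunk in this file builds COO or CSR tensors straight from Python lists. The condensed, runnable sketch below uses the same constructors, indices, and values that appear in the diff; only the final dense round trip is my own addition, mirroring what TestSparseConvert checks:

    import numpy as np
    import paddle

    # COO: indices is a [sparse_dim, nnz] array; values align with its columns.
    indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
    values = [1.0, 2.0, 3.0, 4.0, 5.0]
    dense_shape = [3, 4]
    coo = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape)
    assert np.array_equal(values, coo.values().numpy())

    # CSR: crows has one entry per row plus one; cols/values hold the nnz data.
    crows = [0, 2, 3, 5]
    cols = [1, 3, 2, 0, 1]
    csr = paddle.sparse.sparse_csr_tensor(crows, cols, values, dense_shape)
    assert np.array_equal(5, csr.nnz())
    assert np.array_equal(crows, csr.crows().numpy())

    # Round trip through a dense tensor, as the conversion tests do.
    dense = coo.to_dense()
    assert np.array_equal(values, dense.to_sparse_coo(2).values().numpy())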
python/paddle/fluid/tests/unittests/test_split_op.py    (view file @ 1f93de31)

@@ -20,7 +20,6 @@ from op_test import OpTest, convert_float_to_uint16
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid import Program, core, program_guard
-from paddle.fluid.framework import _test_eager_guard

 class TestSplitOp(OpTest):
 ...
@@ -453,7 +452,6 @@ class API_TestDygraphFluidSplit(unittest.TestCase):
         x1_out = x1.numpy()
         x2_out = x2.numpy()
         ex_x0, ex_x1, ex_x2 = np.split(input_1, 3, axis=1)
-        with _test_eager_guard():
         # input is a variable which shape is [4, 6, 6]
         input = paddle.to_tensor(input_1)
         input.stop_gradient = False
 ...
@@ -465,9 +463,7 @@ class API_TestDygraphFluidSplit(unittest.TestCase):
         loss.backward()
         manul_grad = np.zeros_like(input_1)
         manul_grad[:, :2, :] = 1
         np.testing.assert_allclose(input.gradient(), manul_grad, rtol=1e-05)
         np.testing.assert_allclose(ex_x0, eager_x0_out, rtol=1e-05)
         np.testing.assert_allclose(ex_x1, eager_x1_out, rtol=1e-05)
         np.testing.assert_allclose(ex_x2, eager_x2_out, rtol=1e-05)
 ...
@@ -486,7 +482,6 @@ class API_TestDygraphFluidSplit(unittest.TestCase):
         x1_out = x1.numpy()
         x2_out = x2.numpy()
         ex_x0, ex_x1, ex_x2 = np.split(input_1, 3, axis=1)
-        with _test_eager_guard():
         # input is a variable which shape is [4, 6, 6]
         input = paddle.to_tensor(input_1)
         input.stop_gradient = False
 ...
@@ -498,9 +493,7 @@ class API_TestDygraphFluidSplit(unittest.TestCase):
         loss.backward()
         manul_grad = np.zeros_like(input_1)
         manul_grad[:, :2, :] = 1
         np.testing.assert_allclose(input.gradient(), manul_grad, rtol=1e-05)
         np.testing.assert_allclose(ex_x0, eager_x0_out, rtol=1e-05)
         np.testing.assert_allclose(ex_x1, eager_x1_out, rtol=1e-05)
         np.testing.assert_allclose(ex_x2, eager_x2_out, rtol=1e-05)
 ...
@@ -522,7 +515,6 @@ class API_TestDygraphSplit(unittest.TestCase):
         x2_out = x2.numpy()
         ex_x0, ex_x1, ex_x2 = np.split(input_1, 3, axis=1)
-        with _test_eager_guard():
         # input is a variable which shape is [4, 6, 6]
         input = paddle.to_tensor(input_1)
         input.stop_gradient = False
 ...
@@ -534,9 +526,7 @@ class API_TestDygraphSplit(unittest.TestCase):
         loss.backward()
         manul_grad = np.zeros_like(input_1)
         manul_grad[:, :2, :] = 1
         np.testing.assert_allclose(input.gradient(), manul_grad, rtol=1e-05)
         np.testing.assert_allclose(ex_x0, eager_x0_out, rtol=1e-05)
         np.testing.assert_allclose(ex_x1, eager_x1_out, rtol=1e-05)
         np.testing.assert_allclose(ex_x2, eager_x2_out, rtol=1e-05)
 ...
@@ -570,7 +560,6 @@ class API_TestDygraphSplit(unittest.TestCase):
         out_dy_np = out_dy.numpy()
         ex_out = np.split(input_1, [6], axis=1)
         ex_out = ex_out[0]
-        with _test_eager_guard():
         input = paddle.to_tensor(input_1)
         out_eager = paddle.split(input, [6], axis=1)
         out_eager = out_eager[0]
 ...
@@ -612,7 +601,7 @@ class API_TestDygraphSplit(unittest.TestCase):
         np.testing.assert_allclose(ex_x1, x1_out, rtol=1e-05)
         np.testing.assert_allclose(ex_x2, x2_out, rtol=1e-05)

-    def func_negative_one_section(self):
+    def test_negative_one_section(self):
         with fluid.dygraph.guard():
             input_1 = np.random.random([4, 6, 6]).astype("int32")
             # input is a variable which shape is [4, 6, 6]
 ...
@@ -622,11 +611,6 @@ class API_TestDygraphSplit(unittest.TestCase):
             x0_out = x0[0].numpy()
             np.testing.assert_array_equal(x0_out, input.numpy())

-    def test_negative_one_section(self):
-        with _test_eager_guard():
-            self.func_negative_one_section()
-        self.func_negative_one_section()

 class API_TestEmptySplit(unittest.TestCase):
     def test_axis_input_empty_section(self):
 ...
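The dygraph split tests all reduce to comparing paddle.split against np.split on the same array. A compact sketch of both section styles used in the hunks (three equal sections, and a single explicit section of size 6):

    import numpy as np
    import paddle

    input_1 = np.random.random([4, 6, 6]).astype("float32")
    input = paddle.to_tensor(input_1)

    # Three equal sections along axis 1, checked against the NumPy reference.
    x0, x1, x2 = paddle.split(input, num_or_sections=3, axis=1)
    ex_x0, ex_x1, ex_x2 = np.split(input_1, 3, axis=1)
    np.testing.assert_allclose(ex_x0, x0.numpy(), rtol=1e-05)
    np.testing.assert_allclose(ex_x1, x1.numpy(), rtol=1e-05)
    np.testing.assert_allclose(ex_x2, x2.numpy(), rtol=1e-05)

    # A single explicit section of size 6 keeps the whole axis, as in the
    # [6]-section hunk.
    out_eager = paddle.split(input, [6], axis=1)[0]
    ex_out = np.split(input_1, [6], axis=1)[0]
    np.testing.assert_allclose(ex_out, out_eager.numpy(), rtol=1e-05)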