BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit 1f93de31 (unverified)
Authored Dec 14, 2022 by 姜永久; committed by GitHub on Dec 15, 2022.
rm unittest eager guard tests part20 sparse_mv2split (#48879)
Parent: eb322853

Showing 6 changed files with 625 additions and 695 deletions (+625, -695). Whitespace-only changes are hidden in the diffs below.
python/paddle/fluid/tests/unittests/test_sparse_mv_op.py           +62   -65
python/paddle/fluid/tests/unittests/test_sparse_pooling_op.py      +37   -40
python/paddle/fluid/tests/unittests/test_sparse_softmax_op.py      +81   -88
python/paddle/fluid/tests/unittests/test_sparse_transpose_op.py    +23   -25
python/paddle/fluid/tests/unittests/test_sparse_utils_op.py        +371  -410
python/paddle/fluid/tests/unittests/test_split_op.py               +51   -67
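All six files receive the same mechanical edit: the `from paddle.fluid.framework import _test_eager_guard` import is dropped, the `with _test_eager_guard():` wrapper is deleted, and the test body is de-indented one level (the guard became redundant once eager dygraph became Paddle's default execution mode). A minimal sketch of the resulting test shape, assuming Paddle is installed; the class and method names mirror TestCsrMv.test_mv, but the body is illustrative rather than copied verbatim:

import unittest

import paddle


class TestCsrMvPattern(unittest.TestCase):
    def test_mv(self):
        # Previously the lines below sat inside `with _test_eager_guard():`;
        # after this commit they run directly under the default (eager) mode.
        paddle.set_default_dtype('float64')
        origin_x = paddle.rand([64, 32])
        mask = paddle.randint(0, 2, [64, 32]).astype('float64')
        x = origin_x * mask  # hypothetical follow-up, mirroring the masking idiom in the real test
        self.assertEqual(x.shape, [64, 32])


if __name__ == '__main__':
    unittest.main()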
python/paddle/fluid/tests/unittests/test_sparse_mv_op.py @ 1f93de31

@@ -19,7 +19,6 @@ import unittest
 import numpy as np

 import paddle
-from paddle.fluid.framework import _test_eager_guard

 paddle.seed(100)

@@ -43,7 +42,6 @@ def get_cuda_version():
 class TestCsrMv(unittest.TestCase):
     # x: csr-matrix, y: dense-vec, out: dense-vec
     def test_mv(self):
-        with _test_eager_guard():
         paddle.set_default_dtype('float64')
         origin_x = paddle.rand([64, 32])
         mask = paddle.randint(0, 2, [64, 32])

@@ -84,7 +82,6 @@ class TestCsrMv(unittest.TestCase):
 class TestCooMv(unittest.TestCase):
     # x: csr-matrix, y: dense-vec, out: dense-vec
     def test_mv(self):
-        with _test_eager_guard():
         paddle.set_default_dtype('float64')
         origin_x = paddle.rand([64, 32])
         mask = paddle.randint(0, 2, [64, 32])
python/paddle/fluid/tests/unittests/test_sparse_pooling_op.py @ 1f93de31

@@ -18,7 +18,6 @@ import unittest
 import numpy as np

 import paddle
-from paddle.fluid.framework import _test_eager_guard


 class TestMaxPool3DFunc(unittest.TestCase):

@@ -42,7 +41,6 @@ class TestMaxPool3DFunc(unittest.TestCase):
         self.setPadding()

     def test(self):
-        with _test_eager_guard():
         self.setUp()
         self.dense_x.stop_gradient = False
         sparse_x = self.dense_x.to_sparse_coo(4)

@@ -102,7 +100,6 @@ class TestInput(TestMaxPool3DFunc):
 class TestMaxPool3DAPI(unittest.TestCase):
     def test(self):
-        with _test_eager_guard():
         dense_x = paddle.randn((2, 3, 6, 6, 3))
         sparse_x = dense_x.to_sparse_coo(4)
         max_pool3d = paddle.sparse.nn.MaxPool3D(
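For reference, a hedged usage sketch of the sparse max-pooling path the hunks above exercise, assuming a Paddle build that provides the 2.4-era paddle.sparse.nn pooling kernel; the kernel size is illustrative:

import paddle

dense_x = paddle.randn((2, 3, 6, 6, 3))                  # NDHWC layout, as in TestMaxPool3DAPI
sparse_x = dense_x.to_sparse_coo(4)                      # keep the trailing channel axis dense
max_pool3d = paddle.sparse.nn.MaxPool3D(kernel_size=3)   # illustrative kernel size
out = max_pool3d(sparse_x)
print(out.shape)                                         # sparse COO output, reduced along D/H/W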
python/paddle/fluid/tests/unittests/test_sparse_softmax_op.py @ 1f93de31

@@ -18,14 +18,12 @@ import numpy as np
 import scipy.sparse as sp

 import paddle
-from paddle.fluid.framework import _test_eager_guard

 np.random.seed(2022)


 class TestCsrSoftmax(unittest.TestCase):
     def test_softmax2d(self):
-        with _test_eager_guard():
         mask = np.random.rand(16, 128) < 0.5
         np_x = np.random.rand(16, 128) * mask
         np_csr = sp.csr_matrix(np_x)

@@ -73,12 +71,9 @@ class TestCsrSoftmax(unittest.TestCase):
         np.testing.assert_allclose(csr.grad.cols().numpy(), np_csr.indices, rtol=1e-05)
-        np.testing.assert_allclose(
-            csr.grad.values().numpy(), dx, rtol=1e-05
-        )
+        np.testing.assert_allclose(csr.grad.values().numpy(), dx, rtol=1e-05)

     def test_softmax3d(self):
-        with _test_eager_guard():
         batchNum = 16
         mask = np.random.rand(batchNum, 16, 128) < 0.5
         np_x = np.random.rand(batchNum, 16, 128) * mask

@@ -126,9 +121,7 @@ class TestCsrSoftmax(unittest.TestCase):
             batch_offset += np_csr.nnz
-            np.testing.assert_allclose(
-                csr.grad.values().numpy(), dx, rtol=1e-05
-            )
+            np.testing.assert_allclose(csr.grad.values().numpy(), dx, rtol=1e-05)


 if __name__ == "__main__":
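Part of the -88/+81 churn in this file is simply the wrapped np.testing.assert_allclose(...) calls collapsing onto one line once the extra indentation level disappears. A small numpy/scipy-only illustration of the same call pattern (the arrays are made up; the csr.grad values from the real test are not reproduced here):

import numpy as np
import scipy.sparse as sp

np.random.seed(2022)
mask = np.random.rand(4, 8) < 0.5
np_x = np.random.rand(4, 8) * mask
np_csr = sp.csr_matrix(np_x)

# Before: the call was wrapped over three lines to fit under the guard's indentation.
# After: the same check fits on a single line.
np.testing.assert_allclose(np_csr.toarray(), np_x, rtol=1e-05)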
python/paddle/fluid/tests/unittests/test_sparse_transpose_op.py @ 1f93de31

@@ -17,13 +17,11 @@ import unittest
 import numpy as np

 import paddle
-from paddle.fluid.framework import _test_eager_guard


 class TestTranspose(unittest.TestCase):
     # x: sparse, out: sparse
     def check_result(self, x_shape, dims, format):
-        with _test_eager_guard():
         mask = paddle.randint(0, 2, x_shape).astype("float32")
         # "+ 1" to make sure that all zero elements in "origin_x" is caused by multiplying by "mask",
         # or the backward checks may fail.
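The "+ 1" comment in the hunk above is doing real work: it guarantees that every zero in origin_x comes from the mask rather than from the random draw, so the backward comparison between the sparse and dense paths sees the same sparsity pattern. The idea in plain numpy (shapes are illustrative):

import numpy as np

np.random.seed(0)
mask = np.random.randint(0, 2, (3, 4)).astype("float32")
origin_x = (np.random.rand(3, 4).astype("float32") + 1) * mask  # "+ 1" before masking

# zeros in origin_x now coincide exactly with zeros in mask
assert ((origin_x == 0) == (mask == 0)).all()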
python/paddle/fluid/tests/unittests/test_sparse_utils_op.py @ 1f93de31

@@ -19,14 +19,12 @@ import numpy as np
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
-from paddle.fluid.framework import _test_eager_guard

 devices = ['cpu', 'gpu']


 class TestSparseCreate(unittest.TestCase):
     def test_create_coo_by_tensor(self):
-        with _test_eager_guard():
         indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
         values = [1, 2, 3, 4, 5]
         dense_shape = [3, 4]

@@ -40,7 +38,6 @@ class TestSparseCreate(unittest.TestCase):
         assert np.array_equal(values, coo.values().numpy())

     def test_create_coo_by_np(self):
-        with _test_eager_guard():
         indices = [[0, 1, 2], [1, 2, 0]]
         values = [1.0, 2.0, 3.0]
         dense_shape = [3, 3]

@@ -50,7 +47,6 @@ class TestSparseCreate(unittest.TestCase):
         assert np.array_equal(values, coo.values().numpy())

     def test_create_csr_by_tensor(self):
-        with _test_eager_guard():
         crows = [0, 2, 3, 5]
         cols = [1, 3, 2, 0, 1]
         values = [1, 2, 3, 4, 5]

@@ -68,14 +64,11 @@ class TestSparseCreate(unittest.TestCase):
         )

     def test_create_csr_by_np(self):
-        with _test_eager_guard():
         crows = [0, 2, 3, 5]
         cols = [1, 3, 2, 0, 1]
         values = [1, 2, 3, 4, 5]
         dense_shape = [3, 4]
-        csr = paddle.sparse.sparse_csr_tensor(
-            crows, cols, values, dense_shape
-        )
+        csr = paddle.sparse.sparse_csr_tensor(crows, cols, values, dense_shape)
         # test the to_string.py
         assert np.array_equal(5, csr.nnz())
         assert np.array_equal(crows, csr.crows().numpy())

@@ -83,7 +76,6 @@ class TestSparseCreate(unittest.TestCase):
         assert np.array_equal(values, csr.values().numpy())

     def test_place(self):
-        with _test_eager_guard():
         place = core.CPUPlace()
         indices = [[0, 1], [0, 1]]
         values = [1.0, 2.0]

@@ -107,7 +99,6 @@ class TestSparseCreate(unittest.TestCase):
         assert csr.values().place.is_cpu_place()

     def test_dtype(self):
-        with _test_eager_guard():
         indices = [[0, 1], [0, 1]]
         values = [1.0, 2.0]
         dense_shape = [2, 2]

@@ -127,7 +118,6 @@ class TestSparseCreate(unittest.TestCase):
         assert csr.dtype == paddle.float16

     def test_create_coo_no_shape(self):
-        with _test_eager_guard():
         indices = [[0, 1], [0, 1]]
         values = [1.0, 2.0]
         indices = paddle.to_tensor(indices, dtype='int32')

@@ -138,7 +128,6 @@ class TestSparseCreate(unittest.TestCase):
 class TestSparseConvert(unittest.TestCase):
     def test_to_sparse_coo(self):
-        with _test_eager_guard():
         x = [[0, 1, 0, 2], [0, 0, 3, 0], [4, 5, 0, 0]]
         indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
         values = [1.0, 2.0, 3.0, 4.0, 5.0]

@@ -156,13 +145,10 @@ class TestSparseConvert(unittest.TestCase):
             stop_gradient=True,
         )
         out.backward(out_grad)
-        assert np.array_equal(
-            dense_x.grad.numpy(), out_grad.to_dense().numpy()
-        )
+        assert np.array_equal(dense_x.grad.numpy(), out_grad.to_dense().numpy())

     def test_coo_to_dense(self):
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
-        with _test_eager_guard():
         indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
         values = [1.0, 2.0, 3.0, 4.0, 5.0]
         indices_dtypes = ['int32', 'int64']

@@ -202,7 +188,6 @@ class TestSparseConvert(unittest.TestCase):
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})

     def test_to_sparse_csr(self):
-        with _test_eager_guard():
         x = [[0, 1, 0, 2], [0, 0, 3, 0], [4, 5, 0, 0]]
         crows = [0, 2, 3, 5]
         cols = [1, 3, 2, 0, 1]

@@ -218,7 +203,6 @@ class TestSparseConvert(unittest.TestCase):
     def test_coo_values_grad(self):
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
-        with _test_eager_guard():
         indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
         values = [1.0, 2.0, 3.0, 4.0, 5.0]
         sparse_x = paddle.sparse.sparse_coo_tensor(

@@ -260,7 +244,6 @@ class TestSparseConvert(unittest.TestCase):
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})

     def test_sparse_coo_tensor_grad(self):
-        with _test_eager_guard():
         for device in devices:
             if device == 'cpu' or (
                 device == 'gpu' and paddle.is_compiled_with_cuda()

@@ -284,9 +267,7 @@ class TestSparseConvert(unittest.TestCase):
                 )
                 sparse_x.backward(sparse_out_grad)
                 correct_values_grad = [0, 3]
-                assert np.array_equal(
-                    correct_values_grad, values.grad.numpy()
-                )
+                assert np.array_equal(correct_values_grad, values.grad.numpy())

                 # test the non-zero values is a vector
                 values = [[1, 1], [2, 2]]

@@ -303,12 +284,9 @@ class TestSparseConvert(unittest.TestCase):
                 )
                 sparse_x.backward(sparse_out_grad)
                 correct_values_grad = [[0, 0], [3, 3]]
-                assert np.array_equal(
-                    correct_values_grad, values.grad.numpy()
-                )
+                assert np.array_equal(correct_values_grad, values.grad.numpy())

     def test_sparse_coo_tensor_sorted(self):
-        with _test_eager_guard():
         for device in devices:
             if device == 'cpu' or (
                 device == 'gpu' and paddle.is_compiled_with_cuda()

@@ -326,9 +304,7 @@ class TestSparseConvert(unittest.TestCase):
             assert np.array_equal(
                 indices_sorted, sparse_x.indices().numpy()
             )
-            assert np.array_equal(
-                values_sorted, sparse_x.values().numpy()
-            )
+            assert np.array_equal(values_sorted, sparse_x.values().numpy())

             # test the non-zero values is a vector
             values = [[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]]

@@ -339,13 +315,9 @@ class TestSparseConvert(unittest.TestCase):
             assert np.array_equal(
                 indices_sorted, sparse_x.indices().numpy()
             )
-            assert np.array_equal(
-                values_sorted, sparse_x.values().numpy()
-            )
+            assert np.array_equal(values_sorted, sparse_x.values().numpy())

     def test_batch_csr(self):
-        with _test_eager_guard():
         def verify(dense_x):
             sparse_x = dense_x.to_sparse_csr()
             out = sparse_x.to_dense()

@@ -383,7 +355,6 @@ class TestSparseConvert(unittest.TestCase):
 class TestCooError(unittest.TestCase):
     def test_small_shape(self):
-        with _test_eager_guard():
         with self.assertRaises(ValueError):
             indices = [[2, 3], [0, 2]]
             values = [1, 2]

@@ -394,7 +365,6 @@ class TestCooError(unittest.TestCase):
             )

     def test_same_nnz(self):
-        with _test_eager_guard():
         with self.assertRaises(ValueError):
             # 2. test the nnz of indices must same as nnz of values
             indices = [[1, 2], [1, 0]]

@@ -402,7 +372,6 @@ class TestCooError(unittest.TestCase):
             sparse_x = paddle.sparse.sparse_coo_tensor(indices, values)

     def test_same_dimensions(self):
-        with _test_eager_guard():
         with self.assertRaises(ValueError):
             indices = [[1, 2], [1, 0]]
             values = [1, 2, 3]

@@ -412,7 +381,6 @@ class TestCooError(unittest.TestCase):
             )

     def test_indices_dtype(self):
-        with _test_eager_guard():
         with self.assertRaises(TypeError):
             indices = [[1.0, 2.0], [0, 1]]
             values = [1, 2]

@@ -421,7 +389,6 @@ class TestCooError(unittest.TestCase):
 class TestCsrError(unittest.TestCase):
     def test_dimension1(self):
-        with _test_eager_guard():
         with self.assertRaises(ValueError):
             crows = [0, 1, 2, 3]
             cols = [0, 1, 2]

@@ -432,7 +399,6 @@ class TestCsrError(unittest.TestCase):
             )

     def test_dimension2(self):
-        with _test_eager_guard():
         with self.assertRaises(ValueError):
             crows = [0, 1, 2, 3]
             cols = [0, 1, 2]

@@ -443,7 +409,6 @@ class TestCsrError(unittest.TestCase):
             )

     def test_same_shape1(self):
-        with _test_eager_guard():
         with self.assertRaises(ValueError):
             crows = [0, 1, 2, 3]
             cols = [0, 1, 2, 3]

@@ -454,7 +419,6 @@ class TestCsrError(unittest.TestCase):
             )

     def test_same_shape2(self):
-        with _test_eager_guard():
         with self.assertRaises(ValueError):
             crows = [0, 1, 2, 3]
             cols = [0, 1, 2, 3]

@@ -465,7 +429,6 @@ class TestCsrError(unittest.TestCase):
             )

     def test_same_shape3(self):
-        with _test_eager_guard():
         with self.assertRaises(ValueError):
             crows = [0, 1, 2, 3, 0, 1, 2]
             cols = [0, 1, 2, 3, 0, 1, 2]

@@ -476,7 +439,6 @@ class TestCsrError(unittest.TestCase):
             )

     def test_crows_first_value(self):
-        with _test_eager_guard():
         with self.assertRaises(ValueError):
             crows = [1, 1, 2, 3]
             cols = [0, 1, 2]

@@ -487,7 +449,6 @@ class TestCsrError(unittest.TestCase):
             )

     def test_dtype(self):
-        with _test_eager_guard():
         with self.assertRaises(TypeError):
             crows = [0, 1, 2, 3.0]
             cols = [0, 1, 2]
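Several hunks in this file keep the surrounding FLAGS_retain_grad_for_all_tensor toggling while only the guard line goes away. A sketch of that flag pattern around a sparse round trip with a backward pass, assuming the 2.4-era paddle.fluid namespace; the try/finally is added here for safety, whereas the tests set and reset the flag directly:

import paddle
import paddle.fluid as fluid

fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
try:
    dense_x = paddle.to_tensor(
        [[0.0, 1.0, 0.0, 2.0], [0.0, 0.0, 3.0, 0.0], [4.0, 5.0, 0.0, 0.0]],
        stop_gradient=False,
    )
    sparse_x = dense_x.to_sparse_coo(2)    # intermediate sparse COO tensor
    out = sparse_x.to_dense()
    out.backward(paddle.ones_like(out))
    print(dense_x.grad.numpy())            # gradient on the dense leaf
finally:
    fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})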
python/paddle/fluid/tests/unittests/test_split_op.py @ 1f93de31

@@ -20,7 +20,6 @@ from op_test import OpTest, convert_float_to_uint16
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid import Program, core, program_guard
-from paddle.fluid.framework import _test_eager_guard


 class TestSplitOp(OpTest):

@@ -453,7 +452,6 @@ class API_TestDygraphFluidSplit(unittest.TestCase):
         x1_out = x1.numpy()
         x2_out = x2.numpy()
         ex_x0, ex_x1, ex_x2 = np.split(input_1, 3, axis=1)
-        with _test_eager_guard():
         # input is a variable which shape is [4, 6, 6]
         input = paddle.to_tensor(input_1)
         input.stop_gradient = False

@@ -465,9 +463,7 @@ class API_TestDygraphFluidSplit(unittest.TestCase):
         loss.backward()
         manul_grad = np.zeros_like(input_1)
         manul_grad[:, :2, :] = 1
-        np.testing.assert_allclose(
-            input.gradient(), manul_grad, rtol=1e-05
-        )
+        np.testing.assert_allclose(input.gradient(), manul_grad, rtol=1e-05)
         np.testing.assert_allclose(ex_x0, eager_x0_out, rtol=1e-05)
         np.testing.assert_allclose(ex_x1, eager_x1_out, rtol=1e-05)
         np.testing.assert_allclose(ex_x2, eager_x2_out, rtol=1e-05)

@@ -486,7 +482,6 @@ class API_TestDygraphFluidSplit(unittest.TestCase):
         x1_out = x1.numpy()
         x2_out = x2.numpy()
         ex_x0, ex_x1, ex_x2 = np.split(input_1, 3, axis=1)
-        with _test_eager_guard():
         # input is a variable which shape is [4, 6, 6]
         input = paddle.to_tensor(input_1)
         input.stop_gradient = False

@@ -498,9 +493,7 @@ class API_TestDygraphFluidSplit(unittest.TestCase):
         loss.backward()
         manul_grad = np.zeros_like(input_1)
         manul_grad[:, :2, :] = 1
-        np.testing.assert_allclose(
-            input.gradient(), manul_grad, rtol=1e-05
-        )
+        np.testing.assert_allclose(input.gradient(), manul_grad, rtol=1e-05)
         np.testing.assert_allclose(ex_x0, eager_x0_out, rtol=1e-05)
         np.testing.assert_allclose(ex_x1, eager_x1_out, rtol=1e-05)
         np.testing.assert_allclose(ex_x2, eager_x2_out, rtol=1e-05)

@@ -522,7 +515,6 @@ class API_TestDygraphSplit(unittest.TestCase):
         x2_out = x2.numpy()
         ex_x0, ex_x1, ex_x2 = np.split(input_1, 3, axis=1)

-        with _test_eager_guard():
         # input is a variable which shape is [4, 6, 6]
         input = paddle.to_tensor(input_1)
         input.stop_gradient = False

@@ -534,9 +526,7 @@ class API_TestDygraphSplit(unittest.TestCase):
         loss.backward()
         manul_grad = np.zeros_like(input_1)
         manul_grad[:, :2, :] = 1
-        np.testing.assert_allclose(
-            input.gradient(), manul_grad, rtol=1e-05
-        )
+        np.testing.assert_allclose(input.gradient(), manul_grad, rtol=1e-05)
         np.testing.assert_allclose(ex_x0, eager_x0_out, rtol=1e-05)
         np.testing.assert_allclose(ex_x1, eager_x1_out, rtol=1e-05)
         np.testing.assert_allclose(ex_x2, eager_x2_out, rtol=1e-05)

@@ -570,7 +560,6 @@ class API_TestDygraphSplit(unittest.TestCase):
         out_dy_np = out_dy.numpy()
         ex_out = np.split(input_1, [6], axis=1)
         ex_out = ex_out[0]
-        with _test_eager_guard():
         input = paddle.to_tensor(input_1)
         out_eager = paddle.split(input, [6], axis=1)
         out_eager = out_eager[0]

@@ -612,7 +601,7 @@ class API_TestDygraphSplit(unittest.TestCase):
         np.testing.assert_allclose(ex_x1, x1_out, rtol=1e-05)
         np.testing.assert_allclose(ex_x2, x2_out, rtol=1e-05)

-    def func_negative_one_section(self):
+    def test_negative_one_section(self):
         with fluid.dygraph.guard():
             input_1 = np.random.random([4, 6, 6]).astype("int32")
             # input is a variable which shape is [4, 6, 6]

@@ -622,11 +611,6 @@ class API_TestDygraphSplit(unittest.TestCase):
             x0_out = x0[0].numpy()
             np.testing.assert_array_equal(x0_out, input.numpy())

-    def test_negative_one_section(self):
-        with _test_eager_guard():
-            self.func_negative_one_section()
-        self.func_negative_one_section()
-

 class API_TestEmptySplit(unittest.TestCase):
     def test_axis_input_empty_section(self):
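The last two hunks also retire the func_*/test_* indirection: previously test_negative_one_section ran func_negative_one_section once under the eager guard and once without, and now the helper itself becomes the test. A runnable sketch of the collapsed form, assuming Paddle is installed; the [-1] section argument is inferred from the test name and its final equality check, and other details are illustrative:

import unittest

import numpy as np
import paddle
import paddle.fluid as fluid


class NegativeOneSectionPattern(unittest.TestCase):
    # Before: func_negative_one_section(self) held this body, and a separate
    # test_negative_one_section ran it twice (with and without _test_eager_guard).
    def test_negative_one_section(self):
        with fluid.dygraph.guard():
            input_1 = np.random.random([4, 6, 6]).astype("int32")
            # input is a variable which shape is [4, 6, 6]
            input = paddle.to_tensor(input_1)
            x0 = paddle.split(input, num_or_sections=[-1], axis=1)
            x0_out = x0[0].numpy()
            np.testing.assert_array_equal(x0_out, input.numpy())


if __name__ == "__main__":
    unittest.main()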