Unverified commit 1f93de31, authored by 姜永久, committed by GitHub

rm unittest eager guard tests part20 sparse_mv2split (#48879)

Parent: eb322853
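Every file in this commit gets the same mechanical treatment: the `from paddle.fluid.framework import _test_eager_guard` import is dropped, each `with _test_eager_guard():` wrapper is deleted, and the wrapped test body is dedented one level (presumably because eager mode had become the default dygraph mode by this point, leaving the guard a no-op). A minimal self-contained Python sketch of the before/after shape; the guard stub below is only a placeholder for `paddle.fluid.framework._test_eager_guard`, not Paddle's implementation:

    from contextlib import contextmanager

    @contextmanager
    def _test_eager_guard():
        # Placeholder stand-in; the real guard ran the wrapped block
        # under eager dygraph mode for the duration of the test.
        yield

    # Before the cleanup: every test body is nested inside the guard.
    def test_before():
        with _test_eager_guard():
            assert 1 + 1 == 2

    # After the cleanup: the wrapper and its import are gone and the
    # body is dedented one level; the test logic itself is untouched.
    def test_after():
        assert 1 + 1 == 2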
@@ -19,7 +19,6 @@ import unittest
 import numpy as np
 import paddle
-from paddle.fluid.framework import _test_eager_guard
 paddle.seed(100)
@@ -43,38 +42,37 @@ def get_cuda_version():
 class TestCsrMv(unittest.TestCase):
     # x: csr-matrix, y: dense-vec, out: dense-vec
     def test_mv(self):
-        with _test_eager_guard():
-            paddle.set_default_dtype('float64')
-            origin_x = paddle.rand([64, 32])
-            mask = paddle.randint(0, 2, [64, 32])
-            origin_x = origin_x * mask
-            origin_vec = paddle.rand([32])
-            dense_x = origin_x.detach()
-            dense_x.stop_gradient = False
-            dense_vec = origin_vec.detach()
-            dense_vec.stop_gradient = False
-            dense_out = paddle.mv(dense_x, dense_vec)
-            dense_out.backward()
-            sp_x = origin_x.detach().to_sparse_csr()
-            sp_x.stop_gradient = False
-            sp_vec = origin_vec.detach()
-            sp_vec.stop_gradient = False
-            sp_out = paddle.sparse.mv(sp_x, sp_vec)
-            sp_out.backward()
-            np.testing.assert_allclose(
-                sp_out.numpy(), dense_out.numpy(), rtol=1e-05
-            )
-            np.testing.assert_allclose(
-                sp_x.grad.to_dense().numpy(),
-                (dense_x.grad * mask).numpy(),
-                rtol=1e-05,
-            )
-            np.testing.assert_allclose(
-                sp_vec.grad.numpy(), dense_vec.grad.numpy(), rtol=1e-05
-            )
+        paddle.set_default_dtype('float64')
+        origin_x = paddle.rand([64, 32])
+        mask = paddle.randint(0, 2, [64, 32])
+        origin_x = origin_x * mask
+        origin_vec = paddle.rand([32])
+        dense_x = origin_x.detach()
+        dense_x.stop_gradient = False
+        dense_vec = origin_vec.detach()
+        dense_vec.stop_gradient = False
+        dense_out = paddle.mv(dense_x, dense_vec)
+        dense_out.backward()
+        sp_x = origin_x.detach().to_sparse_csr()
+        sp_x.stop_gradient = False
+        sp_vec = origin_vec.detach()
+        sp_vec.stop_gradient = False
+        sp_out = paddle.sparse.mv(sp_x, sp_vec)
+        sp_out.backward()
+        np.testing.assert_allclose(
+            sp_out.numpy(), dense_out.numpy(), rtol=1e-05
+        )
+        np.testing.assert_allclose(
+            sp_x.grad.to_dense().numpy(),
+            (dense_x.grad * mask).numpy(),
+            rtol=1e-05,
+        )
+        np.testing.assert_allclose(
+            sp_vec.grad.numpy(), dense_vec.grad.numpy(), rtol=1e-05
+        )
 @unittest.skipIf(
@@ -84,38 +82,37 @@ class TestCsrMv(unittest.TestCase):
 class TestCooMv(unittest.TestCase):
     # x: csr-matrix, y: dense-vec, out: dense-vec
     def test_mv(self):
-        with _test_eager_guard():
-            paddle.set_default_dtype('float64')
-            origin_x = paddle.rand([64, 32])
-            mask = paddle.randint(0, 2, [64, 32])
-            origin_x = origin_x * mask
-            origin_vec = paddle.rand([32])
-            dense_x = origin_x.detach()
-            dense_x.stop_gradient = False
-            dense_vec = origin_vec.detach()
-            dense_vec.stop_gradient = False
-            dense_out = paddle.mv(dense_x, dense_vec)
-            dense_out.backward()
-            sp_x = origin_x.detach().to_sparse_coo(sparse_dim=2)
-            sp_x.stop_gradient = False
-            sp_vec = origin_vec.detach()
-            sp_vec.stop_gradient = False
-            sp_out = paddle.sparse.mv(sp_x, sp_vec)
-            sp_out.backward()
-            np.testing.assert_allclose(
-                sp_out.numpy(), dense_out.numpy(), rtol=1e-05
-            )
-            np.testing.assert_allclose(
-                sp_x.grad.to_dense().numpy(),
-                (dense_x.grad * mask).numpy(),
-                rtol=1e-05,
-            )
-            np.testing.assert_allclose(
-                sp_vec.grad.numpy(), dense_vec.grad.numpy(), rtol=1e-05
-            )
+        paddle.set_default_dtype('float64')
+        origin_x = paddle.rand([64, 32])
+        mask = paddle.randint(0, 2, [64, 32])
+        origin_x = origin_x * mask
+        origin_vec = paddle.rand([32])
+        dense_x = origin_x.detach()
+        dense_x.stop_gradient = False
+        dense_vec = origin_vec.detach()
+        dense_vec.stop_gradient = False
+        dense_out = paddle.mv(dense_x, dense_vec)
+        dense_out.backward()
+        sp_x = origin_x.detach().to_sparse_coo(sparse_dim=2)
+        sp_x.stop_gradient = False
+        sp_vec = origin_vec.detach()
+        sp_vec.stop_gradient = False
+        sp_out = paddle.sparse.mv(sp_x, sp_vec)
+        sp_out.backward()
+        np.testing.assert_allclose(
+            sp_out.numpy(), dense_out.numpy(), rtol=1e-05
+        )
+        np.testing.assert_allclose(
+            sp_x.grad.to_dense().numpy(),
+            (dense_x.grad * mask).numpy(),
+            rtol=1e-05,
+        )
+        np.testing.assert_allclose(
+            sp_vec.grad.numpy(), dense_vec.grad.numpy(), rtol=1e-05
+        )
 if __name__ == "__main__":
......
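A note on the `(dense_x.grad * mask)` comparisons in the mv tests above: a sparse tensor's gradient only carries entries at the coordinates stored in its sparsity pattern, so the dense reference gradient must be masked before the two are compared. A small illustrative NumPy sketch of that relationship for `out = x @ v` (not Paddle code; all names are local to the sketch):

    import numpy as np

    rng = np.random.default_rng(1)
    mask = rng.integers(0, 2, (4, 3))
    x = rng.random((4, 3)) * mask  # zeros outside the sparsity pattern
    v = rng.random(3)

    # For out = x @ v with upstream gradient dout, the dense gradient
    # of x is the outer product dout[:, None] * v[None, :].
    dout = rng.random(4)
    dense_grad = np.outer(dout, v)

    # A sparse x stores only the entries inside `mask`, so its gradient
    # exists only at those coordinates: grad[i, j] = dout[i] * v[j].
    rows, cols = np.nonzero(mask)
    sparse_grad = np.zeros_like(dense_grad)
    sparse_grad[rows, cols] = dout[rows] * v[cols]

    # Densified, it matches the masked dense gradient -- the same
    # comparison the tests make with (dense_x.grad * mask).
    np.testing.assert_allclose(sparse_grad, dense_grad * mask)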
@@ -18,7 +18,6 @@ import unittest
 import numpy as np
 import paddle
-from paddle.fluid.framework import _test_eager_guard
 class TestMaxPool3DFunc(unittest.TestCase):
@@ -42,32 +41,31 @@ class TestMaxPool3DFunc(unittest.TestCase):
         self.setPadding()
     def test(self):
-        with _test_eager_guard():
-            self.setUp()
-            self.dense_x.stop_gradient = False
-            sparse_x = self.dense_x.to_sparse_coo(4)
-            sparse_out = paddle.sparse.nn.functional.max_pool3d(
-                sparse_x,
-                self.kernel_sizes,
-                stride=self.strides,
-                padding=self.paddings,
-            )
-            out = sparse_out.to_dense()
-            out.backward(out)
-            dense_x = copy.deepcopy(self.dense_x)
-            dense_out = paddle.nn.functional.max_pool3d(
-                dense_x,
-                self.kernel_sizes,
-                stride=self.strides,
-                padding=self.paddings,
-                data_format='NDHWC',
-            )
-            dense_out.backward(dense_out)
-            # compare with dense
-            assert np.allclose(dense_out.numpy(), out.numpy())
-            assert np.allclose(dense_x.grad.numpy(), self.dense_x.grad.numpy())
+        self.setUp()
+        self.dense_x.stop_gradient = False
+        sparse_x = self.dense_x.to_sparse_coo(4)
+        sparse_out = paddle.sparse.nn.functional.max_pool3d(
+            sparse_x,
+            self.kernel_sizes,
+            stride=self.strides,
+            padding=self.paddings,
+        )
+        out = sparse_out.to_dense()
+        out.backward(out)
+        dense_x = copy.deepcopy(self.dense_x)
+        dense_out = paddle.nn.functional.max_pool3d(
+            dense_x,
+            self.kernel_sizes,
+            stride=self.strides,
+            padding=self.paddings,
+            data_format='NDHWC',
+        )
+        dense_out.backward(dense_out)
+        # compare with dense
+        assert np.allclose(dense_out.numpy(), out.numpy())
+        assert np.allclose(dense_x.grad.numpy(), self.dense_x.grad.numpy())
 class TestStride(TestMaxPool3DFunc):
@@ -102,19 +100,18 @@ class TestInput(TestMaxPool3DFunc):
 class TestMaxPool3DAPI(unittest.TestCase):
     def test(self):
-        with _test_eager_guard():
-            dense_x = paddle.randn((2, 3, 6, 6, 3))
-            sparse_x = dense_x.to_sparse_coo(4)
-            max_pool3d = paddle.sparse.nn.MaxPool3D(
-                kernel_size=3, data_format='NDHWC'
-            )
-            out = max_pool3d(sparse_x)
-            out = out.to_dense()
-            dense_out = paddle.nn.functional.max_pool3d(
-                dense_x, 3, data_format='NDHWC'
-            )
-            assert np.allclose(dense_out.numpy(), out.numpy())
+        dense_x = paddle.randn((2, 3, 6, 6, 3))
+        sparse_x = dense_x.to_sparse_coo(4)
+        max_pool3d = paddle.sparse.nn.MaxPool3D(
+            kernel_size=3, data_format='NDHWC'
+        )
+        out = max_pool3d(sparse_x)
+        out = out.to_dense()
+        dense_out = paddle.nn.functional.max_pool3d(
+            dense_x, 3, data_format='NDHWC'
+        )
+        assert np.allclose(dense_out.numpy(), out.numpy())
 if __name__ == "__main__":
......
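One detail in the max-pool tests above: `out.backward(out)` seeds the backward pass with the output itself as the upstream gradient rather than the implicit all-ones tensor, and the dense branch is seeded the same way, so the two input gradients remain directly comparable. A toy NumPy sketch of what such seeding does for a width-2, stride-2 max pool (illustrative only, not Paddle code):

    import numpy as np

    x = np.array([1.0, 3.0, 2.0, 5.0])
    # Forward: argmax position of each window, then the pooled values.
    idx = np.array([np.argmax(x[i : i + 2]) + i for i in (0, 2)])
    out = x[idx]  # [3., 5.]

    # Backward seeded with `out` itself (the analogue of
    # out.backward(out)): each window's gradient lands on its argmax.
    dout = out
    dx = np.zeros_like(x)
    np.add.at(dx, idx, dout)
    print(dx)  # [0. 3. 0. 5.]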
@@ -18,117 +18,110 @@ import numpy as np
 import scipy.sparse as sp
 import paddle
-from paddle.fluid.framework import _test_eager_guard
 np.random.seed(2022)
 class TestCsrSoftmax(unittest.TestCase):
     def test_softmax2d(self):
-        with _test_eager_guard():
-            mask = np.random.rand(16, 128) < 0.5
-            np_x = np.random.rand(16, 128) * mask
-            np_csr = sp.csr_matrix(np_x)
-            row_number = np_csr.shape[0]
-            np_out = np.array([])
-            for i in range(row_number):
-                start = np_csr.indptr[i]
-                end = np_csr.indptr[i + 1]
-                if start == end:
-                    continue
-                x = np_csr.data[start:end]
-                x_max = np.max(x, keepdims=True)
-                x_exp = np.exp(x - x_max)
-                x_exp_sum = np.sum(x_exp, keepdims=True)
-                np_out = np.concatenate([np_out, x_exp / x_exp_sum])
-            csr = paddle.to_tensor(np_x, stop_gradient=False).to_sparse_csr()
-            m = paddle.sparse.nn.Softmax()
-            out = m(csr)
-            np.testing.assert_allclose(
-                out.crows().numpy(), np_csr.indptr, rtol=1e-05
-            )
-            np.testing.assert_allclose(
-                out.cols().numpy(), np_csr.indices, rtol=1e-05
-            )
-            np.testing.assert_allclose(out.values().numpy(), np_out, rtol=1e-05)
-            # dx = (dout - sum(dout * out)) * out, dout=rand_x
-            out.backward(csr.detach())
-            dx = np.array([])
-            for i in range(row_number):
-                start = np_csr.indptr[i]
-                end = np_csr.indptr[i + 1]
-                if start == end:
-                    continue
-                out = np_out[start:end]
-                dout = np_csr.data[start:end]
-                sum = np.sum(dout * out, keepdims=True)
-                dx = np.concatenate([dx, (dout - sum) * out])
-            np.testing.assert_allclose(
-                csr.grad.crows().numpy(), np_csr.indptr, rtol=1e-05
-            )
-            np.testing.assert_allclose(
-                csr.grad.cols().numpy(), np_csr.indices, rtol=1e-05
-            )
-            np.testing.assert_allclose(csr.grad.values().numpy(), dx, rtol=1e-05)
+        mask = np.random.rand(16, 128) < 0.5
+        np_x = np.random.rand(16, 128) * mask
+        np_csr = sp.csr_matrix(np_x)
+        row_number = np_csr.shape[0]
+        np_out = np.array([])
+        for i in range(row_number):
+            start = np_csr.indptr[i]
+            end = np_csr.indptr[i + 1]
+            if start == end:
+                continue
+            x = np_csr.data[start:end]
+            x_max = np.max(x, keepdims=True)
+            x_exp = np.exp(x - x_max)
+            x_exp_sum = np.sum(x_exp, keepdims=True)
+            np_out = np.concatenate([np_out, x_exp / x_exp_sum])
+        csr = paddle.to_tensor(np_x, stop_gradient=False).to_sparse_csr()
+        m = paddle.sparse.nn.Softmax()
+        out = m(csr)
+        np.testing.assert_allclose(
+            out.crows().numpy(), np_csr.indptr, rtol=1e-05
+        )
+        np.testing.assert_allclose(
+            out.cols().numpy(), np_csr.indices, rtol=1e-05
+        )
+        np.testing.assert_allclose(out.values().numpy(), np_out, rtol=1e-05)
+        # dx = (dout - sum(dout * out)) * out, dout=rand_x
+        out.backward(csr.detach())
+        dx = np.array([])
+        for i in range(row_number):
+            start = np_csr.indptr[i]
+            end = np_csr.indptr[i + 1]
+            if start == end:
+                continue
+            out = np_out[start:end]
+            dout = np_csr.data[start:end]
+            sum = np.sum(dout * out, keepdims=True)
+            dx = np.concatenate([dx, (dout - sum) * out])
+        np.testing.assert_allclose(
+            csr.grad.crows().numpy(), np_csr.indptr, rtol=1e-05
+        )
+        np.testing.assert_allclose(
+            csr.grad.cols().numpy(), np_csr.indices, rtol=1e-05
+        )
+        np.testing.assert_allclose(csr.grad.values().numpy(), dx, rtol=1e-05)
     def test_softmax3d(self):
-        with _test_eager_guard():
-            batchNum = 16
-            mask = np.random.rand(batchNum, 16, 128) < 0.5
-            np_x = np.random.rand(batchNum, 16, 128) * mask
-            np_out_list = []
-            np_out = np.array([])
-            for i in range(batchNum):
-                np_csr = sp.csr_matrix(np_x[i, :, :])
-                row_number = np_csr.shape[0]
-                for j in range(
-                    row_number,
-                ):
-                    start = np_csr.indptr[j]
-                    end = np_csr.indptr[j + 1]
-                    if start == end:
-                        continue
-                    x = np_csr.data[start:end]
-                    x_max = np.max(x, keepdims=True)
-                    x_exp = np.exp(x - x_max)
-                    x_exp_sum = np.sum(x_exp, keepdims=True)
-                    np_out_list.append(x_exp / x_exp_sum)
-                    np_out = np.concatenate([np_out, x_exp / x_exp_sum])
-            csr = paddle.to_tensor(np_x, stop_gradient=False).to_sparse_csr()
-            m = paddle.sparse.nn.Softmax()
-            out = m(csr)
-            np.testing.assert_allclose(out.values().numpy(), np_out, rtol=1e-05)
-            # dx = (dout - sum(dout * out)) * out, dout=rand_x
-            out.backward(csr.detach())
-            dx = np.array([])
-            batch_offset = 0
-            for i in range(batchNum):
-                np_csr = sp.csr_matrix(np_x[i, :, :])
-                row_number = np_csr.shape[0]
-                for j in range(row_number):
-                    start = np_csr.indptr[j]
-                    end = np_csr.indptr[j + 1]
-                    if start == end:
-                        continue
-                    dout = np_csr.data[start:end]
-                    out = np_out[batch_offset + start : batch_offset + end]
-                    sum = np.sum(dout * out, keepdims=True)
-                    dx = np.concatenate([dx, (dout - sum) * out])
-                batch_offset += np_csr.nnz
-            np.testing.assert_allclose(
-                csr.grad.values().numpy(), dx, rtol=1e-05
-            )
+        batchNum = 16
+        mask = np.random.rand(batchNum, 16, 128) < 0.5
+        np_x = np.random.rand(batchNum, 16, 128) * mask
+        np_out_list = []
+        np_out = np.array([])
+        for i in range(batchNum):
+            np_csr = sp.csr_matrix(np_x[i, :, :])
+            row_number = np_csr.shape[0]
+            for j in range(
+                row_number,
+            ):
+                start = np_csr.indptr[j]
+                end = np_csr.indptr[j + 1]
+                if start == end:
+                    continue
+                x = np_csr.data[start:end]
+                x_max = np.max(x, keepdims=True)
+                x_exp = np.exp(x - x_max)
+                x_exp_sum = np.sum(x_exp, keepdims=True)
+                np_out_list.append(x_exp / x_exp_sum)
+                np_out = np.concatenate([np_out, x_exp / x_exp_sum])
+        csr = paddle.to_tensor(np_x, stop_gradient=False).to_sparse_csr()
+        m = paddle.sparse.nn.Softmax()
+        out = m(csr)
+        np.testing.assert_allclose(out.values().numpy(), np_out, rtol=1e-05)
+        # dx = (dout - sum(dout * out)) * out, dout=rand_x
+        out.backward(csr.detach())
+        dx = np.array([])
+        batch_offset = 0
+        for i in range(batchNum):
+            np_csr = sp.csr_matrix(np_x[i, :, :])
+            row_number = np_csr.shape[0]
+            for j in range(row_number):
+                start = np_csr.indptr[j]
+                end = np_csr.indptr[j + 1]
+                if start == end:
+                    continue
+                dout = np_csr.data[start:end]
+                out = np_out[batch_offset + start : batch_offset + end]
+                sum = np.sum(dout * out, keepdims=True)
+                dx = np.concatenate([dx, (dout - sum) * out])
+            batch_offset += np_csr.nnz
+        np.testing.assert_allclose(csr.grad.values().numpy(), dx, rtol=1e-05)
 if __name__ == "__main__":
......
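An aside on the `# dx = (dout - sum(dout * out)) * out` comment in the softmax tests above: that is the softmax vector-Jacobian product, applied row by row to the stored non-zeros. A self-contained NumPy check of the identity against the explicit Jacobian (illustrative, independent of Paddle):

    import numpy as np

    rng = np.random.default_rng(0)
    x = rng.random(8)

    # Row softmax, computed the same way as the reference loop above:
    # shift by the max for stability, exponentiate, normalize.
    out = np.exp(x - np.max(x))
    out /= np.sum(out)

    # The identity used in the test comment.
    dout = rng.random(8)
    dx = (dout - np.sum(dout * out)) * out

    # Cross-check against the explicit softmax Jacobian,
    # J[i, j] = out[i] * ((i == j) - out[j]).
    J = np.diag(out) - np.outer(out, out)
    np.testing.assert_allclose(dx, J @ dout)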
@@ -17,38 +17,36 @@ import unittest
 import numpy as np
 import paddle
-from paddle.fluid.framework import _test_eager_guard
 class TestTranspose(unittest.TestCase):
     # x: sparse, out: sparse
     def check_result(self, x_shape, dims, format):
-        with _test_eager_guard():
-            mask = paddle.randint(0, 2, x_shape).astype("float32")
-            # "+ 1" to make sure that all zero elements in "origin_x" is caused by multiplying by "mask",
-            # or the backward checks may fail.
-            origin_x = (paddle.rand(x_shape, dtype='float32') + 1) * mask
-            dense_x = origin_x.detach()
-            dense_x.stop_gradient = False
-            dense_out = paddle.transpose(dense_x, dims)
+        mask = paddle.randint(0, 2, x_shape).astype("float32")
+        # "+ 1" to make sure that all zero elements in "origin_x" is caused by multiplying by "mask",
+        # or the backward checks may fail.
+        origin_x = (paddle.rand(x_shape, dtype='float32') + 1) * mask
+        dense_x = origin_x.detach()
+        dense_x.stop_gradient = False
+        dense_out = paddle.transpose(dense_x, dims)
-            if format == "coo":
-                sp_x = origin_x.detach().to_sparse_coo(len(x_shape))
-            else:
-                sp_x = origin_x.detach().to_sparse_csr()
-            sp_x.stop_gradient = False
-            sp_out = paddle.sparse.transpose(sp_x, dims)
+        if format == "coo":
+            sp_x = origin_x.detach().to_sparse_coo(len(x_shape))
+        else:
+            sp_x = origin_x.detach().to_sparse_csr()
+        sp_x.stop_gradient = False
+        sp_out = paddle.sparse.transpose(sp_x, dims)
-            np.testing.assert_allclose(
-                sp_out.to_dense().numpy(), dense_out.numpy(), rtol=1e-05
-            )
-            dense_out.backward()
-            sp_out.backward()
-            np.testing.assert_allclose(
-                sp_x.grad.to_dense().numpy(),
-                (dense_x.grad * mask).numpy(),
-                rtol=1e-05,
-            )
+        np.testing.assert_allclose(
+            sp_out.to_dense().numpy(), dense_out.numpy(), rtol=1e-05
+        )
+        dense_out.backward()
+        sp_out.backward()
+        np.testing.assert_allclose(
+            sp_x.grad.to_dense().numpy(),
+            (dense_x.grad * mask).numpy(),
+            rtol=1e-05,
+        )
     def test_transpose_2d(self):
         self.check_result([2, 5], [0, 1], 'coo')
......
@@ -20,7 +20,6 @@ from op_test import OpTest, convert_float_to_uint16
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid import Program, core, program_guard
-from paddle.fluid.framework import _test_eager_guard
 class TestSplitOp(OpTest):
@@ -453,24 +452,21 @@ class API_TestDygraphFluidSplit(unittest.TestCase):
         x1_out = x1.numpy()
         x2_out = x2.numpy()
         ex_x0, ex_x1, ex_x2 = np.split(input_1, 3, axis=1)
-        with _test_eager_guard():
-            # input is a variable which shape is [4, 6, 6]
-            input = paddle.to_tensor(input_1)
-            input.stop_gradient = False
-            x0, x1, x2 = fluid.layers.split(input, num_or_sections=3, dim=1)
-            eager_x0_out = x0.numpy()
-            eager_x1_out = x1.numpy()
-            eager_x2_out = x2.numpy()
-            loss = x0.sum()
-            loss.backward()
-            manul_grad = np.zeros_like(input_1)
-            manul_grad[:, :2, :] = 1
-            np.testing.assert_allclose(
-                input.gradient(), manul_grad, rtol=1e-05
-            )
-            np.testing.assert_allclose(ex_x0, eager_x0_out, rtol=1e-05)
-            np.testing.assert_allclose(ex_x1, eager_x1_out, rtol=1e-05)
-            np.testing.assert_allclose(ex_x2, eager_x2_out, rtol=1e-05)
+        # input is a variable which shape is [4, 6, 6]
+        input = paddle.to_tensor(input_1)
+        input.stop_gradient = False
+        x0, x1, x2 = fluid.layers.split(input, num_or_sections=3, dim=1)
+        eager_x0_out = x0.numpy()
+        eager_x1_out = x1.numpy()
+        eager_x2_out = x2.numpy()
+        loss = x0.sum()
+        loss.backward()
+        manul_grad = np.zeros_like(input_1)
+        manul_grad[:, :2, :] = 1
+        np.testing.assert_allclose(input.gradient(), manul_grad, rtol=1e-05)
+        np.testing.assert_allclose(ex_x0, eager_x0_out, rtol=1e-05)
+        np.testing.assert_allclose(ex_x1, eager_x1_out, rtol=1e-05)
+        np.testing.assert_allclose(ex_x2, eager_x2_out, rtol=1e-05)
         np.testing.assert_allclose(ex_x0, x0_out, rtol=1e-05)
         np.testing.assert_allclose(ex_x1, x1_out, rtol=1e-05)
@@ -486,24 +482,21 @@ class API_TestDygraphFluidSplit(unittest.TestCase):
         x1_out = x1.numpy()
         x2_out = x2.numpy()
         ex_x0, ex_x1, ex_x2 = np.split(input_1, 3, axis=1)
-        with _test_eager_guard():
-            # input is a variable which shape is [4, 6, 6]
-            input = paddle.to_tensor(input_1)
-            input.stop_gradient = False
-            x0, x1, x2 = fluid.layers.split(input, [2, 2, 2], dim=1)
-            eager_x0_out = x0.numpy()
-            eager_x1_out = x1.numpy()
-            eager_x2_out = x2.numpy()
-            loss = x0.sum()
-            loss.backward()
-            manul_grad = np.zeros_like(input_1)
-            manul_grad[:, :2, :] = 1
-            np.testing.assert_allclose(
-                input.gradient(), manul_grad, rtol=1e-05
-            )
-            np.testing.assert_allclose(ex_x0, eager_x0_out, rtol=1e-05)
-            np.testing.assert_allclose(ex_x1, eager_x1_out, rtol=1e-05)
-            np.testing.assert_allclose(ex_x2, eager_x2_out, rtol=1e-05)
+        # input is a variable which shape is [4, 6, 6]
+        input = paddle.to_tensor(input_1)
+        input.stop_gradient = False
+        x0, x1, x2 = fluid.layers.split(input, [2, 2, 2], dim=1)
+        eager_x0_out = x0.numpy()
+        eager_x1_out = x1.numpy()
+        eager_x2_out = x2.numpy()
+        loss = x0.sum()
+        loss.backward()
+        manul_grad = np.zeros_like(input_1)
+        manul_grad[:, :2, :] = 1
+        np.testing.assert_allclose(input.gradient(), manul_grad, rtol=1e-05)
+        np.testing.assert_allclose(ex_x0, eager_x0_out, rtol=1e-05)
+        np.testing.assert_allclose(ex_x1, eager_x1_out, rtol=1e-05)
+        np.testing.assert_allclose(ex_x2, eager_x2_out, rtol=1e-05)
         np.testing.assert_allclose(ex_x0, x0_out, rtol=1e-05)
         np.testing.assert_allclose(ex_x1, x1_out, rtol=1e-05)
@@ -522,24 +515,21 @@ class API_TestDygraphSplit(unittest.TestCase):
         x2_out = x2.numpy()
         ex_x0, ex_x1, ex_x2 = np.split(input_1, 3, axis=1)
-        with _test_eager_guard():
-            # input is a variable which shape is [4, 6, 6]
-            input = paddle.to_tensor(input_1)
-            input.stop_gradient = False
-            x0, x1, x2 = paddle.split(input, num_or_sections=3, axis=1)
-            eager_x0_out = x0.numpy()
-            eager_x1_out = x1.numpy()
-            eager_x2_out = x2.numpy()
-            loss = x0.sum()
-            loss.backward()
-            manul_grad = np.zeros_like(input_1)
-            manul_grad[:, :2, :] = 1
-            np.testing.assert_allclose(
-                input.gradient(), manul_grad, rtol=1e-05
-            )
-            np.testing.assert_allclose(ex_x0, eager_x0_out, rtol=1e-05)
-            np.testing.assert_allclose(ex_x1, eager_x1_out, rtol=1e-05)
-            np.testing.assert_allclose(ex_x2, eager_x2_out, rtol=1e-05)
+        # input is a variable which shape is [4, 6, 6]
+        input = paddle.to_tensor(input_1)
+        input.stop_gradient = False
+        x0, x1, x2 = paddle.split(input, num_or_sections=3, axis=1)
+        eager_x0_out = x0.numpy()
+        eager_x1_out = x1.numpy()
+        eager_x2_out = x2.numpy()
+        loss = x0.sum()
+        loss.backward()
+        manul_grad = np.zeros_like(input_1)
+        manul_grad[:, :2, :] = 1
+        np.testing.assert_allclose(input.gradient(), manul_grad, rtol=1e-05)
+        np.testing.assert_allclose(ex_x0, eager_x0_out, rtol=1e-05)
+        np.testing.assert_allclose(ex_x1, eager_x1_out, rtol=1e-05)
+        np.testing.assert_allclose(ex_x2, eager_x2_out, rtol=1e-05)
         np.testing.assert_allclose(ex_x0, x0_out, rtol=1e-05)
         np.testing.assert_allclose(ex_x1, x1_out, rtol=1e-05)
@@ -570,12 +560,11 @@ class API_TestDygraphSplit(unittest.TestCase):
         out_dy_np = out_dy.numpy()
         ex_out = np.split(input_1, [6], axis=1)
         ex_out = ex_out[0]
-        with _test_eager_guard():
-            input = paddle.to_tensor(input_1)
-            out_eager = paddle.split(input, [6], axis=1)
-            out_eager = out_eager[0]
-            out_eager_np = out_dy.numpy()
-            np.testing.assert_allclose(ex_out, out_eager_np, rtol=1e-05)
+        input = paddle.to_tensor(input_1)
+        out_eager = paddle.split(input, [6], axis=1)
+        out_eager = out_eager[0]
+        out_eager_np = out_dy.numpy()
+        np.testing.assert_allclose(ex_out, out_eager_np, rtol=1e-05)
         np.testing.assert_allclose(ex_out, out_dy_np, rtol=1e-05)
     def test_out_tensor_input(self):
@@ -612,7 +601,7 @@ class API_TestDygraphSplit(unittest.TestCase):
         np.testing.assert_allclose(ex_x1, x1_out, rtol=1e-05)
         np.testing.assert_allclose(ex_x2, x2_out, rtol=1e-05)
-    def func_negative_one_section(self):
+    def test_negative_one_section(self):
         with fluid.dygraph.guard():
             input_1 = np.random.random([4, 6, 6]).astype("int32")
             # input is a variable which shape is [4, 6, 6]
@@ -622,11 +611,6 @@ class API_TestDygraphSplit(unittest.TestCase):
             x0_out = x0[0].numpy()
             np.testing.assert_array_equal(x0_out, input.numpy())
-    def test_negative_one_section(self):
-        with _test_eager_guard():
-            self.func_negative_one_section()
-        self.func_negative_one_section()
 class API_TestEmptySplit(unittest.TestCase):
     def test_axis_input_empty_section(self):
......
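Finally, the hand-built `manul_grad` in the split tests above encodes a simple expectation: with `loss = x0.sum()` and `x0` the first of three equal sections along axis 1 of a (4, 6, 6) input, the input gradient is 1 over the slice `[:, :2, :]` and 0 everywhere else. A short NumPy rendering of the same expectation (illustrative only):

    import numpy as np

    input_1 = np.random.random([4, 6, 6])
    x0, x1, x2 = np.split(input_1, 3, axis=1)  # each piece is (4, 2, 6)

    # d(sum(x0)) / d(input) is an indicator over the slice x0 came from.
    manual_grad = np.zeros_like(input_1)
    manual_grad[:, :2, :] = 1
    assert manual_grad.sum() == x0.size  # 4 * 2 * 6 = 48 ones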