Unverified commit 1f93de31, authored by 姜永久, committed by GitHub

rm unittest eager guard tests part20 sparse_mv2split (#48879)

Parent eb322853
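Every hunk below applies the same mechanical change: the `_test_eager_guard` import and `with` block are removed and the test body is de-indented, since eager mode is now the default. A minimal sketch of the resulting pattern (illustrative, not part of the diff; assumes a build where `paddle.sparse.mv` is supported — the real tests skip on unsupported CUDA versions):

```python
import numpy as np
import paddle

paddle.set_default_dtype('float64')
x = paddle.rand([8, 4])
vec = paddle.rand([4])
# No `with _test_eager_guard():` wrapper is needed anymore.
sp_out = paddle.sparse.mv(x.to_sparse_csr(), vec)
np.testing.assert_allclose(
    sp_out.numpy(), paddle.mv(x, vec).numpy(), rtol=1e-05
)
```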
@@ -19,7 +19,6 @@ import unittest
import numpy as np
import paddle
from paddle.fluid.framework import _test_eager_guard
paddle.seed(100)
@@ -43,38 +42,37 @@ def get_cuda_version():
class TestCsrMv(unittest.TestCase):
# x: csr-matrix, y: dense-vec, out: dense-vec
def test_mv(self):
with _test_eager_guard():
paddle.set_default_dtype('float64')
origin_x = paddle.rand([64, 32])
mask = paddle.randint(0, 2, [64, 32])
origin_x = origin_x * mask
origin_vec = paddle.rand([32])
dense_x = origin_x.detach()
dense_x.stop_gradient = False
dense_vec = origin_vec.detach()
dense_vec.stop_gradient = False
dense_out = paddle.mv(dense_x, dense_vec)
dense_out.backward()
sp_x = origin_x.detach().to_sparse_csr()
sp_x.stop_gradient = False
sp_vec = origin_vec.detach()
sp_vec.stop_gradient = False
sp_out = paddle.sparse.mv(sp_x, sp_vec)
sp_out.backward()
np.testing.assert_allclose(
sp_out.numpy(), dense_out.numpy(), rtol=1e-05
)
np.testing.assert_allclose(
sp_x.grad.to_dense().numpy(),
(dense_x.grad * mask).numpy(),
rtol=1e-05,
)
np.testing.assert_allclose(
sp_vec.grad.numpy(), dense_vec.grad.numpy(), rtol=1e-05
)
paddle.set_default_dtype('float64')
origin_x = paddle.rand([64, 32])
mask = paddle.randint(0, 2, [64, 32])
origin_x = origin_x * mask
origin_vec = paddle.rand([32])
dense_x = origin_x.detach()
dense_x.stop_gradient = False
dense_vec = origin_vec.detach()
dense_vec.stop_gradient = False
dense_out = paddle.mv(dense_x, dense_vec)
dense_out.backward()
sp_x = origin_x.detach().to_sparse_csr()
sp_x.stop_gradient = False
sp_vec = origin_vec.detach()
sp_vec.stop_gradient = False
sp_out = paddle.sparse.mv(sp_x, sp_vec)
sp_out.backward()
np.testing.assert_allclose(
sp_out.numpy(), dense_out.numpy(), rtol=1e-05
)
np.testing.assert_allclose(
sp_x.grad.to_dense().numpy(),
(dense_x.grad * mask).numpy(),
rtol=1e-05,
)
np.testing.assert_allclose(
sp_vec.grad.numpy(), dense_vec.grad.numpy(), rtol=1e-05
)
@unittest.skipIf(
@@ -84,38 +82,37 @@ class TestCsrMv(unittest.TestCase):
class TestCooMv(unittest.TestCase):
# x: coo-matrix, y: dense-vec, out: dense-vec
def test_mv(self):
with _test_eager_guard():
paddle.set_default_dtype('float64')
origin_x = paddle.rand([64, 32])
mask = paddle.randint(0, 2, [64, 32])
origin_x = origin_x * mask
origin_vec = paddle.rand([32])
dense_x = origin_x.detach()
dense_x.stop_gradient = False
dense_vec = origin_vec.detach()
dense_vec.stop_gradient = False
dense_out = paddle.mv(dense_x, dense_vec)
dense_out.backward()
sp_x = origin_x.detach().to_sparse_coo(sparse_dim=2)
sp_x.stop_gradient = False
sp_vec = origin_vec.detach()
sp_vec.stop_gradient = False
sp_out = paddle.sparse.mv(sp_x, sp_vec)
sp_out.backward()
np.testing.assert_allclose(
sp_out.numpy(), dense_out.numpy(), rtol=1e-05
)
np.testing.assert_allclose(
sp_x.grad.to_dense().numpy(),
(dense_x.grad * mask).numpy(),
rtol=1e-05,
)
np.testing.assert_allclose(
sp_vec.grad.numpy(), dense_vec.grad.numpy(), rtol=1e-05
)
paddle.set_default_dtype('float64')
origin_x = paddle.rand([64, 32])
mask = paddle.randint(0, 2, [64, 32])
origin_x = origin_x * mask
origin_vec = paddle.rand([32])
dense_x = origin_x.detach()
dense_x.stop_gradient = False
dense_vec = origin_vec.detach()
dense_vec.stop_gradient = False
dense_out = paddle.mv(dense_x, dense_vec)
dense_out.backward()
sp_x = origin_x.detach().to_sparse_coo(sparse_dim=2)
sp_x.stop_gradient = False
sp_vec = origin_vec.detach()
sp_vec.stop_gradient = False
sp_out = paddle.sparse.mv(sp_x, sp_vec)
sp_out.backward()
np.testing.assert_allclose(
sp_out.numpy(), dense_out.numpy(), rtol=1e-05
)
np.testing.assert_allclose(
sp_x.grad.to_dense().numpy(),
(dense_x.grad * mask).numpy(),
rtol=1e-05,
)
np.testing.assert_allclose(
sp_vec.grad.numpy(), dense_vec.grad.numpy(), rtol=1e-05
)
if __name__ == "__main__":
......
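The gradient assertions above compare against `dense_x.grad * mask` because a sparse tensor's gradient only carries entries on its sparsity pattern. A small numpy sketch of that masking identity (illustrative, with hypothetical shapes):

```python
import numpy as np

mask = (np.random.rand(4, 3) < 0.5).astype('float64')
dense_grad = np.random.rand(4, 3)   # reference gradient from the dense path
sparse_grad = dense_grad * mask     # what sp_x.grad.to_dense() is expected to hold
assert np.all(sparse_grad[mask == 0] == 0)  # no gradient off the pattern
```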
@@ -18,7 +18,6 @@ import unittest
import numpy as np
import paddle
from paddle.fluid.framework import _test_eager_guard
class TestMaxPool3DFunc(unittest.TestCase):
@@ -42,32 +41,31 @@ class TestMaxPool3DFunc(unittest.TestCase):
self.setPadding()
def test(self):
with _test_eager_guard():
self.setUp()
self.dense_x.stop_gradient = False
sparse_x = self.dense_x.to_sparse_coo(4)
sparse_out = paddle.sparse.nn.functional.max_pool3d(
sparse_x,
self.kernel_sizes,
stride=self.strides,
padding=self.paddings,
)
out = sparse_out.to_dense()
out.backward(out)
dense_x = copy.deepcopy(self.dense_x)
dense_out = paddle.nn.functional.max_pool3d(
dense_x,
self.kernel_sizes,
stride=self.strides,
padding=self.paddings,
data_format='NDHWC',
)
dense_out.backward(dense_out)
# compare with dense
assert np.allclose(dense_out.numpy(), out.numpy())
assert np.allclose(dense_x.grad.numpy(), self.dense_x.grad.numpy())
self.setUp()
self.dense_x.stop_gradient = False
sparse_x = self.dense_x.to_sparse_coo(4)
sparse_out = paddle.sparse.nn.functional.max_pool3d(
sparse_x,
self.kernel_sizes,
stride=self.strides,
padding=self.paddings,
)
out = sparse_out.to_dense()
out.backward(out)
dense_x = copy.deepcopy(self.dense_x)
dense_out = paddle.nn.functional.max_pool3d(
dense_x,
self.kernel_sizes,
stride=self.strides,
padding=self.paddings,
data_format='NDHWC',
)
dense_out.backward(dense_out)
# compare with dense
assert np.allclose(dense_out.numpy(), out.numpy())
assert np.allclose(dense_x.grad.numpy(), self.dense_x.grad.numpy())
class TestStride(TestMaxPool3DFunc):
@@ -102,19 +100,18 @@ class TestInput(TestMaxPool3DFunc):
class TestMaxPool3DAPI(unittest.TestCase):
def test(self):
with _test_eager_guard():
dense_x = paddle.randn((2, 3, 6, 6, 3))
sparse_x = dense_x.to_sparse_coo(4)
max_pool3d = paddle.sparse.nn.MaxPool3D(
kernel_size=3, data_format='NDHWC'
)
out = max_pool3d(sparse_x)
out = out.to_dense()
dense_out = paddle.nn.functional.max_pool3d(
dense_x, 3, data_format='NDHWC'
)
assert np.allclose(dense_out.numpy(), out.numpy())
dense_x = paddle.randn((2, 3, 6, 6, 3))
sparse_x = dense_x.to_sparse_coo(4)
max_pool3d = paddle.sparse.nn.MaxPool3D(
kernel_size=3, data_format='NDHWC'
)
out = max_pool3d(sparse_x)
out = out.to_dense()
dense_out = paddle.nn.functional.max_pool3d(
dense_x, 3, data_format='NDHWC'
)
assert np.allclose(dense_out.numpy(), out.numpy())
if __name__ == "__main__":
......
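In the pooling tests above, `out.backward(out)` seeds backpropagation with the output itself as the incoming gradient (a vector-Jacobian product with `dout = out`), so the sparse and dense paths receive identical upstream gradients. A standalone sketch (illustrative values):

```python
import paddle

x = paddle.randn([1, 2, 4, 4, 1])  # NDHWC, as in the tests
x.stop_gradient = False
y = paddle.nn.functional.max_pool3d(x, kernel_size=2, data_format='NDHWC')
y.backward(y)  # same gradient as differentiating sum(0.5 * y ** 2)
print(x.grad.shape)  # gradient flows back only to each window's argmax
```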
@@ -18,117 +18,110 @@ import numpy as np
import scipy.sparse as sp
import paddle
from paddle.fluid.framework import _test_eager_guard
np.random.seed(2022)
class TestCsrSoftmax(unittest.TestCase):
def test_softmax2d(self):
with _test_eager_guard():
mask = np.random.rand(16, 128) < 0.5
np_x = np.random.rand(16, 128) * mask
np_csr = sp.csr_matrix(np_x)
mask = np.random.rand(16, 128) < 0.5
np_x = np.random.rand(16, 128) * mask
np_csr = sp.csr_matrix(np_x)
row_number = np_csr.shape[0]
np_out = np.array([])
for i in range(row_number):
start = np_csr.indptr[i]
end = np_csr.indptr[i + 1]
if start == end:
continue
x = np_csr.data[start:end]
x_max = np.max(x, keepdims=True)
x_exp = np.exp(x - x_max)
x_exp_sum = np.sum(x_exp, keepdims=True)
np_out = np.concatenate([np_out, x_exp / x_exp_sum])
csr = paddle.to_tensor(np_x, stop_gradient=False).to_sparse_csr()
m = paddle.sparse.nn.Softmax()
out = m(csr)
np.testing.assert_allclose(
out.crows().numpy(), np_csr.indptr, rtol=1e-05
)
np.testing.assert_allclose(
out.cols().numpy(), np_csr.indices, rtol=1e-05
)
np.testing.assert_allclose(out.values().numpy(), np_out, rtol=1e-05)
# dx = (dout - sum(dout * out)) * out, dout=rand_x
out.backward(csr.detach())
dx = np.array([])
for i in range(row_number):
start = np_csr.indptr[i]
end = np_csr.indptr[i + 1]
if start == end:
continue
out = np_out[start:end]
dout = np_csr.data[start:end]
sum = np.sum(dout * out, keepdims=True)
dx = np.concatenate([dx, (dout - sum) * out])
np.testing.assert_allclose(
csr.grad.crows().numpy(), np_csr.indptr, rtol=1e-05
)
np.testing.assert_allclose(
csr.grad.cols().numpy(), np_csr.indices, rtol=1e-05
)
np.testing.assert_allclose(csr.grad.values().numpy(), dx, rtol=1e-05)
def test_softmax3d(self):
batchNum = 16
mask = np.random.rand(batchNum, 16, 128) < 0.5
np_x = np.random.rand(batchNum, 16, 128) * mask
np_out_list = []
np_out = np.array([])
for i in range(batchNum):
np_csr = sp.csr_matrix(np_x[i, :, :])
row_number = np_csr.shape[0]
np_out = np.array([])
for i in range(row_number):
start = np_csr.indptr[i]
end = np_csr.indptr[i + 1]
for j in range(row_number):
start = np_csr.indptr[j]
end = np_csr.indptr[j + 1]
if start == end:
continue
x = np_csr.data[start:end]
x_max = np.max(x, keepdims=True)
x_exp = np.exp(x - x_max)
x_exp_sum = np.sum(x_exp, keepdims=True)
np_out_list.append(x_exp / x_exp_sum)
np_out = np.concatenate([np_out, x_exp / x_exp_sum])
csr = paddle.to_tensor(np_x, stop_gradient=False).to_sparse_csr()
m = paddle.sparse.nn.Softmax()
out = m(csr)
np.testing.assert_allclose(
out.crows().numpy(), np_csr.indptr, rtol=1e-05
)
np.testing.assert_allclose(
out.cols().numpy(), np_csr.indices, rtol=1e-05
)
np.testing.assert_allclose(out.values().numpy(), np_out, rtol=1e-05)
# dx = (dout - sum(dout * out)) * out, dout=rand_x
out.backward(csr.detach())
dx = np.array([])
for i in range(row_number):
start = np_csr.indptr[i]
end = np_csr.indptr[i + 1]
csr = paddle.to_tensor(np_x, stop_gradient=False).to_sparse_csr()
m = paddle.sparse.nn.Softmax()
out = m(csr)
np.testing.assert_allclose(out.values().numpy(), np_out, rtol=1e-05)
# dx = (dout - sum(dout * out)) * out, dout=rand_x
out.backward(csr.detach())
dx = np.array([])
batch_offset = 0
for i in range(batchNum):
np_csr = sp.csr_matrix(np_x[i, :, :])
row_number = np_csr.shape[0]
for j in range(row_number):
start = np_csr.indptr[j]
end = np_csr.indptr[j + 1]
if start == end:
continue
out = np_out[start:end]
dout = np_csr.data[start:end]
out = np_out[batch_offset + start : batch_offset + end]
sum = np.sum(dout * out, keepdims=True)
dx = np.concatenate([dx, (dout - sum) * out])
np.testing.assert_allclose(
csr.grad.crows().numpy(), np_csr.indptr, rtol=1e-05
)
np.testing.assert_allclose(
csr.grad.cols().numpy(), np_csr.indices, rtol=1e-05
)
np.testing.assert_allclose(
csr.grad.values().numpy(), dx, rtol=1e-05
)
batch_offset += np_csr.nnz
def test_softmax3d(self):
with _test_eager_guard():
batchNum = 16
mask = np.random.rand(batchNum, 16, 128) < 0.5
np_x = np.random.rand(batchNum, 16, 128) * mask
np_out_list = []
np_out = np.array([])
for i in range(batchNum):
np_csr = sp.csr_matrix(np_x[i, :, :])
row_number = np_csr.shape[0]
for j in range(row_number):
start = np_csr.indptr[j]
end = np_csr.indptr[j + 1]
if start == end:
continue
x = np_csr.data[start:end]
x_max = np.max(x, keepdims=True)
x_exp = np.exp(x - x_max)
x_exp_sum = np.sum(x_exp, keepdims=True)
np_out_list.append(x_exp / x_exp_sum)
np_out = np.concatenate([np_out, x_exp / x_exp_sum])
csr = paddle.to_tensor(np_x, stop_gradient=False).to_sparse_csr()
m = paddle.sparse.nn.Softmax()
out = m(csr)
np.testing.assert_allclose(out.values().numpy(), np_out, rtol=1e-05)
# dx = (dout - sum(dout * out)) * out, dout=rand_x
out.backward(csr.detach())
dx = np.array([])
batch_offset = 0
for i in range(batchNum):
np_csr = sp.csr_matrix(np_x[i, :, :])
row_number = np_csr.shape[0]
for j in range(row_number):
start = np_csr.indptr[j]
end = np_csr.indptr[j + 1]
if start == end:
continue
dout = np_csr.data[start:end]
out = np_out[batch_offset + start : batch_offset + end]
sum = np.sum(dout * out, keepdims=True)
dx = np.concatenate([dx, (dout - sum) * out])
batch_offset += np_csr.nnz
np.testing.assert_allclose(
csr.grad.values().numpy(), dx, rtol=1e-05
)
np.testing.assert_allclose(csr.grad.values().numpy(), dx, rtol=1e-05)
if __name__ == "__main__":
......
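The backward formula checked above, `dx = (dout - sum(dout * out)) * out`, is the row-wise softmax vector-Jacobian product. A short derivation, applied independently to each non-empty CSR row:

```latex
y_i = \frac{e^{x_i}}{\sum_j e^{x_j}}, \qquad
\frac{\partial y_i}{\partial x_k} = y_i(\delta_{ik} - y_k)
\;\Longrightarrow\;
\bar{x}_k = \sum_i \bar{y}_i \, y_i(\delta_{ik} - y_k)
          = \Big(\bar{y}_k - \sum_i \bar{y}_i y_i\Big)\, y_k
```

with `out` standing for \(y\) and `dout` for \(\bar{y}\).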
@@ -17,38 +17,36 @@ import unittest
import numpy as np
import paddle
from paddle.fluid.framework import _test_eager_guard
class TestTranspose(unittest.TestCase):
# x: sparse, out: sparse
def check_result(self, x_shape, dims, format):
with _test_eager_guard():
mask = paddle.randint(0, 2, x_shape).astype("float32")
# "+ 1" to make sure that all zero elements in "origin_x" is caused by multiplying by "mask",
# or the backward checks may fail.
origin_x = (paddle.rand(x_shape, dtype='float32') + 1) * mask
dense_x = origin_x.detach()
dense_x.stop_gradient = False
dense_out = paddle.transpose(dense_x, dims)
mask = paddle.randint(0, 2, x_shape).astype("float32")
# "+ 1" to make sure that all zero elements in "origin_x" is caused by multiplying by "mask",
# or the backward checks may fail.
origin_x = (paddle.rand(x_shape, dtype='float32') + 1) * mask
dense_x = origin_x.detach()
dense_x.stop_gradient = False
dense_out = paddle.transpose(dense_x, dims)
if format == "coo":
sp_x = origin_x.detach().to_sparse_coo(len(x_shape))
else:
sp_x = origin_x.detach().to_sparse_csr()
sp_x.stop_gradient = False
sp_out = paddle.sparse.transpose(sp_x, dims)
if format == "coo":
sp_x = origin_x.detach().to_sparse_coo(len(x_shape))
else:
sp_x = origin_x.detach().to_sparse_csr()
sp_x.stop_gradient = False
sp_out = paddle.sparse.transpose(sp_x, dims)
np.testing.assert_allclose(
sp_out.to_dense().numpy(), dense_out.numpy(), rtol=1e-05
)
dense_out.backward()
sp_out.backward()
np.testing.assert_allclose(
sp_x.grad.to_dense().numpy(),
(dense_x.grad * mask).numpy(),
rtol=1e-05,
)
np.testing.assert_allclose(
sp_out.to_dense().numpy(), dense_out.numpy(), rtol=1e-05
)
dense_out.backward()
sp_out.backward()
np.testing.assert_allclose(
sp_x.grad.to_dense().numpy(),
(dense_x.grad * mask).numpy(),
rtol=1e-05,
)
def test_transpose_2d(self):
self.check_result([2, 5], [0, 1], 'coo')
......
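For reference, the API under test: `paddle.sparse.transpose` permutes the dimensions of a sparse tensor just as `paddle.transpose` does for a dense one. A tiny sketch (illustrative values):

```python
import numpy as np
import paddle

dense = paddle.to_tensor([[0.0, 1.0], [2.0, 0.0], [0.0, 3.0]])
sp = dense.to_sparse_coo(2)
sp_t = paddle.sparse.transpose(sp, [1, 0])
np.testing.assert_allclose(
    sp_t.to_dense().numpy(), paddle.transpose(dense, [1, 0]).numpy()
)
```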
@@ -19,483 +19,444 @@ import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.framework import _test_eager_guard
devices = ['cpu', 'gpu']
class TestSparseCreate(unittest.TestCase):
def test_create_coo_by_tensor(self):
with _test_eager_guard():
indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
values = [1, 2, 3, 4, 5]
dense_shape = [3, 4]
dense_indices = paddle.to_tensor(indices)
dense_elements = paddle.to_tensor(values, dtype='float32')
coo = paddle.sparse.sparse_coo_tensor(
dense_indices, dense_elements, dense_shape, stop_gradient=False
)
# test the to_string.py
assert np.array_equal(indices, coo.indices().numpy())
assert np.array_equal(values, coo.values().numpy())
indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
values = [1, 2, 3, 4, 5]
dense_shape = [3, 4]
dense_indices = paddle.to_tensor(indices)
dense_elements = paddle.to_tensor(values, dtype='float32')
coo = paddle.sparse.sparse_coo_tensor(
dense_indices, dense_elements, dense_shape, stop_gradient=False
)
# test the to_string.py
assert np.array_equal(indices, coo.indices().numpy())
assert np.array_equal(values, coo.values().numpy())
def test_create_coo_by_np(self):
with _test_eager_guard():
indices = [[0, 1, 2], [1, 2, 0]]
values = [1.0, 2.0, 3.0]
dense_shape = [3, 3]
coo = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape)
assert np.array_equal(3, coo.nnz())
assert np.array_equal(indices, coo.indices().numpy())
assert np.array_equal(values, coo.values().numpy())
indices = [[0, 1, 2], [1, 2, 0]]
values = [1.0, 2.0, 3.0]
dense_shape = [3, 3]
coo = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape)
assert np.array_equal(3, coo.nnz())
assert np.array_equal(indices, coo.indices().numpy())
assert np.array_equal(values, coo.values().numpy())
def test_create_csr_by_tensor(self):
with _test_eager_guard():
crows = [0, 2, 3, 5]
cols = [1, 3, 2, 0, 1]
values = [1, 2, 3, 4, 5]
dense_shape = [3, 4]
dense_crows = paddle.to_tensor(crows)
dense_cols = paddle.to_tensor(cols)
dense_elements = paddle.to_tensor(values, dtype='float32')
stop_gradient = False
csr = paddle.sparse.sparse_csr_tensor(
dense_crows,
dense_cols,
dense_elements,
dense_shape,
stop_gradient=stop_gradient,
)
crows = [0, 2, 3, 5]
cols = [1, 3, 2, 0, 1]
values = [1, 2, 3, 4, 5]
dense_shape = [3, 4]
dense_crows = paddle.to_tensor(crows)
dense_cols = paddle.to_tensor(cols)
dense_elements = paddle.to_tensor(values, dtype='float32')
stop_gradient = False
csr = paddle.sparse.sparse_csr_tensor(
dense_crows,
dense_cols,
dense_elements,
dense_shape,
stop_gradient=stop_gradient,
)
def test_create_csr_by_np(self):
with _test_eager_guard():
crows = [0, 2, 3, 5]
cols = [1, 3, 2, 0, 1]
values = [1, 2, 3, 4, 5]
dense_shape = [3, 4]
csr = paddle.sparse.sparse_csr_tensor(
crows, cols, values, dense_shape
)
# test the to_string.py
assert np.array_equal(5, csr.nnz())
assert np.array_equal(crows, csr.crows().numpy())
assert np.array_equal(cols, csr.cols().numpy())
assert np.array_equal(values, csr.values().numpy())
crows = [0, 2, 3, 5]
cols = [1, 3, 2, 0, 1]
values = [1, 2, 3, 4, 5]
dense_shape = [3, 4]
csr = paddle.sparse.sparse_csr_tensor(crows, cols, values, dense_shape)
# test the to_string.py
assert np.array_equal(5, csr.nnz())
assert np.array_equal(crows, csr.crows().numpy())
assert np.array_equal(cols, csr.cols().numpy())
assert np.array_equal(values, csr.values().numpy())
def test_place(self):
with _test_eager_guard():
place = core.CPUPlace()
indices = [[0, 1], [0, 1]]
values = [1.0, 2.0]
dense_shape = [2, 2]
coo = paddle.sparse.sparse_coo_tensor(
indices, values, dense_shape, place=place
)
assert coo.place.is_cpu_place()
assert coo.values().place.is_cpu_place()
assert coo.indices().place.is_cpu_place()
crows = [0, 2, 3, 5]
cols = [1, 3, 2, 0, 1]
values = [1.0, 2.0, 3.0, 4.0, 5.0]
csr = paddle.sparse.sparse_csr_tensor(
crows, cols, values, [3, 5], place=place
)
assert csr.place.is_cpu_place()
assert csr.crows().place.is_cpu_place()
assert csr.cols().place.is_cpu_place()
assert csr.values().place.is_cpu_place()
place = core.CPUPlace()
indices = [[0, 1], [0, 1]]
values = [1.0, 2.0]
dense_shape = [2, 2]
coo = paddle.sparse.sparse_coo_tensor(
indices, values, dense_shape, place=place
)
assert coo.place.is_cpu_place()
assert coo.values().place.is_cpu_place()
assert coo.indices().place.is_cpu_place()
crows = [0, 2, 3, 5]
cols = [1, 3, 2, 0, 1]
values = [1.0, 2.0, 3.0, 4.0, 5.0]
csr = paddle.sparse.sparse_csr_tensor(
crows, cols, values, [3, 5], place=place
)
assert csr.place.is_cpu_place()
assert csr.crows().place.is_cpu_place()
assert csr.cols().place.is_cpu_place()
assert csr.values().place.is_cpu_place()
def test_dtype(self):
with _test_eager_guard():
indices = [[0, 1], [0, 1]]
values = [1.0, 2.0]
dense_shape = [2, 2]
indices = paddle.to_tensor(indices, dtype='int32')
values = paddle.to_tensor(values, dtype='float32')
coo = paddle.sparse.sparse_coo_tensor(
indices, values, dense_shape, dtype='float64'
)
assert coo.dtype == paddle.float64
crows = [0, 2, 3, 5]
cols = [1, 3, 2, 0, 1]
values = [1.0, 2.0, 3.0, 4.0, 5.0]
csr = paddle.sparse.sparse_csr_tensor(
crows, cols, values, [3, 5], dtype='float16'
)
assert csr.dtype == paddle.float16
indices = [[0, 1], [0, 1]]
values = [1.0, 2.0]
dense_shape = [2, 2]
indices = paddle.to_tensor(indices, dtype='int32')
values = paddle.to_tensor(values, dtype='float32')
coo = paddle.sparse.sparse_coo_tensor(
indices, values, dense_shape, dtype='float64'
)
assert coo.dtype == paddle.float64
crows = [0, 2, 3, 5]
cols = [1, 3, 2, 0, 1]
values = [1.0, 2.0, 3.0, 4.0, 5.0]
csr = paddle.sparse.sparse_csr_tensor(
crows, cols, values, [3, 5], dtype='float16'
)
assert csr.dtype == paddle.float16
def test_create_coo_no_shape(self):
with _test_eager_guard():
indices = [[0, 1], [0, 1]]
values = [1.0, 2.0]
indices = paddle.to_tensor(indices, dtype='int32')
values = paddle.to_tensor(values, dtype='float32')
coo = paddle.sparse.sparse_coo_tensor(indices, values)
assert [2, 2] == coo.shape
indices = [[0, 1], [0, 1]]
values = [1.0, 2.0]
indices = paddle.to_tensor(indices, dtype='int32')
values = paddle.to_tensor(values, dtype='float32')
coo = paddle.sparse.sparse_coo_tensor(indices, values)
assert [2, 2] == coo.shape
class TestSparseConvert(unittest.TestCase):
def test_to_sparse_coo(self):
with _test_eager_guard():
x = [[0, 1, 0, 2], [0, 0, 3, 0], [4, 5, 0, 0]]
indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
values = [1.0, 2.0, 3.0, 4.0, 5.0]
dense_x = paddle.to_tensor(x, dtype='float32', stop_gradient=False)
out = dense_x.to_sparse_coo(2)
assert np.array_equal(out.indices().numpy(), indices)
assert np.array_equal(out.values().numpy(), values)
# test to_sparse_coo_grad backward
out_grad_indices = [[0, 1], [0, 1]]
out_grad_values = [2.0, 3.0]
out_grad = paddle.sparse.sparse_coo_tensor(
paddle.to_tensor(out_grad_indices),
paddle.to_tensor(out_grad_values),
shape=out.shape,
stop_gradient=True,
)
out.backward(out_grad)
assert np.array_equal(
dense_x.grad.numpy(), out_grad.to_dense().numpy()
)
x = [[0, 1, 0, 2], [0, 0, 3, 0], [4, 5, 0, 0]]
indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
values = [1.0, 2.0, 3.0, 4.0, 5.0]
dense_x = paddle.to_tensor(x, dtype='float32', stop_gradient=False)
out = dense_x.to_sparse_coo(2)
assert np.array_equal(out.indices().numpy(), indices)
assert np.array_equal(out.values().numpy(), values)
# test to_sparse_coo_grad backward
out_grad_indices = [[0, 1], [0, 1]]
out_grad_values = [2.0, 3.0]
out_grad = paddle.sparse.sparse_coo_tensor(
paddle.to_tensor(out_grad_indices),
paddle.to_tensor(out_grad_values),
shape=out.shape,
stop_gradient=True,
)
out.backward(out_grad)
assert np.array_equal(dense_x.grad.numpy(), out_grad.to_dense().numpy())
def test_coo_to_dense(self):
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
with _test_eager_guard():
indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
values = [1.0, 2.0, 3.0, 4.0, 5.0]
indices_dtypes = ['int32', 'int64']
for indices_dtype in indices_dtypes:
sparse_x = paddle.sparse.sparse_coo_tensor(
paddle.to_tensor(indices, dtype=indices_dtype),
paddle.to_tensor(values),
shape=[3, 4],
stop_gradient=False,
)
dense_tensor = sparse_x.to_dense()
# test to_dense_grad backward
out_grad = [
[1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0],
[9.0, 10.0, 11.0, 12.0],
]
dense_tensor.backward(paddle.to_tensor(out_grad))
# mask the out_grad by sparse_x.indices()
correct_x_grad = [2.0, 4.0, 7.0, 9.0, 10.0]
assert np.array_equal(
correct_x_grad, sparse_x.grad.values().numpy()
)
paddle.device.set_device("cpu")
sparse_x_cpu = paddle.sparse.sparse_coo_tensor(
paddle.to_tensor(indices, dtype=indices_dtype),
paddle.to_tensor(values),
shape=[3, 4],
stop_gradient=False,
)
dense_tensor_cpu = sparse_x_cpu.to_dense()
dense_tensor_cpu.backward(paddle.to_tensor(out_grad))
assert np.array_equal(
correct_x_grad, sparse_x_cpu.grad.values().numpy()
)
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
def test_to_sparse_csr(self):
with _test_eager_guard():
x = [[0, 1, 0, 2], [0, 0, 3, 0], [4, 5, 0, 0]]
crows = [0, 2, 3, 5]
cols = [1, 3, 2, 0, 1]
values = [1, 2, 3, 4, 5]
dense_x = paddle.to_tensor(x)
out = dense_x.to_sparse_csr()
assert np.array_equal(out.crows().numpy(), crows)
assert np.array_equal(out.cols().numpy(), cols)
assert np.array_equal(out.values().numpy(), values)
dense_tensor = out.to_dense()
assert np.array_equal(dense_tensor.numpy(), x)
def test_coo_values_grad(self):
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
with _test_eager_guard():
indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
values = [1.0, 2.0, 3.0, 4.0, 5.0]
indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
values = [1.0, 2.0, 3.0, 4.0, 5.0]
indices_dtypes = ['int32', 'int64']
for indices_dtype in indices_dtypes:
sparse_x = paddle.sparse.sparse_coo_tensor(
paddle.to_tensor(indices),
paddle.to_tensor(indices, dtype=indices_dtype),
paddle.to_tensor(values),
shape=[3, 4],
stop_gradient=False,
)
values_tensor = sparse_x.values()
out_grad = [2.0, 3.0, 5.0, 8.0, 9.0]
# test coo_values_grad
values_tensor.backward(paddle.to_tensor(out_grad))
assert np.array_equal(out_grad, sparse_x.grad.values().numpy())
indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
values = [
[1.0, 1.0],
[2.0, 2.0],
[3.0, 3.0],
[4.0, 4.0],
[5.0, 5.0],
dense_tensor = sparse_x.to_dense()
# test to_dense_grad backward
out_grad = [
[1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0],
[9.0, 10.0, 11.0, 12.0],
]
sparse_x = paddle.sparse.sparse_coo_tensor(
paddle.to_tensor(indices),
dense_tensor.backward(paddle.to_tensor(out_grad))
# mask the out_grad by sparse_x.indices()
correct_x_grad = [2.0, 4.0, 7.0, 9.0, 10.0]
assert np.array_equal(
correct_x_grad, sparse_x.grad.values().numpy()
)
paddle.device.set_device("cpu")
sparse_x_cpu = paddle.sparse.sparse_coo_tensor(
paddle.to_tensor(indices, dtype=indices_dtype),
paddle.to_tensor(values),
shape=[3, 4, 2],
shape=[3, 4],
stop_gradient=False,
)
values_tensor = sparse_x.values()
out_grad = [
[2.0, 2.0],
[3.0, 3.0],
[5.0, 5.0],
[8.0, 8.0],
[9.0, 9.0],
]
# test coo_values_grad
values_tensor.backward(paddle.to_tensor(out_grad))
assert np.array_equal(out_grad, sparse_x.grad.values().numpy())
dense_tensor_cpu = sparse_x_cpu.to_dense()
dense_tensor_cpu.backward(paddle.to_tensor(out_grad))
assert np.array_equal(
correct_x_grad, sparse_x_cpu.grad.values().numpy()
)
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
def test_to_sparse_csr(self):
x = [[0, 1, 0, 2], [0, 0, 3, 0], [4, 5, 0, 0]]
crows = [0, 2, 3, 5]
cols = [1, 3, 2, 0, 1]
values = [1, 2, 3, 4, 5]
dense_x = paddle.to_tensor(x)
out = dense_x.to_sparse_csr()
assert np.array_equal(out.crows().numpy(), crows)
assert np.array_equal(out.cols().numpy(), cols)
assert np.array_equal(out.values().numpy(), values)
dense_tensor = out.to_dense()
assert np.array_equal(dense_tensor.numpy(), x)
def test_coo_values_grad(self):
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
values = [1.0, 2.0, 3.0, 4.0, 5.0]
sparse_x = paddle.sparse.sparse_coo_tensor(
paddle.to_tensor(indices),
paddle.to_tensor(values),
shape=[3, 4],
stop_gradient=False,
)
values_tensor = sparse_x.values()
out_grad = [2.0, 3.0, 5.0, 8.0, 9.0]
# test coo_values_grad
values_tensor.backward(paddle.to_tensor(out_grad))
assert np.array_equal(out_grad, sparse_x.grad.values().numpy())
indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
values = [
[1.0, 1.0],
[2.0, 2.0],
[3.0, 3.0],
[4.0, 4.0],
[5.0, 5.0],
]
sparse_x = paddle.sparse.sparse_coo_tensor(
paddle.to_tensor(indices),
paddle.to_tensor(values),
shape=[3, 4, 2],
stop_gradient=False,
)
values_tensor = sparse_x.values()
out_grad = [
[2.0, 2.0],
[3.0, 3.0],
[5.0, 5.0],
[8.0, 8.0],
[9.0, 9.0],
]
# test coo_values_grad
values_tensor.backward(paddle.to_tensor(out_grad))
assert np.array_equal(out_grad, sparse_x.grad.values().numpy())
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
def test_sparse_coo_tensor_grad(self):
with _test_eager_guard():
for device in devices:
if device == 'cpu' or (
device == 'gpu' and paddle.is_compiled_with_cuda()
):
paddle.device.set_device(device)
indices = [[0, 1], [0, 1]]
values = [1, 2]
indices = paddle.to_tensor(indices, dtype='int32')
values = paddle.to_tensor(
values, dtype='float32', stop_gradient=False
)
sparse_x = paddle.sparse.sparse_coo_tensor(
indices, values, shape=[2, 2], stop_gradient=False
)
grad_indices = [[0, 1], [1, 1]]
grad_values = [2, 3]
grad_indices = paddle.to_tensor(grad_indices, dtype='int32')
grad_values = paddle.to_tensor(grad_values, dtype='float32')
sparse_out_grad = paddle.sparse.sparse_coo_tensor(
grad_indices, grad_values, shape=[2, 2]
)
sparse_x.backward(sparse_out_grad)
correct_values_grad = [0, 3]
assert np.array_equal(
correct_values_grad, values.grad.numpy()
)
# test the case where each non-zero value is a vector
values = [[1, 1], [2, 2]]
values = paddle.to_tensor(
values, dtype='float32', stop_gradient=False
)
sparse_x = paddle.sparse.sparse_coo_tensor(
indices, values, shape=[2, 2, 2], stop_gradient=False
)
grad_values = [[2, 2], [3, 3]]
grad_values = paddle.to_tensor(grad_values, dtype='float32')
sparse_out_grad = paddle.sparse.sparse_coo_tensor(
grad_indices, grad_values, shape=[2, 2, 2]
)
sparse_x.backward(sparse_out_grad)
correct_values_grad = [[0, 0], [3, 3]]
assert np.array_equal(
correct_values_grad, values.grad.numpy()
)
for device in devices:
if device == 'cpu' or (
device == 'gpu' and paddle.is_compiled_with_cuda()
):
paddle.device.set_device(device)
indices = [[0, 1], [0, 1]]
values = [1, 2]
indices = paddle.to_tensor(indices, dtype='int32')
values = paddle.to_tensor(
values, dtype='float32', stop_gradient=False
)
sparse_x = paddle.sparse.sparse_coo_tensor(
indices, values, shape=[2, 2], stop_gradient=False
)
grad_indices = [[0, 1], [1, 1]]
grad_values = [2, 3]
grad_indices = paddle.to_tensor(grad_indices, dtype='int32')
grad_values = paddle.to_tensor(grad_values, dtype='float32')
sparse_out_grad = paddle.sparse.sparse_coo_tensor(
grad_indices, grad_values, shape=[2, 2]
)
sparse_x.backward(sparse_out_grad)
correct_values_grad = [0, 3]
assert np.array_equal(correct_values_grad, values.grad.numpy())
# test the case where each non-zero value is a vector
values = [[1, 1], [2, 2]]
values = paddle.to_tensor(
values, dtype='float32', stop_gradient=False
)
sparse_x = paddle.sparse.sparse_coo_tensor(
indices, values, shape=[2, 2, 2], stop_gradient=False
)
grad_values = [[2, 2], [3, 3]]
grad_values = paddle.to_tensor(grad_values, dtype='float32')
sparse_out_grad = paddle.sparse.sparse_coo_tensor(
grad_indices, grad_values, shape=[2, 2, 2]
)
sparse_x.backward(sparse_out_grad)
correct_values_grad = [[0, 0], [3, 3]]
assert np.array_equal(correct_values_grad, values.grad.numpy())
def test_sparse_coo_tensor_sorted(self):
with _test_eager_guard():
for device in devices:
if device == 'cpu' or (
device == 'gpu' and paddle.is_compiled_with_cuda()
):
paddle.device.set_device(device)
# test unsorted and duplicate indices
indices = [[1, 0, 0], [0, 1, 1]]
values = [1.0, 2.0, 3.0]
indices = paddle.to_tensor(indices, dtype='int32')
values = paddle.to_tensor(values, dtype='float32')
sparse_x = paddle.sparse.sparse_coo_tensor(indices, values)
sparse_x = paddle.sparse.coalesce(sparse_x)
indices_sorted = [[0, 1], [1, 0]]
values_sorted = [5.0, 1.0]
assert np.array_equal(
indices_sorted, sparse_x.indices().numpy()
)
assert np.array_equal(
values_sorted, sparse_x.values().numpy()
)
# test the case where each non-zero value is a vector
values = [[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]]
values = paddle.to_tensor(values, dtype='float32')
sparse_x = paddle.sparse.sparse_coo_tensor(indices, values)
sparse_x = paddle.sparse.coalesce(sparse_x)
values_sorted = [[5.0, 5.0], [1.0, 1.0]]
assert np.array_equal(
indices_sorted, sparse_x.indices().numpy()
)
assert np.array_equal(
values_sorted, sparse_x.values().numpy()
)
for device in devices:
if device == 'cpu' or (
device == 'gpu' and paddle.is_compiled_with_cuda()
):
paddle.device.set_device(device)
# test unsorted and duplicate indices
indices = [[1, 0, 0], [0, 1, 1]]
values = [1.0, 2.0, 3.0]
indices = paddle.to_tensor(indices, dtype='int32')
values = paddle.to_tensor(values, dtype='float32')
sparse_x = paddle.sparse.sparse_coo_tensor(indices, values)
sparse_x = paddle.sparse.coalesce(sparse_x)
indices_sorted = [[0, 1], [1, 0]]
values_sorted = [5.0, 1.0]
assert np.array_equal(
indices_sorted, sparse_x.indices().numpy()
)
assert np.array_equal(values_sorted, sparse_x.values().numpy())
# test the case where each non-zero value is a vector
values = [[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]]
values = paddle.to_tensor(values, dtype='float32')
sparse_x = paddle.sparse.sparse_coo_tensor(indices, values)
sparse_x = paddle.sparse.coalesce(sparse_x)
values_sorted = [[5.0, 5.0], [1.0, 1.0]]
assert np.array_equal(
indices_sorted, sparse_x.indices().numpy()
)
assert np.array_equal(values_sorted, sparse_x.values().numpy())
def test_batch_csr(self):
with _test_eager_guard():
def verify(dense_x):
sparse_x = dense_x.to_sparse_csr()
out = sparse_x.to_dense()
assert np.allclose(out.numpy(), dense_x.numpy())
shape = np.random.randint(low=1, high=10, size=3)
shape = list(shape)
dense_x = paddle.randn(shape)
dense_x = paddle.nn.functional.dropout(dense_x, p=0.5)
verify(dense_x)
# test batch size = 1
shape[0] = 1
dense_x = paddle.randn(shape)
dense_x = paddle.nn.functional.dropout(dense_x, p=0.5)
verify(dense_x)
shape = np.random.randint(low=3, high=10, size=3)
shape = list(shape)
dense_x = paddle.randn(shape)
# set the 0th batch to zero
dense_x[0] = 0
verify(dense_x)
dense_x = paddle.randn(shape)
# set the 1st batch to zero
dense_x[1] = 0
verify(dense_x)
dense_x = paddle.randn(shape)
# set the 2nd batch to zero
dense_x[2] = 0
verify(dense_x)
def verify(dense_x):
sparse_x = dense_x.to_sparse_csr()
out = sparse_x.to_dense()
assert np.allclose(out.numpy(), dense_x.numpy())
shape = np.random.randint(low=1, high=10, size=3)
shape = list(shape)
dense_x = paddle.randn(shape)
dense_x = paddle.nn.functional.dropout(dense_x, p=0.5)
verify(dense_x)
# test batch size = 1
shape[0] = 1
dense_x = paddle.randn(shape)
dense_x = paddle.nn.functional.dropout(dense_x, p=0.5)
verify(dense_x)
shape = np.random.randint(low=3, high=10, size=3)
shape = list(shape)
dense_x = paddle.randn(shape)
# set the 0th batch to zero
dense_x[0] = 0
verify(dense_x)
dense_x = paddle.randn(shape)
# set the 1st batch to zero
dense_x[1] = 0
verify(dense_x)
dense_x = paddle.randn(shape)
# set the 2nd batch to zero
dense_x[2] = 0
verify(dense_x)
class TestCooError(unittest.TestCase):
def test_small_shape(self):
with _test_eager_guard():
with self.assertRaises(ValueError):
indices = [[2, 3], [0, 2]]
values = [1, 2]
# 1. the shape is too small
dense_shape = [2, 2]
sparse_x = paddle.sparse.sparse_coo_tensor(
indices, values, shape=dense_shape
)
with self.assertRaises(ValueError):
indices = [[2, 3], [0, 2]]
values = [1, 2]
# 1. the shape is too small
dense_shape = [2, 2]
sparse_x = paddle.sparse.sparse_coo_tensor(
indices, values, shape=dense_shape
)
def test_same_nnz(self):
with _test_eager_guard():
with self.assertRaises(ValueError):
# 2. the nnz of indices must be the same as the nnz of values
indices = [[1, 2], [1, 0]]
values = [1, 2, 3]
sparse_x = paddle.sparse.sparse_coo_tensor(indices, values)
with self.assertRaises(ValueError):
# 2. the nnz of indices must be the same as the nnz of values
indices = [[1, 2], [1, 0]]
values = [1, 2, 3]
sparse_x = paddle.sparse.sparse_coo_tensor(indices, values)
def test_same_dimensions(self):
with _test_eager_guard():
with self.assertRaises(ValueError):
indices = [[1, 2], [1, 0]]
values = [1, 2, 3]
shape = [2, 3, 4]
sparse_x = paddle.sparse.sparse_coo_tensor(
indices, values, shape=shape
)
with self.assertRaises(ValueError):
indices = [[1, 2], [1, 0]]
values = [1, 2, 3]
shape = [2, 3, 4]
sparse_x = paddle.sparse.sparse_coo_tensor(
indices, values, shape=shape
)
def test_indices_dtype(self):
with _test_eager_guard():
with self.assertRaises(TypeError):
indices = [[1.0, 2.0], [0, 1]]
values = [1, 2]
sparse_x = paddle.sparse.sparse_coo_tensor(indices, values)
with self.assertRaises(TypeError):
indices = [[1.0, 2.0], [0, 1]]
values = [1, 2]
sparse_x = paddle.sparse.sparse_coo_tensor(indices, values)
class TestCsrError(unittest.TestCase):
def test_dimension1(self):
with _test_eager_guard():
with self.assertRaises(ValueError):
crows = [0, 1, 2, 3]
cols = [0, 1, 2]
values = [1, 2, 3]
shape = [3]
sparse_x = paddle.sparse.sparse_csr_tensor(
crows, cols, values, shape
)
with self.assertRaises(ValueError):
crows = [0, 1, 2, 3]
cols = [0, 1, 2]
values = [1, 2, 3]
shape = [3]
sparse_x = paddle.sparse.sparse_csr_tensor(
crows, cols, values, shape
)
def test_dimension2(self):
with _test_eager_guard():
with self.assertRaises(ValueError):
crows = [0, 1, 2, 3]
cols = [0, 1, 2]
values = [1, 2, 3]
shape = [3, 3, 3, 3]
sparse_x = paddle.sparse.sparse_csr_tensor(
crows, cols, values, shape
)
with self.assertRaises(ValueError):
crows = [0, 1, 2, 3]
cols = [0, 1, 2]
values = [1, 2, 3]
shape = [3, 3, 3, 3]
sparse_x = paddle.sparse.sparse_csr_tensor(
crows, cols, values, shape
)
def test_same_shape1(self):
with _test_eager_guard():
with self.assertRaises(ValueError):
crows = [0, 1, 2, 3]
cols = [0, 1, 2, 3]
values = [1, 2, 3]
shape = [3, 4]
sparse_x = paddle.sparse.sparse_csr_tensor(
crows, cols, values, shape
)
with self.assertRaises(ValueError):
crows = [0, 1, 2, 3]
cols = [0, 1, 2, 3]
values = [1, 2, 3]
shape = [3, 4]
sparse_x = paddle.sparse.sparse_csr_tensor(
crows, cols, values, shape
)
def test_same_shape2(self):
with _test_eager_guard():
with self.assertRaises(ValueError):
crows = [0, 1, 2, 3]
cols = [0, 1, 2, 3]
values = [1, 2, 3, 4]
shape = [3, 4]
sparse_x = paddle.sparse.sparse_csr_tensor(
crows, cols, values, shape
)
with self.assertRaises(ValueError):
crows = [0, 1, 2, 3]
cols = [0, 1, 2, 3]
values = [1, 2, 3, 4]
shape = [3, 4]
sparse_x = paddle.sparse.sparse_csr_tensor(
crows, cols, values, shape
)
def test_same_shape3(self):
with _test_eager_guard():
with self.assertRaises(ValueError):
crows = [0, 1, 2, 3, 0, 1, 2]
cols = [0, 1, 2, 3, 0, 1, 2]
values = [1, 2, 3, 4, 0, 1, 2]
shape = [2, 3, 4]
sparse_x = paddle.sparse.sparse_csr_tensor(
crows, cols, values, shape
)
with self.assertRaises(ValueError):
crows = [0, 1, 2, 3, 0, 1, 2]
cols = [0, 1, 2, 3, 0, 1, 2]
values = [1, 2, 3, 4, 0, 1, 2]
shape = [2, 3, 4]
sparse_x = paddle.sparse.sparse_csr_tensor(
crows, cols, values, shape
)
def test_crows_first_value(self):
with _test_eager_guard():
with self.assertRaises(ValueError):
crows = [1, 1, 2, 3]
cols = [0, 1, 2]
values = [1, 2, 3]
shape = [3, 4]
sparse_x = paddle.sparse.sparse_csr_tensor(
crows, cols, values, shape
)
with self.assertRaises(ValueError):
crows = [1, 1, 2, 3]
cols = [0, 1, 2]
values = [1, 2, 3]
shape = [3, 4]
sparse_x = paddle.sparse.sparse_csr_tensor(
crows, cols, values, shape
)
def test_dtype(self):
with _test_eager_guard():
with self.assertRaises(TypeError):
crows = [0, 1, 2, 3.0]
cols = [0, 1, 2]
values = [1, 2, 3]
shape = [3]
sparse_x = paddle.sparse.sparse_csr_tensor(
crows, cols, values, shape
)
with self.assertRaises(TypeError):
crows = [0, 1, 2, 3.0]
cols = [0, 1, 2]
values = [1, 2, 3]
shape = [3]
sparse_x = paddle.sparse.sparse_csr_tensor(
crows, cols, values, shape
)
if __name__ == "__main__":
......
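The `coalesce` behavior asserted in `test_sparse_coo_tensor_sorted` above: duplicate coordinates are merged, their values summed, and the indices returned in sorted order. A minimal sketch using the same data as the test:

```python
import numpy as np
import paddle

indices = paddle.to_tensor([[1, 0, 0], [0, 1, 1]], dtype='int32')  # (1,0), (0,1), (0,1)
values = paddle.to_tensor([1.0, 2.0, 3.0])
x = paddle.sparse.coalesce(paddle.sparse.sparse_coo_tensor(indices, values))
np.testing.assert_array_equal(x.indices().numpy(), [[0, 1], [1, 0]])  # sorted
np.testing.assert_array_equal(x.values().numpy(), [5.0, 1.0])         # 2.0 + 3.0 merged
```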
@@ -20,7 +20,6 @@ from op_test import OpTest, convert_float_to_uint16
import paddle
import paddle.fluid as fluid
from paddle.fluid import Program, core, program_guard
from paddle.fluid.framework import _test_eager_guard
class TestSplitOp(OpTest):
@@ -453,24 +452,21 @@ class API_TestDygraphFluidSplit(unittest.TestCase):
x1_out = x1.numpy()
x2_out = x2.numpy()
ex_x0, ex_x1, ex_x2 = np.split(input_1, 3, axis=1)
with _test_eager_guard():
# input is a variable whose shape is [4, 6, 6]
input = paddle.to_tensor(input_1)
input.stop_gradient = False
x0, x1, x2 = fluid.layers.split(input, num_or_sections=3, dim=1)
eager_x0_out = x0.numpy()
eager_x1_out = x1.numpy()
eager_x2_out = x2.numpy()
loss = x0.sum()
loss.backward()
manul_grad = np.zeros_like(input_1)
manul_grad[:, :2, :] = 1
np.testing.assert_allclose(
input.gradient(), manul_grad, rtol=1e-05
)
np.testing.assert_allclose(ex_x0, eager_x0_out, rtol=1e-05)
np.testing.assert_allclose(ex_x1, eager_x1_out, rtol=1e-05)
np.testing.assert_allclose(ex_x2, eager_x2_out, rtol=1e-05)
# input is a variable whose shape is [4, 6, 6]
input = paddle.to_tensor(input_1)
input.stop_gradient = False
x0, x1, x2 = fluid.layers.split(input, num_or_sections=3, dim=1)
eager_x0_out = x0.numpy()
eager_x1_out = x1.numpy()
eager_x2_out = x2.numpy()
loss = x0.sum()
loss.backward()
manul_grad = np.zeros_like(input_1)
manul_grad[:, :2, :] = 1
np.testing.assert_allclose(input.gradient(), manul_grad, rtol=1e-05)
np.testing.assert_allclose(ex_x0, eager_x0_out, rtol=1e-05)
np.testing.assert_allclose(ex_x1, eager_x1_out, rtol=1e-05)
np.testing.assert_allclose(ex_x2, eager_x2_out, rtol=1e-05)
np.testing.assert_allclose(ex_x0, x0_out, rtol=1e-05)
np.testing.assert_allclose(ex_x1, x1_out, rtol=1e-05)
@@ -486,24 +482,21 @@ class API_TestDygraphFluidSplit(unittest.TestCase):
x1_out = x1.numpy()
x2_out = x2.numpy()
ex_x0, ex_x1, ex_x2 = np.split(input_1, 3, axis=1)
with _test_eager_guard():
# input is a variable whose shape is [4, 6, 6]
input = paddle.to_tensor(input_1)
input.stop_gradient = False
x0, x1, x2 = fluid.layers.split(input, [2, 2, 2], dim=1)
eager_x0_out = x0.numpy()
eager_x1_out = x1.numpy()
eager_x2_out = x2.numpy()
loss = x0.sum()
loss.backward()
manul_grad = np.zeros_like(input_1)
manul_grad[:, :2, :] = 1
np.testing.assert_allclose(
input.gradient(), manul_grad, rtol=1e-05
)
np.testing.assert_allclose(ex_x0, eager_x0_out, rtol=1e-05)
np.testing.assert_allclose(ex_x1, eager_x1_out, rtol=1e-05)
np.testing.assert_allclose(ex_x2, eager_x2_out, rtol=1e-05)
# input is a variable whose shape is [4, 6, 6]
input = paddle.to_tensor(input_1)
input.stop_gradient = False
x0, x1, x2 = fluid.layers.split(input, [2, 2, 2], dim=1)
eager_x0_out = x0.numpy()
eager_x1_out = x1.numpy()
eager_x2_out = x2.numpy()
loss = x0.sum()
loss.backward()
manul_grad = np.zeros_like(input_1)
manul_grad[:, :2, :] = 1
np.testing.assert_allclose(input.gradient(), manul_grad, rtol=1e-05)
np.testing.assert_allclose(ex_x0, eager_x0_out, rtol=1e-05)
np.testing.assert_allclose(ex_x1, eager_x1_out, rtol=1e-05)
np.testing.assert_allclose(ex_x2, eager_x2_out, rtol=1e-05)
np.testing.assert_allclose(ex_x0, x0_out, rtol=1e-05)
np.testing.assert_allclose(ex_x1, x1_out, rtol=1e-05)
@@ -522,24 +515,21 @@ class API_TestDygraphSplit(unittest.TestCase):
x2_out = x2.numpy()
ex_x0, ex_x1, ex_x2 = np.split(input_1, 3, axis=1)
with _test_eager_guard():
# input is a variable whose shape is [4, 6, 6]
input = paddle.to_tensor(input_1)
input.stop_gradient = False
x0, x1, x2 = paddle.split(input, num_or_sections=3, axis=1)
eager_x0_out = x0.numpy()
eager_x1_out = x1.numpy()
eager_x2_out = x2.numpy()
loss = x0.sum()
loss.backward()
manul_grad = np.zeros_like(input_1)
manul_grad[:, :2, :] = 1
np.testing.assert_allclose(
input.gradient(), manul_grad, rtol=1e-05
)
np.testing.assert_allclose(ex_x0, eager_x0_out, rtol=1e-05)
np.testing.assert_allclose(ex_x1, eager_x1_out, rtol=1e-05)
np.testing.assert_allclose(ex_x2, eager_x2_out, rtol=1e-05)
# input is a variable whose shape is [4, 6, 6]
input = paddle.to_tensor(input_1)
input.stop_gradient = False
x0, x1, x2 = paddle.split(input, num_or_sections=3, axis=1)
eager_x0_out = x0.numpy()
eager_x1_out = x1.numpy()
eager_x2_out = x2.numpy()
loss = x0.sum()
loss.backward()
manul_grad = np.zeros_like(input_1)
manul_grad[:, :2, :] = 1
np.testing.assert_allclose(input.gradient(), manul_grad, rtol=1e-05)
np.testing.assert_allclose(ex_x0, eager_x0_out, rtol=1e-05)
np.testing.assert_allclose(ex_x1, eager_x1_out, rtol=1e-05)
np.testing.assert_allclose(ex_x2, eager_x2_out, rtol=1e-05)
np.testing.assert_allclose(ex_x0, x0_out, rtol=1e-05)
np.testing.assert_allclose(ex_x1, x1_out, rtol=1e-05)
@@ -570,12 +560,11 @@ class API_TestDygraphSplit(unittest.TestCase):
out_dy_np = out_dy.numpy()
ex_out = np.split(input_1, [6], axis=1)
ex_out = ex_out[0]
with _test_eager_guard():
input = paddle.to_tensor(input_1)
out_eager = paddle.split(input, [6], axis=1)
out_eager = out_eager[0]
out_eager_np = out_eager.numpy()  # compare the eager result, not out_dy
np.testing.assert_allclose(ex_out, out_eager_np, rtol=1e-05)
input = paddle.to_tensor(input_1)
out_eager = paddle.split(input, [6], axis=1)
out_eager = out_eager[0]
out_eager_np = out_eager.numpy()  # compare the eager result, not out_dy
np.testing.assert_allclose(ex_out, out_eager_np, rtol=1e-05)
np.testing.assert_allclose(ex_out, out_dy_np, rtol=1e-05)
def test_out_tensor_input(self):
@@ -612,7 +601,7 @@ class API_TestDygraphSplit(unittest.TestCase):
np.testing.assert_allclose(ex_x1, x1_out, rtol=1e-05)
np.testing.assert_allclose(ex_x2, x2_out, rtol=1e-05)
def func_negative_one_section(self):
def test_negative_one_section(self):
with fluid.dygraph.guard():
input_1 = np.random.random([4, 6, 6]).astype("int32")
# input is a variable whose shape is [4, 6, 6]
@@ -622,11 +611,6 @@ class API_TestDygraphSplit(unittest.TestCase):
x0_out = x0[0].numpy()
np.testing.assert_array_equal(x0_out, input.numpy())
def test_negative_one_section(self):
with _test_eager_guard():
self.func_negative_one_section()
self.func_negative_one_section()
class API_TestEmptySplit(unittest.TestCase):
def test_axis_input_empty_section(self):
......
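The split tests above exercise the `num_or_sections` argument, which may be an int (equal parts), a list of section sizes, or a list containing -1 for "whatever remains". A compact sketch (illustrative values):

```python
import numpy as np
import paddle

x = paddle.arange(24, dtype='int32').reshape([4, 6])
a, b, c = paddle.split(x, num_or_sections=3, axis=1)     # three 2-column parts
d, e = paddle.split(x, num_or_sections=[2, -1], axis=1)  # 2 columns, then the rest
assert a.shape == [4, 2] and e.shape == [4, 4]
np.testing.assert_array_equal(np.split(x.numpy(), 3, axis=1)[0], a.numpy())
```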