Unverified commit 1c8ef38e, authored by zhouweiwei2014 and committed via GitHub

[Sparse] change paddle.incubate.sparse to paddle.sparse (#47152)

Parent f61f9e76
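In short, every public sparse API moves from paddle.incubate.sparse.* to paddle.sparse.* with signatures unchanged. A minimal sketch of the renamed entry point (assuming a Paddle build that includes this commit; the indices and values match the docstrings below):

    import paddle

    indices = [[0, 1, 2], [1, 2, 0]]
    values = [1.0, 2.0, 3.0]
    # previously: paddle.incubate.sparse.sparse_coo_tensor(...)
    coo = paddle.sparse.sparse_coo_tensor(indices, values, [3, 3])
    print(coo.values())  # [1., 2., 3.]
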
......@@ -82,6 +82,7 @@ import paddle.static # noqa: F401
import paddle.vision # noqa: F401
import paddle.audio # noqa: F401
import paddle.geometric # noqa: F401
import paddle.sparse # noqa: F401
from .tensor.attribute import is_complex # noqa: F401
from .tensor.attribute import is_integer # noqa: F401
......
......@@ -908,7 +908,7 @@ def monkey_patch_varbase():
indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
values = [1, 2, 3, 4, 5]
dense_shape = [3, 4]
sparse_x = paddle.incubate.sparse.sparse_coo_tensor(paddle.to_tensor(indices, dtype='int32'), paddle.to_tensor(values, dtype='float32'), shape=dense_shape)
sparse_x = paddle.sparse.sparse_coo_tensor(paddle.to_tensor(indices, dtype='int32'), paddle.to_tensor(values, dtype='float32'), shape=dense_shape)
print(sparse_x.values())
#[1, 2, 3, 4, 5]
"""
......@@ -933,7 +933,7 @@ def monkey_patch_varbase():
indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
values = [1, 2, 3, 4, 5]
dense_shape = [3, 4]
sparse_x = paddle.incubate.sparse.sparse_coo_tensor(paddle.to_tensor(indices, dtype='int64'), paddle.to_tensor(values, dtype='float32'), shape=dense_shape)
sparse_x = paddle.sparse.sparse_coo_tensor(paddle.to_tensor(indices, dtype='int64'), paddle.to_tensor(values, dtype='float32'), shape=dense_shape)
dense_x = sparse_x.to_dense()
#[[0., 1., 0., 2.],
# [0., 0., 3., 0.],
......
......@@ -62,7 +62,7 @@ class TestAddmm(unittest.TestCase):
sp_x.stop_gradient = False
sp_y = origin_y.detach()
sp_y.stop_gradient = False
sp_out = paddle.incubate.sparse.addmm(sp_input, sp_x, sp_y, 3.0, 2.0)
sp_out = paddle.sparse.addmm(sp_input, sp_x, sp_y, 3.0, 2.0)
np.testing.assert_allclose(sp_out.numpy(),
dense_out.numpy(),
......
......@@ -17,7 +17,7 @@ import numpy as np
import paddle
from paddle.fluid import core
from paddle.fluid.framework import _test_eager_guard
import paddle.incubate.sparse as sparse
import paddle.sparse as sparse
class TestSparseConv(unittest.TestCase):
......@@ -42,17 +42,17 @@ class TestSparseConv(unittest.TestCase):
correct_out_values = [[5], [11]]
sparse_input = core.eager.sparse_coo_tensor(indices, values,
dense_shape, False)
out = paddle.incubate.sparse.nn.functional.conv3d(
sparse_input,
dense_kernel,
bias=paddle.to_tensor(bias, dtype='float32'),
stride=strides,
padding=paddings,
dilation=dilations,
groups=1,
data_format="NDHWC")
out = paddle.sparse.nn.functional.conv3d(sparse_input,
dense_kernel,
bias=paddle.to_tensor(
bias, dtype='float32'),
stride=strides,
padding=paddings,
dilation=dilations,
groups=1,
data_format="NDHWC")
out.backward(out)
out = paddle.incubate.sparse.coalesce(out)
out = paddle.sparse.coalesce(out)
assert np.array_equal(correct_out_values, out.values().numpy())
def test_subm_conv3d(self):
......@@ -62,11 +62,14 @@ class TestSparseConv(unittest.TestCase):
indices = paddle.to_tensor(indices, dtype='int32')
values = paddle.to_tensor(values, dtype='float32')
dense_shape = [1, 1, 3, 4, 1]
sparse_x = paddle.incubate.sparse.sparse_coo_tensor(
indices, values, dense_shape, stop_gradient=True)
sparse_x = paddle.sparse.sparse_coo_tensor(indices,
values,
dense_shape,
stop_gradient=True)
weight = paddle.randn((1, 3, 3, 1, 1), dtype='float32')
y = paddle.incubate.sparse.nn.functional.subm_conv3d(
sparse_x, weight, key='subm_conv')
y = paddle.sparse.nn.functional.subm_conv3d(sparse_x,
weight,
key='subm_conv')
assert np.array_equal(sparse_x.indices().numpy(),
y.indices().numpy())
......@@ -80,17 +83,20 @@ class TestSparseConv(unittest.TestCase):
values = paddle.to_tensor(values, dtype='float32')
dense_shape = [1, 1, 3, 4, 1]
correct_out_values = [[4], [10]]
sparse_input = paddle.incubate.sparse.sparse_coo_tensor(
sparse_input = paddle.sparse.sparse_coo_tensor(
indices, values, dense_shape, False)
sparse_conv3d = paddle.incubate.sparse.nn.Conv3D(
1, 1, (1, 3, 3), data_format='NDHWC')
sparse_conv3d = paddle.sparse.nn.Conv3D(1,
1, (1, 3, 3),
data_format='NDHWC')
sparse_out = sparse_conv3d(sparse_input)
#test errors
with self.assertRaises(ValueError):
# Currently, only data_format='NDHWC' is supported
conv3d = paddle.incubate.sparse.nn.SubmConv3D(
1, 1, (1, 3, 3), data_format='NCDHW', key='subm_conv')
conv3d = paddle.sparse.nn.SubmConv3D(1,
1, (1, 3, 3),
data_format='NCDHW',
key='subm_conv')
def test_SubmConv3D(self):
with _test_eager_guard():
......@@ -100,11 +106,13 @@ class TestSparseConv(unittest.TestCase):
values = paddle.to_tensor(values, dtype='float32')
dense_shape = [1, 1, 3, 4, 1]
correct_out_values = [[4], [10]]
sparse_input = paddle.incubate.sparse.sparse_coo_tensor(
sparse_input = paddle.sparse.sparse_coo_tensor(
indices, values, dense_shape, False)
subm_conv3d = paddle.incubate.sparse.nn.SubmConv3D(
1, 1, (1, 3, 3), data_format='NDHWC', key='subm_conv')
subm_conv3d = paddle.sparse.nn.SubmConv3D(1,
1, (1, 3, 3),
data_format='NDHWC',
key='subm_conv')
# test extra_repr
print(subm_conv3d.extra_repr())
......@@ -115,8 +123,10 @@ class TestSparseConv(unittest.TestCase):
#test errors
with self.assertRaises(ValueError):
# Currently, only data_format='NDHWC' is supported
conv3d = paddle.incubate.sparse.nn.SubmConv3D(
1, 1, (1, 3, 3), data_format='NCDHW', key='subm_conv')
conv3d = paddle.sparse.nn.SubmConv3D(1,
1, (1, 3, 3),
data_format='NCDHW',
key='subm_conv')
def test_Conv3D_bias(self):
with _test_eager_guard():
......@@ -126,10 +136,7 @@ class TestSparseConv(unittest.TestCase):
sp_x = x.to_sparse_coo(4)
conv3d = paddle.nn.Conv3D(3, 2, 3, data_format='NDHWC')
sp_conv3d = paddle.incubate.sparse.nn.Conv3D(3,
2,
3,
data_format='NDHWC')
sp_conv3d = paddle.sparse.nn.Conv3D(3, 2, 3, data_format='NDHWC')
sp_conv3d.weight.set_value(
paddle.to_tensor(conv3d.weight.numpy().transpose(2, 3, 4, 1,
0)))
......
......@@ -17,20 +17,20 @@ from operator import __add__, __sub__, __mul__, __truediv__
import numpy as np
import paddle
import paddle.incubate.sparse as sparse
import paddle.sparse as sparse
op_list = [__add__, __sub__, __mul__, __truediv__]
def get_actual_res(x, y, op):
if op == __add__:
res = paddle.incubate.sparse.add(x, y)
res = paddle.sparse.add(x, y)
elif op == __sub__:
res = paddle.incubate.sparse.subtract(x, y)
res = paddle.sparse.subtract(x, y)
elif op == __mul__:
res = paddle.incubate.sparse.multiply(x, y)
res = paddle.sparse.multiply(x, y)
elif op == __truediv__:
res = paddle.incubate.sparse.divide(x, y)
res = paddle.sparse.divide(x, y)
else:
raise ValueError("unsupported op")
return res
......
......@@ -92,7 +92,7 @@ class TestSparseAttentionAPI1(unittest.TestCase):
output = paddle.matmul(softmax, value)
output.backward()
output_sp = paddle.incubate.sparse.nn.functional.attention(
output_sp = paddle.sparse.nn.functional.attention(
query_sp, key_sp, value_sp, sp_mask, kp_mask, attn_mask)
output_sp.backward()
else:
......@@ -103,7 +103,7 @@ class TestSparseAttentionAPI1(unittest.TestCase):
output = paddle.matmul(softmax, value)
output.backward()
output_sp = paddle.incubate.sparse.nn.functional.attention(
output_sp = paddle.sparse.nn.functional.attention(
query_sp, key_sp, value_sp, sp_mask)
output_sp.backward()
......
......@@ -15,12 +15,12 @@
import unittest
import paddle
from paddle.incubate.sparse.binary import is_same_shape
from paddle.sparse.binary import is_same_shape
class TestSparseIsSameShapeAPI(unittest.TestCase):
"""
test paddle.incubate.sparse.is_same_shape
test paddle.sparse.is_same_shape
"""
def setUp(self):
......
......@@ -57,7 +57,7 @@ class TestMatmul(unittest.TestCase):
sp_x.stop_gradient = False
sp_y = origin_y.detach()
sp_y.stop_gradient = False
sp_out = paddle.incubate.sparse.matmul(sp_x, sp_y)
sp_out = paddle.sparse.matmul(sp_x, sp_y)
np.testing.assert_allclose(sp_out.numpy(),
dense_out.numpy(),
......@@ -106,7 +106,7 @@ class TestMaskedMatmul(unittest.TestCase):
x = paddle.to_tensor(np_x, stop_gradient=False)
y = paddle.to_tensor(np_y, stop_gradient=False)
mask = paddle.to_tensor(np.ones([10, 6]) * np_mask).to_sparse_csr()
out = paddle.incubate.sparse.masked_matmul(x, y, mask)
out = paddle.sparse.masked_matmul(x, y, mask)
np.testing.assert_allclose(np_out.indptr,
out.crows().numpy(),
......@@ -144,7 +144,7 @@ class TestMaskedMatmul(unittest.TestCase):
sp_x.stop_gradient = False
sp_y = origin_y.detach()
sp_y.stop_gradient = False
sp_out = paddle.incubate.sparse.matmul(sp_x, sp_y)
sp_out = paddle.sparse.matmul(sp_x, sp_y)
sp_out.backward()
np.testing.assert_allclose(sp_out.numpy(),
......
......@@ -15,8 +15,7 @@
import unittest
import numpy as np
import paddle
from paddle.incubate import sparse
from paddle.incubate.sparse import nn
from paddle.sparse import nn
from paddle.fluid.framework import _test_eager_guard
......@@ -26,10 +25,10 @@ class TestGradientAdd(unittest.TestCase):
identity = sp_x
out = nn.functional.relu(sp_x)
values = out.values() + identity.values()
out = sparse.sparse_coo_tensor(out.indices(),
values,
shape=out.shape,
stop_gradient=out.stop_gradient)
out = paddle.sparse.sparse_coo_tensor(out.indices(),
values,
shape=out.shape,
stop_gradient=out.stop_gradient)
return out
def dense(self, x):
......@@ -55,7 +54,6 @@ class TestGradientAdd(unittest.TestCase):
sparse_loss.backward(retain_graph=True)
assert np.allclose(dense_out.numpy(), sparse_out.to_dense().numpy())
assert np.allclose(loss.numpy(), sparse_loss.numpy())
assert np.allclose(x.grad.numpy(), sparse_x.grad.to_dense().numpy())
loss.backward()
......
......@@ -59,7 +59,7 @@ class TestCsrMv(unittest.TestCase):
sp_x.stop_gradient = False
sp_vec = origin_vec.detach()
sp_vec.stop_gradient = False
sp_out = paddle.incubate.sparse.mv(sp_x, sp_vec)
sp_out = paddle.sparse.mv(sp_x, sp_vec)
sp_out.backward()
np.testing.assert_allclose(sp_out.numpy(),
......@@ -97,7 +97,7 @@ class TestCooMv(unittest.TestCase):
sp_x.stop_gradient = False
sp_vec = origin_vec.detach()
sp_vec.stop_gradient = False
sp_out = paddle.incubate.sparse.mv(sp_x, sp_vec)
sp_out = paddle.sparse.mv(sp_x, sp_vec)
sp_out.backward()
np.testing.assert_allclose(sp_out.numpy(),
......
......@@ -15,8 +15,8 @@
import unittest
import numpy as np
import paddle
from paddle.incubate.sparse import nn
import paddle.incubate.sparse as sparse
from paddle.sparse import nn
import paddle.sparse as sparse
import paddle.fluid as fluid
import copy
......@@ -40,7 +40,7 @@ class TestSparseBatchNorm(unittest.TestCase):
dense_x2 = copy.deepcopy(dense_x)
dense_x2.stop_gradient = False
sparse_x = dense_x2.to_sparse_coo(sparse_dim)
sparse_batch_norm = paddle.incubate.sparse.nn.BatchNorm(channels)
sparse_batch_norm = paddle.sparse.nn.BatchNorm(channels)
# set same params
sparse_batch_norm._mean.set_value(batch_norm._mean)
sparse_batch_norm._variance.set_value(batch_norm._variance)
......@@ -66,8 +66,8 @@ class TestSparseBatchNorm(unittest.TestCase):
shape = [2, 3, 6, 6, 3]
x = paddle.randn(shape)
sparse_x = x.to_sparse_coo(4)
sparse_batch_norm = paddle.incubate.sparse.nn.BatchNorm(
3, data_format='NCDHW')
sparse_batch_norm = paddle.sparse.nn.BatchNorm(3,
data_format='NCDHW')
sparse_batch_norm(sparse_x)
def test2(self):
......@@ -76,7 +76,7 @@ class TestSparseBatchNorm(unittest.TestCase):
x_data = paddle.randn((1, 6, 6, 6, channels)).astype('float32')
dense_x = paddle.to_tensor(x_data)
sparse_x = dense_x.to_sparse_coo(4)
batch_norm = paddle.incubate.sparse.nn.BatchNorm(channels)
batch_norm = paddle.sparse.nn.BatchNorm(channels)
batch_norm_out = batch_norm(sparse_x)
dense_bn = paddle.nn.BatchNorm1D(channels)
dense_x = dense_x.reshape((-1, dense_x.shape[-1]))
......@@ -132,7 +132,7 @@ class TestStatic(unittest.TestCase):
dense_shape = [1, 1, 3, 4, channels]
sp_x = sparse.sparse_coo_tensor(indices, values, dense_shape)
sparse_batch_norm = paddle.incubate.sparse.nn.BatchNorm(channels)
sparse_batch_norm = paddle.sparse.nn.BatchNorm(channels)
sp_y = sparse_batch_norm(sp_x)
out = sp_y.to_dense()
......
......@@ -45,7 +45,7 @@ class TestMaxPool3DFunc(unittest.TestCase):
self.setUp()
self.dense_x.stop_gradient = False
sparse_x = self.dense_x.to_sparse_coo(4)
sparse_out = paddle.incubate.sparse.nn.functional.max_pool3d(
sparse_out = paddle.sparse.nn.functional.max_pool3d(
sparse_x,
self.kernel_sizes,
stride=self.strides,
......@@ -106,8 +106,8 @@ class TestMaxPool3DAPI(unittest.TestCase):
with _test_eager_guard():
dense_x = paddle.randn((2, 3, 6, 6, 3))
sparse_x = dense_x.to_sparse_coo(4)
max_pool3d = paddle.incubate.sparse.nn.MaxPool3D(
kernel_size=3, data_format='NDHWC')
max_pool3d = paddle.sparse.nn.MaxPool3D(kernel_size=3,
data_format='NDHWC')
out = max_pool3d(sparse_x)
out = out.to_dense()
......
......@@ -19,7 +19,7 @@ import unittest
class TestReshape(unittest.TestCase):
"""
Test the API paddle.incubate.sparse.reshape on some sparse tensors.
Test the API paddle.sparse.reshape on some sparse tensors.
x: sparse, out: sparse
"""
......@@ -31,7 +31,7 @@ class TestReshape(unittest.TestCase):
Transform a sparse tensor with shape "x_shape" to
a sparse tensor with shape "new_shape".
Compare the output of paddle.reshape and the output of
paddle.incubate.sparse.reshape.
paddle.sparse.reshape.
"""
mask = np.random.randint(0, 2, x_shape)
np_x = np.random.randint(-100, 100, x_shape) * mask
......@@ -49,7 +49,7 @@ class TestReshape(unittest.TestCase):
sp_x = paddle.to_tensor(np_x,
place=paddle.CPUPlace()).to_sparse_csr()
sp_x.stop_gradient = False
sp_out = paddle.incubate.sparse.reshape(sp_x, new_shape)
sp_out = paddle.sparse.reshape(sp_x, new_shape)
np.testing.assert_allclose(sp_out.to_dense().numpy(),
dense_out.numpy(),
......@@ -75,7 +75,7 @@ class TestReshape(unittest.TestCase):
sp_x = paddle.to_tensor(
np_x, place=paddle.CUDAPlace(0)).to_sparse_csr()
sp_x.stop_gradient = False
sp_out = paddle.incubate.sparse.reshape(sp_x, new_shape)
sp_out = paddle.sparse.reshape(sp_x, new_shape)
np.testing.assert_allclose(sp_out.to_dense().numpy(),
dense_out.numpy(),
......
......@@ -44,7 +44,7 @@ class TestCsrSoftmax(unittest.TestCase):
np_out = np.concatenate([np_out, x_exp / x_exp_sum])
csr = paddle.to_tensor(np_x, stop_gradient=False).to_sparse_csr()
m = paddle.incubate.sparse.nn.Softmax()
m = paddle.sparse.nn.Softmax()
out = m(csr)
np.testing.assert_allclose(out.crows().numpy(),
np_csr.indptr,
......@@ -101,7 +101,7 @@ class TestCsrSoftmax(unittest.TestCase):
np_out = np.concatenate([np_out, x_exp / x_exp_sum])
csr = paddle.to_tensor(np_x, stop_gradient=False).to_sparse_csr()
m = paddle.incubate.sparse.nn.Softmax()
m = paddle.sparse.nn.Softmax()
out = m(csr)
np.testing.assert_allclose(out.values().numpy(), np_out, rtol=1e-05)
......
......@@ -35,7 +35,7 @@ class TestTranspose(unittest.TestCase):
else:
sp_x = origin_x.detach().to_sparse_csr()
sp_x.stop_gradient = False
sp_out = paddle.incubate.sparse.transpose(sp_x, dims)
sp_out = paddle.sparse.transpose(sp_x, dims)
np.testing.assert_allclose(sp_out.to_dense().numpy(),
dense_out.numpy(),
......
......@@ -90,84 +90,79 @@ class TestSparseUnary(unittest.TestCase):
self.check_result(dense_func, sparse_func, 'csr', attr1, attr2)
def test_sparse_sin(self):
self.compare_with_dense(paddle.sin, paddle.incubate.sparse.sin)
self.compare_with_dense(paddle.sin, paddle.sparse.sin)
def test_sparse_tan(self):
self.compare_with_dense(paddle.tan, paddle.incubate.sparse.tan)
self.compare_with_dense(paddle.tan, paddle.sparse.tan)
def test_sparse_asin(self):
self.compare_with_dense(paddle.asin, paddle.incubate.sparse.asin)
self.compare_with_dense(paddle.asin, paddle.sparse.asin)
def test_sparse_atan(self):
self.compare_with_dense(paddle.atan, paddle.incubate.sparse.atan)
self.compare_with_dense(paddle.atan, paddle.sparse.atan)
def test_sparse_sinh(self):
self.compare_with_dense(paddle.sinh, paddle.incubate.sparse.sinh)
self.compare_with_dense(paddle.sinh, paddle.sparse.sinh)
def test_sparse_tanh(self):
self.compare_with_dense(paddle.tanh, paddle.incubate.sparse.tanh)
self.compare_with_dense(paddle.tanh, paddle.sparse.tanh)
def test_sparse_asinh(self):
self.compare_with_dense(paddle.asinh, paddle.incubate.sparse.asinh)
self.compare_with_dense(paddle.asinh, paddle.sparse.asinh)
def test_sparse_atanh(self):
self.compare_with_dense(paddle.atanh, paddle.incubate.sparse.atanh)
self.compare_with_dense(paddle.atanh, paddle.sparse.atanh)
def test_sparse_sqrt(self):
self.compare_with_dense(paddle.sqrt, paddle.incubate.sparse.sqrt)
self.compare_with_dense(paddle.sqrt, paddle.sparse.sqrt)
def test_sparse_square(self):
self.compare_with_dense(paddle.square, paddle.incubate.sparse.square)
self.compare_with_dense(paddle.square, paddle.sparse.square)
def test_sparse_log1p(self):
self.compare_with_dense(paddle.log1p, paddle.incubate.sparse.log1p)
self.compare_with_dense(paddle.log1p, paddle.sparse.log1p)
def test_sparse_relu(self):
self.compare_with_dense(paddle.nn.ReLU(),
paddle.incubate.sparse.nn.ReLU())
self.compare_with_dense(paddle.nn.ReLU(), paddle.sparse.nn.ReLU())
def test_sparse_relu6(self):
self.compare_with_dense(paddle.nn.ReLU6(),
paddle.incubate.sparse.nn.ReLU6())
self.compare_with_dense(paddle.nn.ReLU6(), paddle.sparse.nn.ReLU6())
def test_sparse_leaky_relu(self):
self.compare_with_dense(paddle.nn.LeakyReLU(0.1),
paddle.incubate.sparse.nn.LeakyReLU(0.1))
paddle.sparse.nn.LeakyReLU(0.1))
def test_sparse_abs(self):
self.compare_with_dense(paddle.abs, paddle.incubate.sparse.abs)
self.compare_with_dense(paddle.abs, paddle.sparse.abs)
def test_sparse_expm1(self):
self.compare_with_dense(paddle.expm1, paddle.incubate.sparse.expm1)
self.compare_with_dense(paddle.expm1, paddle.sparse.expm1)
def test_sparse_deg2rad(self):
self.compare_with_dense(paddle.deg2rad, paddle.incubate.sparse.deg2rad)
self.compare_with_dense(paddle.deg2rad, paddle.sparse.deg2rad)
def test_sparse_rad2deg(self):
self.compare_with_dense(paddle.rad2deg, paddle.incubate.sparse.rad2deg)
self.compare_with_dense(paddle.rad2deg, paddle.sparse.rad2deg)
def test_sparse_neg(self):
self.compare_with_dense(paddle.neg, paddle.incubate.sparse.neg)
self.compare_with_dense(paddle.neg, paddle.sparse.neg)
def test_sparse_pow(self):
self.compare_with_dense_one_attr(paddle.pow, paddle.incubate.sparse.pow,
3)
self.compare_with_dense_one_attr(paddle.pow, paddle.sparse.pow, 3)
def test_sparse_mul_scalar(self):
self.compare_with_dense_one_attr(paddle.Tensor.__mul__,
paddle.incubate.sparse.multiply, 3)
paddle.sparse.multiply, 3)
def test_sparse_div_scalar(self):
self.compare_with_dense_one_attr(paddle.Tensor.__div__,
paddle.incubate.sparse.divide, 2)
paddle.sparse.divide, 2)
def test_sparse_cast(self):
self.compare_with_dense_two_attr(paddle.cast,
paddle.incubate.sparse.cast, 'int32',
'float32')
self.compare_with_dense_two_attr(paddle.cast,
paddle.incubate.sparse.cast, 'int32',
'float64')
self.compare_with_dense_two_attr(paddle.cast, paddle.sparse.cast,
'int32', 'float32')
self.compare_with_dense_two_attr(paddle.cast, paddle.sparse.cast,
'int32', 'float64')
if __name__ == "__main__":
......
......@@ -31,10 +31,10 @@ class TestSparseCreate(unittest.TestCase):
dense_shape = [3, 4]
dense_indices = paddle.to_tensor(indices)
dense_elements = paddle.to_tensor(values, dtype='float32')
coo = paddle.incubate.sparse.sparse_coo_tensor(dense_indices,
dense_elements,
dense_shape,
stop_gradient=False)
coo = paddle.sparse.sparse_coo_tensor(dense_indices,
dense_elements,
dense_shape,
stop_gradient=False)
# test the to_string.py
assert np.array_equal(indices, coo.indices().numpy())
assert np.array_equal(values, coo.values().numpy())
......@@ -44,8 +44,7 @@ class TestSparseCreate(unittest.TestCase):
indices = [[0, 1, 2], [1, 2, 0]]
values = [1.0, 2.0, 3.0]
dense_shape = [3, 3]
coo = paddle.incubate.sparse.sparse_coo_tensor(
indices, values, dense_shape)
coo = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape)
assert np.array_equal(3, coo.nnz())
assert np.array_equal(indices, coo.indices().numpy())
assert np.array_equal(values, coo.values().numpy())
......@@ -60,12 +59,11 @@ class TestSparseCreate(unittest.TestCase):
dense_cols = paddle.to_tensor(cols)
dense_elements = paddle.to_tensor(values, dtype='float32')
stop_gradient = False
csr = paddle.incubate.sparse.sparse_csr_tensor(
dense_crows,
dense_cols,
dense_elements,
dense_shape,
stop_gradient=stop_gradient)
csr = paddle.sparse.sparse_csr_tensor(dense_crows,
dense_cols,
dense_elements,
dense_shape,
stop_gradient=stop_gradient)
def test_create_csr_by_np(self):
with _test_eager_guard():
......@@ -73,8 +71,8 @@ class TestSparseCreate(unittest.TestCase):
cols = [1, 3, 2, 0, 1]
values = [1, 2, 3, 4, 5]
dense_shape = [3, 4]
csr = paddle.incubate.sparse.sparse_csr_tensor(
crows, cols, values, dense_shape)
csr = paddle.sparse.sparse_csr_tensor(crows, cols, values,
dense_shape)
# test the to_string.py
assert np.array_equal(5, csr.nnz())
assert np.array_equal(crows, csr.crows().numpy())
......@@ -87,10 +85,10 @@ class TestSparseCreate(unittest.TestCase):
indices = [[0, 1], [0, 1]]
values = [1.0, 2.0]
dense_shape = [2, 2]
coo = paddle.incubate.sparse.sparse_coo_tensor(indices,
values,
dense_shape,
place=place)
coo = paddle.sparse.sparse_coo_tensor(indices,
values,
dense_shape,
place=place)
assert coo.place.is_cpu_place()
assert coo.values().place.is_cpu_place()
assert coo.indices().place.is_cpu_place()
......@@ -98,10 +96,10 @@ class TestSparseCreate(unittest.TestCase):
crows = [0, 2, 3, 5]
cols = [1, 3, 2, 0, 1]
values = [1.0, 2.0, 3.0, 4.0, 5.0]
csr = paddle.incubate.sparse.sparse_csr_tensor(crows,
cols,
values, [3, 5],
place=place)
csr = paddle.sparse.sparse_csr_tensor(crows,
cols,
values, [3, 5],
place=place)
assert csr.place.is_cpu_place()
assert csr.crows().place.is_cpu_place()
assert csr.cols().place.is_cpu_place()
......@@ -114,19 +112,19 @@ class TestSparseCreate(unittest.TestCase):
dense_shape = [2, 2]
indices = paddle.to_tensor(indices, dtype='int32')
values = paddle.to_tensor(values, dtype='float32')
coo = paddle.incubate.sparse.sparse_coo_tensor(indices,
values,
dense_shape,
dtype='float64')
coo = paddle.sparse.sparse_coo_tensor(indices,
values,
dense_shape,
dtype='float64')
assert coo.dtype == paddle.float64
crows = [0, 2, 3, 5]
cols = [1, 3, 2, 0, 1]
values = [1.0, 2.0, 3.0, 4.0, 5.0]
csr = paddle.incubate.sparse.sparse_csr_tensor(crows,
cols,
values, [3, 5],
dtype='float16')
csr = paddle.sparse.sparse_csr_tensor(crows,
cols,
values, [3, 5],
dtype='float16')
assert csr.dtype == paddle.float16
def test_create_coo_no_shape(self):
......@@ -135,7 +133,7 @@ class TestSparseCreate(unittest.TestCase):
values = [1.0, 2.0]
indices = paddle.to_tensor(indices, dtype='int32')
values = paddle.to_tensor(values, dtype='float32')
coo = paddle.incubate.sparse.sparse_coo_tensor(indices, values)
coo = paddle.sparse.sparse_coo_tensor(indices, values)
assert [2, 2] == coo.shape
......@@ -153,7 +151,7 @@ class TestSparseConvert(unittest.TestCase):
#test to_sparse_coo_grad backward
out_grad_indices = [[0, 1], [0, 1]]
out_grad_values = [2.0, 3.0]
out_grad = paddle.incubate.sparse.sparse_coo_tensor(
out_grad = paddle.sparse.sparse_coo_tensor(
paddle.to_tensor(out_grad_indices),
paddle.to_tensor(out_grad_values),
shape=out.shape,
......@@ -169,7 +167,7 @@ class TestSparseConvert(unittest.TestCase):
values = [1.0, 2.0, 3.0, 4.0, 5.0]
indices_dtypes = ['int32', 'int64']
for indices_dtype in indices_dtypes:
sparse_x = paddle.incubate.sparse.sparse_coo_tensor(
sparse_x = paddle.sparse.sparse_coo_tensor(
paddle.to_tensor(indices, dtype=indices_dtype),
paddle.to_tensor(values),
shape=[3, 4],
......@@ -185,7 +183,7 @@ class TestSparseConvert(unittest.TestCase):
sparse_x.grad.values().numpy())
paddle.device.set_device("cpu")
sparse_x_cpu = paddle.incubate.sparse.sparse_coo_tensor(
sparse_x_cpu = paddle.sparse.sparse_coo_tensor(
paddle.to_tensor(indices, dtype=indices_dtype),
paddle.to_tensor(values),
shape=[3, 4],
......@@ -216,7 +214,7 @@ class TestSparseConvert(unittest.TestCase):
with _test_eager_guard():
indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
values = [1.0, 2.0, 3.0, 4.0, 5.0]
sparse_x = paddle.incubate.sparse.sparse_coo_tensor(
sparse_x = paddle.sparse.sparse_coo_tensor(
paddle.to_tensor(indices),
paddle.to_tensor(values),
shape=[3, 4],
......@@ -229,7 +227,7 @@ class TestSparseConvert(unittest.TestCase):
indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
values = [[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0],
[5.0, 5.0]]
sparse_x = paddle.incubate.sparse.sparse_coo_tensor(
sparse_x = paddle.sparse.sparse_coo_tensor(
paddle.to_tensor(indices),
paddle.to_tensor(values),
shape=[3, 4, 2],
......@@ -254,13 +252,13 @@ class TestSparseConvert(unittest.TestCase):
values = paddle.to_tensor(values,
dtype='float32',
stop_gradient=False)
sparse_x = paddle.incubate.sparse.sparse_coo_tensor(
sparse_x = paddle.sparse.sparse_coo_tensor(
indices, values, shape=[2, 2], stop_gradient=False)
grad_indices = [[0, 1], [1, 1]]
grad_values = [2, 3]
grad_indices = paddle.to_tensor(grad_indices, dtype='int32')
grad_values = paddle.to_tensor(grad_values, dtype='float32')
sparse_out_grad = paddle.incubate.sparse.sparse_coo_tensor(
sparse_out_grad = paddle.sparse.sparse_coo_tensor(
grad_indices, grad_values, shape=[2, 2])
sparse_x.backward(sparse_out_grad)
correct_values_grad = [0, 3]
......@@ -272,11 +270,11 @@ class TestSparseConvert(unittest.TestCase):
values = paddle.to_tensor(values,
dtype='float32',
stop_gradient=False)
sparse_x = paddle.incubate.sparse.sparse_coo_tensor(
sparse_x = paddle.sparse.sparse_coo_tensor(
indices, values, shape=[2, 2, 2], stop_gradient=False)
grad_values = [[2, 2], [3, 3]]
grad_values = paddle.to_tensor(grad_values, dtype='float32')
sparse_out_grad = paddle.incubate.sparse.sparse_coo_tensor(
sparse_out_grad = paddle.sparse.sparse_coo_tensor(
grad_indices, grad_values, shape=[2, 2, 2])
sparse_x.backward(sparse_out_grad)
correct_values_grad = [[0, 0], [3, 3]]
......@@ -294,9 +292,8 @@ class TestSparseConvert(unittest.TestCase):
values = [1.0, 2.0, 3.0]
indices = paddle.to_tensor(indices, dtype='int32')
values = paddle.to_tensor(values, dtype='float32')
sparse_x = paddle.incubate.sparse.sparse_coo_tensor(
indices, values)
sparse_x = paddle.incubate.sparse.coalesce(sparse_x)
sparse_x = paddle.sparse.sparse_coo_tensor(indices, values)
sparse_x = paddle.sparse.coalesce(sparse_x)
indices_sorted = [[0, 1], [1, 0]]
values_sorted = [5.0, 1.0]
assert np.array_equal(indices_sorted,
......@@ -307,9 +304,8 @@ class TestSparseConvert(unittest.TestCase):
# test when the non-zero values form a vector
values = [[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]]
values = paddle.to_tensor(values, dtype='float32')
sparse_x = paddle.incubate.sparse.sparse_coo_tensor(
indices, values)
sparse_x = paddle.incubate.sparse.coalesce(sparse_x)
sparse_x = paddle.sparse.sparse_coo_tensor(indices, values)
sparse_x = paddle.sparse.coalesce(sparse_x)
values_sorted = [[5.0, 5.0], [1.0, 1.0]]
assert np.array_equal(indices_sorted,
sparse_x.indices().numpy())
......@@ -363,8 +359,9 @@ class TestCooError(unittest.TestCase):
values = [1, 2]
# 1. the shape is too small
dense_shape = [2, 2]
sparse_x = paddle.incubate.sparse.sparse_coo_tensor(
indices, values, shape=dense_shape)
sparse_x = paddle.sparse.sparse_coo_tensor(indices,
values,
shape=dense_shape)
def test_same_nnz(self):
with _test_eager_guard():
......@@ -372,8 +369,7 @@ class TestCooError(unittest.TestCase):
# 2. the nnz of indices must match the nnz of values
indices = [[1, 2], [1, 0]]
values = [1, 2, 3]
sparse_x = paddle.incubate.sparse.sparse_coo_tensor(
indices, values)
sparse_x = paddle.sparse.sparse_coo_tensor(indices, values)
def test_same_dimensions(self):
with _test_eager_guard():
......@@ -381,17 +377,16 @@ class TestCooError(unittest.TestCase):
indices = [[1, 2], [1, 0]]
values = [1, 2, 3]
shape = [2, 3, 4]
sparse_x = paddle.incubate.sparse.sparse_coo_tensor(indices,
values,
shape=shape)
sparse_x = paddle.sparse.sparse_coo_tensor(indices,
values,
shape=shape)
def test_indices_dtype(self):
with _test_eager_guard():
with self.assertRaises(TypeError):
indices = [[1.0, 2.0], [0, 1]]
values = [1, 2]
sparse_x = paddle.incubate.sparse.sparse_coo_tensor(
indices, values)
sparse_x = paddle.sparse.sparse_coo_tensor(indices, values)
class TestCsrError(unittest.TestCase):
......@@ -403,7 +398,7 @@ class TestCsrError(unittest.TestCase):
cols = [0, 1, 2]
values = [1, 2, 3]
shape = [3]
sparse_x = paddle.incubate.sparse.sparse_csr_tensor(
sparse_x = paddle.sparse.sparse_csr_tensor(
crows, cols, values, shape)
def test_dimension2(self):
......@@ -413,7 +408,7 @@ class TestCsrError(unittest.TestCase):
cols = [0, 1, 2]
values = [1, 2, 3]
shape = [3, 3, 3, 3]
sparse_x = paddle.incubate.sparse.sparse_csr_tensor(
sparse_x = paddle.sparse.sparse_csr_tensor(
crows, cols, values, shape)
def test_same_shape1(self):
......@@ -423,7 +418,7 @@ class TestCsrError(unittest.TestCase):
cols = [0, 1, 2, 3]
values = [1, 2, 3]
shape = [3, 4]
sparse_x = paddle.incubate.sparse.sparse_csr_tensor(
sparse_x = paddle.sparse.sparse_csr_tensor(
crows, cols, values, shape)
def test_same_shape2(self):
......@@ -433,7 +428,7 @@ class TestCsrError(unittest.TestCase):
cols = [0, 1, 2, 3]
values = [1, 2, 3, 4]
shape = [3, 4]
sparse_x = paddle.incubate.sparse.sparse_csr_tensor(
sparse_x = paddle.sparse.sparse_csr_tensor(
crows, cols, values, shape)
def test_same_shape3(self):
......@@ -443,7 +438,7 @@ class TestCsrError(unittest.TestCase):
cols = [0, 1, 2, 3, 0, 1, 2]
values = [1, 2, 3, 4, 0, 1, 2]
shape = [2, 3, 4]
sparse_x = paddle.incubate.sparse.sparse_csr_tensor(
sparse_x = paddle.sparse.sparse_csr_tensor(
crows, cols, values, shape)
def test_crows_first_value(self):
......@@ -453,7 +448,7 @@ class TestCsrError(unittest.TestCase):
cols = [0, 1, 2]
values = [1, 2, 3]
shape = [3, 4]
sparse_x = paddle.incubate.sparse.sparse_csr_tensor(
sparse_x = paddle.sparse.sparse_csr_tensor(
crows, cols, values, shape)
def test_dtype(self):
......@@ -463,7 +458,7 @@ class TestCsrError(unittest.TestCase):
cols = [0, 1, 2]
values = [1, 2, 3]
shape = [3]
sparse_x = paddle.incubate.sparse.sparse_csr_tensor(
sparse_x = paddle.sparse.sparse_csr_tensor(
crows, cols, values, shape)
......
......@@ -31,7 +31,6 @@ from .passes import fuse_resnet_unit_pass
from . import autograd #noqa: F401
from . import autotune #noqa: F401
from . import sparse #noqa: F401
from . import nn #noqa: F401
from . import asp #noqa: F401
......
......@@ -51,9 +51,36 @@ from .multiary import addmm
from . import nn
__all__ = [
'sparse_coo_tensor', 'sparse_csr_tensor', 'sin', 'tan', 'asin', 'atan',
'sinh', 'tanh', 'asinh', 'atanh', 'sqrt', 'square', 'log1p', 'abs', 'pow',
'cast', 'neg', 'deg2rad', 'rad2deg', 'expm1', 'mv', 'matmul',
'masked_matmul', 'addmm', 'add', 'subtract', 'transpose', 'multiply',
'divide', 'coalesce', 'is_same_shape', 'reshape'
'sparse_coo_tensor',
'sparse_csr_tensor',
'sin',
'tan',
'asin',
'atan',
'sinh',
'tanh',
'asinh',
'atanh',
'sqrt',
'square',
'log1p',
'abs',
'pow',
'cast',
'neg',
'deg2rad',
'rad2deg',
'expm1',
'mv',
'matmul',
'masked_matmul',
'addmm',
'add',
'subtract',
'transpose',
'multiply',
'divide',
'coalesce',
'is_same_shape',
'reshape',
]
......@@ -64,19 +64,20 @@ def matmul(x, y, name=None):
.. code-block:: python
# required: gpu
import paddle
# csr @ dense -> dense
crows = [0, 1, 2, 3]
cols = [1, 2, 0]
values = [1., 2., 3.]
csr = paddle.incubate.sparse.sparse_csr_tensor(crows, cols, values, [3, 3])
csr = paddle.sparse.sparse_csr_tensor(crows, cols, values, [3, 3])
# Tensor(shape=[3, 3], dtype=paddle.float32, place=Place(gpu:0), stop_gradient=True,
# crows=[0, 1, 2, 3],
# cols=[1, 2, 0],
# values=[1., 2., 3.])
dense = paddle.ones([3, 2])
out = paddle.incubate.sparse.matmul(csr, dense)
out = paddle.sparse.matmul(csr, dense)
# Tensor(shape=[3, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [[1., 1.],
# [2., 2.],
......@@ -85,13 +86,13 @@ def matmul(x, y, name=None):
# coo @ dense -> dense
indices = [[0, 1, 2], [1, 2, 0]]
values = [1., 2., 3.]
coo = paddle.incubate.sparse.sparse_coo_tensor(indices, values, [3, 3])
coo = paddle.sparse.sparse_coo_tensor(indices, values, [3, 3])
# Tensor(shape=[3, 3], dtype=paddle.float32, place=Place(gpu:0), stop_gradient=True,
# indices=[[0, 1, 2],
# [1, 2, 0]],
# values=[1., 2., 3.])
dense = paddle.ones([3, 2])
out = paddle.incubate.sparse.matmul(coo, dense)
out = paddle.sparse.matmul(coo, dense)
# Tensor(shape=[3, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [[1., 1.],
# [2., 2.],
......@@ -133,6 +134,7 @@ def masked_matmul(x, y, mask, name=None):
.. code-block:: python
# required: gpu
import paddle
paddle.seed(100)
......@@ -141,7 +143,7 @@ def masked_matmul(x, y, mask, name=None):
cols = [1, 3, 2, 0, 1]
values = [1., 2., 3., 4., 5.]
dense_shape = [3, 4]
mask = paddle.incubate.sparse.sparse_csr_tensor(crows, cols, values, dense_shape)
mask = paddle.sparse.sparse_csr_tensor(crows, cols, values, dense_shape)
# Tensor(shape=[3, 4], dtype=paddle.float32, place=Place(gpu:0), stop_gradient=True,
# crows=[0, 2, 3, 5],
# cols=[1, 3, 2, 0, 1],
......@@ -150,7 +152,7 @@ def masked_matmul(x, y, mask, name=None):
x = paddle.rand([3, 5])
y = paddle.rand([5, 4])
out = paddle.incubate.sparse.masked_matmul(x, y, mask)
out = paddle.sparse.masked_matmul(x, y, mask)
# Tensor(shape=[3, 4], dtype=paddle.float32, place=Place(gpu:0), stop_gradient=True,
# crows=[0, 2, 3, 5],
# cols=[1, 3, 2, 0, 1],
......@@ -191,6 +193,7 @@ def mv(x, vec, name=None):
.. code-block:: python
# required: gpu
import paddle
paddle.seed(100)
......@@ -199,14 +202,14 @@ def mv(x, vec, name=None):
cols = [1, 3, 2, 0, 1]
values = [1., 2., 3., 4., 5.]
dense_shape = [3, 4]
csr = paddle.incubate.sparse.sparse_csr_tensor(crows, cols, values, dense_shape)
csr = paddle.sparse.sparse_csr_tensor(crows, cols, values, dense_shape)
# Tensor(shape=[3, 4], dtype=paddle.float32, place=Place(gpu:0), stop_gradient=True,
# crows=[0, 2, 3, 5],
# cols=[1, 3, 2, 0, 1],
# values=[1., 2., 3., 4., 5.])
vec = paddle.randn([4])
out = paddle.incubate.sparse.mv(csr, vec)
out = paddle.sparse.mv(csr, vec)
# Tensor(shape=[3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [-3.85499096, -2.42975140, -1.75087738])
......@@ -245,7 +248,7 @@ def add(x, y, name=None):
y = paddle.to_tensor([[0, 0, 0, -2], [0, 2, -3, 0], [2, 3, 4, 8]], 'float32')
sparse_x = x.to_sparse_csr()
sparse_y = y.to_sparse_csr()
sparse_z = paddle.incubate.sparse.add(sparse_x, sparse_y)
sparse_z = paddle.sparse.add(sparse_x, sparse_y)
print(sparse_z.to_dense())
# [[ 0., -1., 0., 0.],
......@@ -302,7 +305,7 @@ def subtract(x, y, name=None):
y = paddle.to_tensor([[0, 0, 0, -2], [0, 2, -3, 0], [2, 3, 4, 8]], 'float32')
sparse_x = x.to_sparse_csr()
sparse_y = y.to_sparse_csr()
sparse_z = paddle.incubate.sparse.subtract(sparse_x, sparse_y)
sparse_z = paddle.sparse.subtract(sparse_x, sparse_y)
print(sparse_z.to_dense())
# [[ 0., -1., 0., 4.],
......@@ -347,7 +350,7 @@ def multiply(x, y, name=None):
y = paddle.to_tensor([[0, 0, 0, -2], [0, 2, -3, 0], [2, 3, 4, 8]], 'float32')
sparse_x = x.to_sparse_csr()
sparse_y = y.to_sparse_csr()
sparse_z = paddle.incubate.sparse.multiply(sparse_x, sparse_y)
sparse_z = paddle.sparse.multiply(sparse_x, sparse_y)
print(sparse_z.to_dense())
# [[ 0., 0., 0., -4.],
......@@ -395,7 +398,7 @@ def divide(x, y, name=None):
y = paddle.to_tensor([[0, 0, 0, -2], [0, 2, -3, 0], [2, 3, 4, 8]], 'float32')
sparse_x = x.to_sparse_csr()
sparse_y = y.to_sparse_csr()
sparse_z = paddle.incubate.sparse.divide(sparse_x, sparse_y)
sparse_z = paddle.sparse.divide(sparse_x, sparse_y)
print(sparse_z.to_dense())
# [[ nan , -inf , nan , -1. ],
......@@ -438,9 +441,9 @@ def is_same_shape(x, y):
y = y.to_sparse_csr()
z = paddle.rand([2, 5])
paddle.incubate.sparse.is_same_shape(x, y)
paddle.sparse.is_same_shape(x, y)
# True
paddle.incubate.sparse.is_same_shape(x, z)
paddle.sparse.is_same_shape(x, z)
# False
"""
......
......@@ -107,7 +107,7 @@ def sparse_coo_tensor(indices,
indices = [[0, 1, 2], [1, 2, 0]]
values = [1.0, 2.0, 3.0]
dense_shape = [3, 3]
coo = paddle.incubate.sparse.sparse_coo_tensor(indices, values, dense_shape)
coo = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape)
# print(coo)
# Tensor(shape=[2, 3], dtype=paddle.float32, place=Place(gpu:0), stop_gradient=True,
# indices=[[0, 1, 2],
......@@ -228,7 +228,7 @@ def sparse_csr_tensor(crows,
cols = [1, 3, 2, 0, 1]
values = [1, 2, 3, 4, 5]
dense_shape = [3, 4]
csr = paddle.incubate.sparse.sparse_csr_tensor(crows, cols, values, dense_shape)
csr = paddle.sparse.sparse_csr_tensor(crows, cols, values, dense_shape)
# print(csr)
# Tensor(shape=[3, 4], dtype=paddle.int64, place=Place(gpu:0), stop_gradient=True,
# crows=[0, 2, 3, 5],
......
......@@ -58,6 +58,7 @@ def addmm(input, x, y, beta=1.0, alpha=1.0, name=None):
.. code-block:: python
# required: gpu
import paddle
# dense + csr @ dense -> dense
......@@ -65,17 +66,17 @@ def addmm(input, x, y, beta=1.0, alpha=1.0, name=None):
crows = [0, 1, 2, 3]
cols = [1, 2, 0]
values = [1., 2., 3.]
x = paddle.incubate.sparse.sparse_csr_tensor(crows, cols, values, [3, 3])
x = paddle.sparse.sparse_csr_tensor(crows, cols, values, [3, 3])
y = paddle.rand([3, 2])
out = paddle.incubate.sparse.addmm(input, x, y, 3.0, 2.0)
out = paddle.sparse.addmm(input, x, y, 3.0, 2.0)
# dense + coo @ dense -> dense
input = paddle.rand([3, 2])
indices = [[0, 1, 2], [1, 2, 0]]
values = [1., 2., 3.]
x = paddle.incubate.sparse.sparse_coo_tensor(indices, values, [3, 3])
x = paddle.sparse.sparse_coo_tensor(indices, values, [3, 3])
y = paddle.rand([3, 2])
out = paddle.incubate.sparse.addmm(input, x, y, 3.0, 2.0)
out = paddle.sparse.addmm(input, x, y, 3.0, 2.0)
"""
return _C_ops.sparse_addmm(input, x, y, alpha, beta)
......@@ -43,7 +43,7 @@ def relu(x, name=None):
dense_x = paddle.to_tensor([-2., 0., 1.])
sparse_x = dense_x.to_sparse_coo(1)
out = paddle.incubate.sparse.nn.functional.relu(sparse_x)
out = paddle.sparse.nn.functional.relu(sparse_x)
# [0., 0., 1.]
"""
if in_dynamic_mode():
......@@ -104,7 +104,7 @@ def softmax(x, axis=-1, name=None):
# values=[0.96823406, 0.19722934, 0.94373937, 0.02060066, 0.71456372,
# 0.98275049])
out = paddle.incubate.sparse.nn.functional.softmax(csr)
out = paddle.sparse.nn.functional.softmax(csr)
# Tensor(shape=[3, 4], dtype=paddle.float64, place=Place(gpu:0), stop_gradient=True,
# crows=[0, 2, 5, 6],
# cols=[2, 3, 0, 2, 3, 3],
......@@ -139,7 +139,7 @@ def relu6(x, name=None):
dense_x = paddle.to_tensor([-2., 0., 8.])
sparse_x = dense_x.to_sparse_coo(1)
out = paddle.incubate.sparse.nn.functional.relu6(sparse_x)
out = paddle.sparse.nn.functional.relu6(sparse_x)
"""
return _C_ops.sparse_relu6(x, 6.0)
......@@ -175,6 +175,6 @@ def leaky_relu(x, negative_slope=0.01, name=None):
dense_x = paddle.to_tensor([-2., 0., 5.])
sparse_x = dense_x.to_sparse_coo(1)
out = paddle.incubate.sparse.nn.functional.leaky_relu(sparse_x, 0.5)
out = paddle.sparse.nn.functional.leaky_relu(sparse_x, 0.5)
"""
return _C_ops.sparse_leaky_relu(x, negative_slope)
......@@ -201,9 +201,9 @@ def conv3d(x,
indices = paddle.to_tensor(indices, dtype='int32')
values = paddle.to_tensor(values, dtype='float32')
dense_shape = [1, 1, 3, 4, 1]
sparse_x = paddle.incubate.sparse.sparse_coo_tensor(indices, values, dense_shape, stop_gradient=True)
sparse_x = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape, stop_gradient=True)
weight = paddle.randn((1, 3, 3, 1, 1), dtype='float32')
y = paddle.incubate.sparse.nn.functional.conv3d(sparse_x, weight)
y = paddle.sparse.nn.functional.conv3d(sparse_x, weight)
print(y.shape)
# (1, 1, 1, 2, 1)
"""
......@@ -321,9 +321,9 @@ def subm_conv3d(x,
indices = paddle.to_tensor(indices, dtype='int32')
values = paddle.to_tensor(values, dtype='float32')
dense_shape = [1, 1, 3, 4, 1]
sparse_x = paddle.incubate.sparse.sparse_coo_tensor(indices, values, dense_shape, stop_gradient=True)
sparse_x = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape, stop_gradient=True)
weight = paddle.randn((1, 3, 3, 1, 1), dtype='float32')
y = paddle.incubate.sparse.nn.functional.subm_conv3d(sparse_x, weight)
y = paddle.sparse.nn.functional.subm_conv3d(sparse_x, weight)
print(y.shape)
#(1, 1, 3, 4, 1)
"""
......
......@@ -70,7 +70,7 @@ def max_pool3d(x,
kernel_sizes = [3, 3, 3]
paddings = [0, 0, 0]
strides = [1, 1, 1]
out = paddle.incubate.sparse.nn.functional.max_pool3d(sparse_x, kernel_sizes, stride=strides, padding=paddings)
out = paddle.sparse.nn.functional.max_pool3d(sparse_x, kernel_sizes, stride=strides, padding=paddings)
#[1, 2, 2, 2, 3]
"""
......
......@@ -37,7 +37,7 @@ def attention(query,
.. math::
result = softmax(\frac{ Q * K^T }{\sqrt{d}}) * V
result = softmax(\\frac{ Q * K^T }{\\sqrt{d}}) * V
where : ``Q``, ``K``, and ``V`` represent the three input parameters of the attention module.
The shape of the three parameters are: `[batch_size, num_heads, seq_len, head_dim]`, and
......@@ -64,6 +64,7 @@ def attention(query,
Examples:
.. code-block:: python
# required: gpu
import paddle
batch_size = 16
......@@ -85,7 +86,7 @@ def attention(query,
kp_mask = paddle.randint(0, 2, [batch_size, seq_len])
attn_mask = paddle.randint(0, 2, [seq_len, seq_len])
output = paddle.incubate.sparse.nn.functional.attention(query, key, value, sp_mask, kp_mask, attn_mask)
output = paddle.sparse.nn.functional.attention(query, key, value, sp_mask, kp_mask, attn_mask)
output.backward()
"""
return _C_ops.sparse_fused_attention(query, key, value, sparse_mask,
......
......@@ -41,7 +41,7 @@ class ReLU(Layer):
dense_x = paddle.to_tensor([-2., 0., 1.])
sparse_x = dense_x.to_sparse_coo(1)
relu = paddle.incubate.sparse.nn.ReLU()
relu = paddle.sparse.nn.ReLU()
out = relu(sparse_x)
# [0., 0., 1.]
"""
......@@ -102,7 +102,7 @@ class Softmax(Layer):
# values=[0.96823406, 0.19722934, 0.94373937, 0.02060066, 0.71456372,
# 0.98275049])
softmax = paddle.incubate.sparse.nn.Softmax()
softmax = paddle.sparse.nn.Softmax()
out = softmax(csr)
# Tensor(shape=[3, 4], dtype=paddle.float64, place=Place(gpu:0), stop_gradient=True,
# crows=[0, 2, 5, 6],
......@@ -147,7 +147,7 @@ class ReLU6(Layer):
dense_x = paddle.to_tensor([-2., 0., 8.])
sparse_x = dense_x.to_sparse_coo(1)
relu6 = paddle.incubate.sparse.nn.ReLU6()
relu6 = paddle.sparse.nn.ReLU6()
out = relu6(sparse_x)
"""
......@@ -194,7 +194,7 @@ class LeakyReLU(Layer):
dense_x = paddle.to_tensor([-2., 0., 5.])
sparse_x = dense_x.to_sparse_coo(1)
leaky_relu = paddle.incubate.sparse.nn.LeakyReLU(0.5)
leaky_relu = paddle.sparse.nn.LeakyReLU(0.5)
out = leaky_relu(sparse_x)
"""
......
......@@ -216,8 +216,8 @@ class Conv3D(_Conv3D):
indices = paddle.to_tensor(indices, dtype='int32')
values = paddle.to_tensor(values, dtype='float32')
dense_shape = [1, 1, 3, 4, 1]
sparse_x = paddle.incubate.sparse.sparse_coo_tensor(indices, values, dense_shape, stop_gradient=True)
conv = paddle.incubate.sparse.nn.Conv3D(1, 1, (1, 3, 3))
sparse_x = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape, stop_gradient=True)
conv = paddle.sparse.nn.Conv3D(1, 1, (1, 3, 3))
y = conv(sparse_x)
print(y.shape)
# (1, 1, 1, 2, 1)
......@@ -353,8 +353,8 @@ class SubmConv3D(_Conv3D):
dense_shape = [1, 1, 3, 4, 1]
indices = paddle.to_tensor(indices, dtype='int32')
values = paddle.to_tensor(values, dtype='float32')
sparse_x = paddle.incubate.sparse.sparse_coo_tensor(indices, values, dense_shape, stop_gradient=True)
subm_conv = paddle.incubate.sparse.nn.SubmConv3D(1, 1, (1, 3, 3))
sparse_x = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape, stop_gradient=True)
subm_conv = paddle.sparse.nn.SubmConv3D(1, 1, (1, 3, 3))
y = subm_conv(sparse_x)
print(y.shape)
# (1, 1, 3, 4, 1)
......
......@@ -91,7 +91,7 @@ class BatchNorm(paddle.nn.BatchNorm1D):
x_data = paddle.randn((1, 6, 6, 6, channels)).astype('float32')
dense_x = paddle.to_tensor(x_data)
sparse_x = dense_x.to_sparse_coo(4)
batch_norm = paddle.incubate.sparse.nn.BatchNorm(channels)
batch_norm = paddle.sparse.nn.BatchNorm(channels)
batch_norm_out = batch_norm(sparse_x)
print(batch_norm_out.shape)
# [1, 6, 6, 6, 3]
......@@ -262,7 +262,7 @@ class SyncBatchNorm(paddle.nn.SyncBatchNorm):
# required: gpu
import paddle
import paddle.incubate.sparse.nn as nn
import paddle.sparse.nn as nn
import numpy as np
x = np.array([[[[0.3, 0.4], [0.3, 0.07]], [[0.83, 0.37], [0.18, 0.93]]]]).astype('float32')
......@@ -306,7 +306,7 @@ class SyncBatchNorm(paddle.nn.SyncBatchNorm):
@classmethod
def convert_sync_batchnorm(cls, layer):
r"""
Helper function to convert :class: `paddle.incubate.sparse.nn.BatchNorm` layers in the model to :class: `paddle.incubate.sparse.nn.SyncBatchNorm` layers.
Helper function to convert :class: `paddle.sparse.nn.BatchNorm` layers in the model to :class: `paddle.sparse.nn.SyncBatchNorm` layers.
Parameters:
layer(paddle.nn.Layer): model containing one or more `BatchNorm` layers.
......@@ -319,7 +319,7 @@ class SyncBatchNorm(paddle.nn.SyncBatchNorm):
.. code-block:: python
import paddle
import paddle.incubate.sparse.nn as nn
import paddle.sparse.nn as nn
model = paddle.nn.Sequential(nn.Conv3D(3, 5, 3), nn.BatchNorm(5))
sync_model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
......
......@@ -66,7 +66,7 @@ class MaxPool3D(Layer):
with _test_eager_guard():
dense_x = paddle.randn((2, 3, 6, 6, 3))
sparse_x = dense_x.to_sparse_coo(4)
max_pool3d = paddle.incubate.sparse.nn.MaxPool3D(
max_pool3d = paddle.sparse.nn.MaxPool3D(
kernel_size=3, data_format='NDHWC')
out = max_pool3d(sparse_x)
#shape=[2, 1, 2, 2, 3]
......
......@@ -53,7 +53,7 @@ def sin(x, name=None):
dense_x = paddle.to_tensor([-2., 0., 1.])
sparse_x = dense_x.to_sparse_coo(1)
out = paddle.incubate.sparse.sin(sparse_x)
out = paddle.sparse.sin(sparse_x)
"""
return _C_ops.sparse_sin(x)
......@@ -83,7 +83,7 @@ def tan(x, name=None):
dense_x = paddle.to_tensor([-2., 0., 1.])
sparse_x = dense_x.to_sparse_coo(1)
out = paddle.incubate.sparse.tan(sparse_x)
out = paddle.sparse.tan(sparse_x)
"""
return _C_ops.sparse_tan(x)
......@@ -113,7 +113,7 @@ def asin(x, name=None):
dense_x = paddle.to_tensor([-2., 0., 1.])
sparse_x = dense_x.to_sparse_coo(1)
out = paddle.incubate.sparse.asin(sparse_x)
out = paddle.sparse.asin(sparse_x)
"""
return _C_ops.sparse_asin(x)
......@@ -144,7 +144,7 @@ def transpose(x, perm, name=None):
dense_x = paddle.to_tensor([[-2., 0.], [1., 2.]])
sparse_x = dense_x.to_sparse_coo(1)
out = paddle.incubate.sparse.transpose(sparse_x, [1, 0])
out = paddle.sparse.transpose(sparse_x, [1, 0])
"""
return _C_ops.sparse_transpose(x, perm)
......@@ -174,7 +174,7 @@ def atan(x, name=None):
dense_x = paddle.to_tensor([-2., 0., 1.])
sparse_x = dense_x.to_sparse_coo(1)
out = paddle.incubate.sparse.atan(sparse_x)
out = paddle.sparse.atan(sparse_x)
"""
return _C_ops.sparse_atan(x)
......@@ -204,7 +204,7 @@ def sinh(x, name=None):
dense_x = paddle.to_tensor([-2., 0., 1.])
sparse_x = dense_x.to_sparse_coo(1)
out = paddle.incubate.sparse.sinh(sparse_x)
out = paddle.sparse.sinh(sparse_x)
"""
return _C_ops.sparse_sinh(x)
......@@ -234,7 +234,7 @@ def asinh(x, name=None):
dense_x = paddle.to_tensor([-2., 0., 1.])
sparse_x = dense_x.to_sparse_coo(1)
out = paddle.incubate.sparse.asinh(sparse_x)
out = paddle.sparse.asinh(sparse_x)
"""
return _C_ops.sparse_asinh(x)
......@@ -264,7 +264,7 @@ def atanh(x, name=None):
dense_x = paddle.to_tensor([-2., 0., 1.])
sparse_x = dense_x.to_sparse_coo(1)
out = paddle.incubate.sparse.atanh(sparse_x)
out = paddle.sparse.atanh(sparse_x)
"""
return _C_ops.sparse_atanh(x)
......@@ -294,7 +294,7 @@ def tanh(x, name=None):
dense_x = paddle.to_tensor([-2., 0., 1.])
sparse_x = dense_x.to_sparse_coo(1)
out = paddle.incubate.sparse.tanh(sparse_x)
out = paddle.sparse.tanh(sparse_x)
"""
return _C_ops.sparse_tanh(x)
......@@ -324,7 +324,7 @@ def square(x, name=None):
dense_x = paddle.to_tensor([-2., 0., 1.])
sparse_x = dense_x.to_sparse_coo(1)
out = paddle.incubate.sparse.square(sparse_x)
out = paddle.sparse.square(sparse_x)
"""
return _C_ops.sparse_square(x)
......@@ -354,7 +354,7 @@ def sqrt(x, name=None):
dense_x = paddle.to_tensor([-2., 0., 1.])
sparse_x = dense_x.to_sparse_coo(1)
out = paddle.incubate.sparse.sqrt(sparse_x)
out = paddle.sparse.sqrt(sparse_x)
"""
return _C_ops.sparse_sqrt(x)
......@@ -384,7 +384,7 @@ def log1p(x, name=None):
dense_x = paddle.to_tensor([-2, 0, 1], dtype='float32')
sparse_x = dense_x.to_sparse_coo(1)
out = paddle.incubate.sparse.log1p(sparse_x)
out = paddle.sparse.log1p(sparse_x)
"""
return _C_ops.sparse_log1p(x)
......@@ -415,7 +415,7 @@ def cast(x, index_dtype=None, value_dtype=None, name=None):
dense_x = paddle.to_tensor([-2, 0, 1])
sparse_x = dense_x.to_sparse_coo(1)
out = paddle.incubate.sparse.cast(sparse_x, 'int32', 'float64')
out = paddle.sparse.cast(sparse_x, 'int32', 'float64')
"""
if index_dtype and not isinstance(index_dtype, core.VarDesc.VarType):
......@@ -450,7 +450,7 @@ def pow(x, factor, name=None):
dense_x = paddle.to_tensor([-2, 0, 3], dtype='float32')
sparse_x = dense_x.to_sparse_coo(1)
out = paddle.incubate.sparse.pow(sparse_x, 2)
out = paddle.sparse.pow(sparse_x, 2)
"""
return _C_ops.sparse_pow(x, float(factor))
......@@ -480,7 +480,7 @@ def neg(x, name=None):
dense_x = paddle.to_tensor([-2, 0, 3], dtype='float32')
sparse_x = dense_x.to_sparse_coo(1)
out = paddle.incubate.sparse.neg(sparse_x)
out = paddle.sparse.neg(sparse_x)
"""
return _C_ops.sparse_scale(x, -1.0, 0.0, True)
......@@ -510,7 +510,7 @@ def abs(x, name=None):
dense_x = paddle.to_tensor([-2, 0, 3], dtype='float32')
sparse_x = dense_x.to_sparse_coo(1)
out = paddle.incubate.sparse.abs(sparse_x)
out = paddle.sparse.abs(sparse_x)
"""
return _C_ops.sparse_abs(x)
......@@ -532,12 +532,10 @@ def coalesce(x):
import paddle
from paddle.incubate import sparse
indices = [[0, 0, 1], [1, 1, 2]]
values = [1.0, 2.0, 3.0]
sp_x = sparse.sparse_coo_tensor(indices, values)
sp_x = sparse.coalesce(sp_x)
sp_x = paddle.sparse.sparse_coo_tensor(indices, values)
sp_x = paddle.sparse.coalesce(sp_x)
print(sp_x.indices())
#[[0, 1], [1, 2]]
print(sp_x.values())
......@@ -571,7 +569,7 @@ def rad2deg(x, name=None):
dense_x = paddle.to_tensor([3.142, 0., -3.142])
sparse_x = dense_x.to_sparse_coo(1)
out = paddle.incubate.sparse.rad2deg(sparse_x)
out = paddle.sparse.rad2deg(sparse_x)
"""
if x.dtype in _int_dtype_:
......@@ -604,7 +602,7 @@ def deg2rad(x, name=None):
dense_x = paddle.to_tensor([-180, 0, 180])
sparse_x = dense_x.to_sparse_coo(1)
out = paddle.incubate.sparse.deg2rad(sparse_x)
out = paddle.sparse.deg2rad(sparse_x)
"""
if x.dtype in _int_dtype_:
......@@ -636,7 +634,7 @@ def expm1(x, name=None):
dense_x = paddle.to_tensor([-2., 0., 1.])
sparse_x = dense_x.to_sparse_coo(1)
out = paddle.incubate.sparse.expm1(sparse_x)
out = paddle.sparse.expm1(sparse_x)
"""
return _C_ops.sparse_expm1(x)
......@@ -689,7 +687,7 @@ def reshape(x, shape, name=None):
sp_x = dense_x.to_sparse_coo(len(x_shape))
else:
sp_x = dense_x.to_sparse_csr()
sp_out = paddle.incubate.sparse.reshape(sp_x, new_shape)
sp_out = paddle.sparse.reshape(sp_x, new_shape)
print(sp_out)
# the shape of sp_out is [1, 2, 2, 3, 3]
......
......@@ -283,7 +283,6 @@ packages=['paddle',
'paddle.incubate.tensor',
'paddle.incubate.multiprocessing',
'paddle.incubate.nn',
'paddle.incubate.sparse',
'paddle.incubate.asp',
'paddle.incubate.passes',
'paddle.distribution',
......@@ -371,10 +370,10 @@ packages=['paddle',
'paddle.vision.transforms',
'paddle.vision.datasets',
'paddle.audio',
'paddle.audio.functional',
'paddle.audio.features',
'paddle.audio.datasets',
'paddle.audio.backends',
'paddle.audio.functional',
'paddle.audio.features',
'paddle.audio.datasets',
'paddle.audio.backends',
'paddle.text',
'paddle.text.datasets',
'paddle.incubate',
......@@ -388,10 +387,10 @@ packages=['paddle',
'paddle.incubate.distributed.models',
'paddle.incubate.distributed.models.moe',
'paddle.incubate.distributed.models.moe.gate',
'paddle.incubate.sparse',
'paddle.incubate.sparse.nn',
'paddle.incubate.sparse.nn.layer',
'paddle.incubate.sparse.nn.functional',
'paddle.sparse',
'paddle.sparse.nn',
'paddle.sparse.nn.layer',
'paddle.sparse.nn.functional',
'paddle.incubate.xpu',
'paddle.io',
'paddle.optimizer',
......
......@@ -262,6 +262,9 @@ def check_public_api():
paddle.audio,
paddle.audio.backends,
paddle.audio.datasets,
paddle.sparse,
paddle.sparse.nn,
paddle.sparse.nn.functional,
]
apinum = 0
......
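For downstream code that must run on Paddle releases both before and after this rename, a hedged compatibility shim is to try the new top-level module first and fall back to the incubate path; the _sparse alias below is illustrative, not part of Paddle's API:

    # compatibility sketch: prefer the new top-level module, fall back
    # to the deprecated incubate path on older Paddle releases
    try:
        import paddle.sparse as _sparse
    except ImportError:
        import paddle.incubate.sparse as _sparse

    coo = _sparse.sparse_coo_tensor([[0, 1], [1, 0]], [1.0, 2.0], [2, 2])
    print(coo.to_dense())  # [[0., 1.], [2., 0.]]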