Commit 13add823 (unverified), repository Crayon鑫/Paddle, forked from PaddlePaddle/Paddle

Unify sparse api in paddle.incubate (#43122)

Authored on Jun 01, 2022 by zhouweiwei2014; committed via GitHub on Jun 01, 2022.
Parent commit: 664758fa

Showing 21 changed files with 196 additions and 165 deletions (+196, -165).
Changed files (21):

  python/paddle/__init__.py                                        +0   -1
  python/paddle/fluid/dygraph/varbase_patch_methods.py             +2   -2
  python/paddle/fluid/tests/unittests/test_sparse_conv_op.py       +12  -11
  python/paddle/fluid/tests/unittests/test_sparse_norm_op.py       +3   -3
  python/paddle/fluid/tests/unittests/test_sparse_pooling_op.py    +2   -2
  python/paddle/fluid/tests/unittests/test_sparse_unary_op.py      +12  -12
  python/paddle/fluid/tests/unittests/test_sparse_utils_op.py      +44  -39
  python/paddle/incubate/__init__.py                               +1   -0
  python/paddle/incubate/sparse/__init__.py                        +15  -6
  python/paddle/incubate/sparse/creation.py                        +6   -7
  python/paddle/incubate/sparse/nn/__init__.py                     +11  -14
  python/paddle/incubate/sparse/nn/functional/__init__.py          +7   -5
  python/paddle/incubate/sparse/nn/functional/activation.py        +55  -0
  python/paddle/incubate/sparse/nn/functional/conv.py              +7   -7
  python/paddle/incubate/sparse/nn/functional/pooling.py           +2   -2
  python/paddle/incubate/sparse/nn/layer/activation.py             +1   -1
  python/paddle/incubate/sparse/nn/layer/conv.py                   +6   -6
  python/paddle/incubate/sparse/nn/layer/norm.py                   +2   -2
  python/paddle/incubate/sparse/nn/layer/pooling.py                +1   -1
  python/paddle/incubate/sparse/unary.py                           +3   -41
  python/setup.py.in                                               +4   -3
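At the call-site level the whole change is a namespace move: everything that used to live under paddle.sparse now lives under paddle.incubate.sparse, with layers under paddle.incubate.sparse.nn and functional ops under paddle.incubate.sparse.nn.functional. The snippet below is a minimal before/after sketch assembled from call sites that appear in this diff; it assumes a Paddle build that contains this commit and dynamic (eager) mode.

    import paddle
    from paddle.fluid.framework import _test_eager_guard

    with _test_eager_guard():
        indices = [[0, 1, 2], [1, 2, 0]]
        values = [1.0, 2.0, 3.0]
        # before: paddle.sparse.sparse_coo_tensor(...)
        coo = paddle.incubate.sparse.sparse_coo_tensor(indices, values, [3, 3])
        # before: paddle.sparse.ReLU()
        relu = paddle.incubate.sparse.nn.ReLU()
        out = relu(coo)
        # before: paddle.sparse.functional.relu(...)
        out = paddle.incubate.sparse.nn.functional.relu(coo)
        # before: paddle.sparse.sqrt / sin / tanh
        out = paddle.incubate.sparse.tanh(coo)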
python/paddle/__init__.py

@@ -75,7 +75,6 @@ import paddle.onnx  # noqa: F401
 import paddle.reader  # noqa: F401
 import paddle.static  # noqa: F401
 import paddle.vision  # noqa: F401
-import paddle.sparse  # noqa: F401

 from .tensor.attribute import is_complex  # noqa: F401
 from .tensor.attribute import is_integer  # noqa: F401
python/paddle/fluid/dygraph/varbase_patch_methods.py

@@ -902,7 +902,7 @@ def monkey_patch_varbase():
                 indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
                 values = [1, 2, 3, 4, 5]
                 dense_shape = [3, 4]
-                sparse_x = paddle.sparse.sparse_coo_tensor(paddle.to_tensor(indices, dtype='int32'), paddle.to_tensor(values, dtype='float32'), shape=dense_shape)
+                sparse_x = paddle.incubate.sparse.sparse_coo_tensor(paddle.to_tensor(indices, dtype='int32'), paddle.to_tensor(values, dtype='float32'), shape=dense_shape)
                 print(sparse_x.values())
                 #[1, 2, 3, 4, 5]
         """

@@ -932,7 +932,7 @@ def monkey_patch_varbase():
                 indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
                 values = [1, 2, 3, 4, 5]
                 dense_shape = [3, 4]
-                sparse_x = paddle.sparse.sparse_coo_tensor(paddle.to_tensor(indices, dtype='int64'), paddle.to_tensor(values, dtype='float32'), shape=dense_shape)
+                sparse_x = paddle.incubate.sparse.sparse_coo_tensor(paddle.to_tensor(indices, dtype='int64'), paddle.to_tensor(values, dtype='float32'), shape=dense_shape)
                 dense_x = sparse_x.to_dense()
                 #[[0., 1., 0., 2.],
                 # [0., 0., 3., 0.],
python/paddle/fluid/tests/unittests/test_sparse_conv_op.py

@@ -41,7 +41,7 @@ class TestSparseConv(unittest.TestCase):
             correct_out_values = [[5], [11]]
             sparse_input = core.eager.sparse_coo_tensor(indices, values, dense_shape, False)
-            out = paddle.sparse.functional.conv3d(
+            out = paddle.incubate.sparse.nn.functional.conv3d(
                 sparse_input,
                 dense_kernel,
                 bias=paddle.to_tensor(

@@ -61,10 +61,11 @@ class TestSparseConv(unittest.TestCase):
             indices = paddle.to_tensor(indices, dtype='int32')
             values = paddle.to_tensor(values, dtype='float32')
             dense_shape = [1, 1, 3, 4, 1]
-            sparse_x = paddle.sparse.sparse_coo_tensor(
+            sparse_x = paddle.incubate.sparse.sparse_coo_tensor(
                 indices, values, dense_shape, stop_gradient=True)
             weight = paddle.randn((1, 3, 3, 1, 1), dtype='float32')
-            y = paddle.sparse.functional.subm_conv3d(sparse_x, weight)
+            y = paddle.incubate.sparse.nn.functional.subm_conv3d(sparse_x, weight)
             assert np.array_equal(sparse_x.indices().numpy(), y.indices().numpy())

@@ -78,16 +79,16 @@ class TestSparseConv(unittest.TestCase):
             values = paddle.to_tensor(values, dtype='float32')
             dense_shape = [1, 1, 3, 4, 1]
             correct_out_values = [[4], [10]]
-            sparse_input = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape, False)
-            sparse_conv3d = paddle.sparse.Conv3D(
+            sparse_input = paddle.incubate.sparse.sparse_coo_tensor(indices, values, dense_shape, False)
+            sparse_conv3d = paddle.incubate.sparse.nn.Conv3D(
                 1, 1, (1, 3, 3), data_format='NDHWC')
             sparse_out = sparse_conv3d(sparse_input)
             #test errors
             with self.assertRaises(ValueError):
                 #Currently, only support data_format='NDHWC'
-                conv3d = paddle.sparse.SubmConv3D(
+                conv3d = paddle.incubate.sparse.nn.SubmConv3D(
                     1, 1, (1, 3, 3), data_format='NCDHW')

     def test_SubmConv3D(self):

@@ -98,10 +99,10 @@ class TestSparseConv(unittest.TestCase):
             values = paddle.to_tensor(values, dtype='float32')
             dense_shape = [1, 1, 3, 4, 1]
             correct_out_values = [[4], [10]]
-            sparse_input = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape, False)
-            subm_conv3d = paddle.sparse.SubmConv3D(
+            sparse_input = paddle.incubate.sparse.sparse_coo_tensor(indices, values, dense_shape, False)
+            subm_conv3d = paddle.incubate.sparse.nn.SubmConv3D(
                 1, 1, (1, 3, 3), data_format='NDHWC')
             # test extra_repr
             print(subm_conv3d.extra_repr())

@@ -113,5 +114,5 @@ class TestSparseConv(unittest.TestCase):
             #test errors
             with self.assertRaises(ValueError):
                 #Currently, only support data_format='NDHWC'
-                conv3d = paddle.sparse.SubmConv3D(
+                conv3d = paddle.incubate.sparse.nn.SubmConv3D(
                     1, 1, (1, 3, 3), data_format='NCDHW')
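The renamed test above keeps the key property check for submanifold convolution: subm_conv3d must leave the set of active sites unchanged (the np.array_equal assertion on indices()), while plain conv3d is free to produce a different pattern. A condensed sketch of that check under the new namespace follows; the indices/values data is illustrative (the actual lists sit in folded context not shown in this diff), and it assumes eager mode on a build where the sparse conv kernels are available.

    import numpy as np
    import paddle

    # illustrative data for a [1, 1, 3, 4, 1] sparse input with 4 active sites
    indices = paddle.to_tensor([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 2], [1, 3, 2, 3]], dtype='int32')
    values = paddle.to_tensor([[1.0], [2.0], [3.0], [4.0]], dtype='float32')
    dense_shape = [1, 1, 3, 4, 1]
    sparse_x = paddle.incubate.sparse.sparse_coo_tensor(
        indices, values, dense_shape, stop_gradient=True)
    weight = paddle.randn((1, 3, 3, 1, 1), dtype='float32')
    y = paddle.incubate.sparse.nn.functional.subm_conv3d(sparse_x, weight)
    # submanifold conv computes outputs only at the input's active sites,
    # so the output sparsity pattern matches the input exactly
    assert np.array_equal(sparse_x.indices().numpy(), y.indices().numpy())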
python/paddle/fluid/tests/unittests/test_sparse_norm_op.py

@@ -38,7 +38,7 @@ class TestSparseBatchNorm(unittest.TestCase):
             dense_x2 = copy.deepcopy(dense_x)
             dense_x2.stop_gradient = False
             sparse_x = dense_x2.to_sparse_coo(sparse_dim)
-            sparse_batch_norm = paddle.sparse.BatchNorm(channels)
+            sparse_batch_norm = paddle.incubate.sparse.nn.BatchNorm(channels)
             # set same params
             sparse_batch_norm._mean.set_value(batch_norm._mean)
             sparse_batch_norm._variance.set_value(batch_norm._variance)

@@ -66,7 +66,7 @@ class TestSparseBatchNorm(unittest.TestCase):
             shape = [2, 3, 6, 6, 3]
             x = paddle.randn(shape)
             sparse_x = x.to_sparse_coo(4)
-            sparse_batch_norm = paddle.sparse.BatchNorm(
+            sparse_batch_norm = paddle.incubate.sparse.nn.BatchNorm(
                 3, data_format='NCDHW')
             sparse_batch_norm(sparse_x)

@@ -77,7 +77,7 @@ class TestSparseBatchNorm(unittest.TestCase):
             x_data = paddle.randn((1, 6, 6, 6, channels)).astype('float32')
             dense_x = paddle.to_tensor(x_data)
             sparse_x = dense_x.to_sparse_coo(4)
-            batch_norm = paddle.sparse.BatchNorm(channels)
+            batch_norm = paddle.incubate.sparse.nn.BatchNorm(channels)
             batch_norm_out = batch_norm(sparse_x)
             print(batch_norm_out.shape)
             # [1, 6, 6, 6, 3]
python/paddle/fluid/tests/unittests/test_sparse_pooling_op.py

@@ -47,7 +47,7 @@ class TestMaxPool3DFunc(unittest.TestCase):
         self.setUp()
         self.dense_x.stop_gradient = False
         sparse_x = self.dense_x.to_sparse_coo(4)
-        sparse_out = paddle.sparse.functional.max_pool3d(
+        sparse_out = paddle.incubate.sparse.nn.functional.max_pool3d(
             sparse_x,
             self.kernel_sizes,
             stride=self.strides,

@@ -104,7 +104,7 @@ class TestMaxPool3DAPI(unittest.TestCase):
         with _test_eager_guard():
             dense_x = paddle.randn((2, 3, 6, 6, 3))
             sparse_x = dense_x.to_sparse_coo(4)
-            max_pool3d = paddle.sparse.MaxPool3D(
+            max_pool3d = paddle.incubate.sparse.nn.MaxPool3D(
                 kernel_size=3, data_format='NDHWC')
             out = max_pool3d(sparse_x)
             out = out.to_dense()
python/paddle/fluid/tests/unittests/test_sparse_unary_op.py

@@ -67,15 +67,15 @@ class TestSparseUnary(unittest.TestCase):
             x,
             lambda x: x.to_sparse_coo(sparse_dim),
             paddle.nn.ReLU(),
-            paddle.sparse.ReLU(),
+            paddle.incubate.sparse.nn.ReLU(),
             True,
         )
         self.compare_with_dense(
             x,
             lambda x: x.to_sparse_csr(),
             paddle.nn.ReLU(),
-            paddle.sparse.ReLU(),
+            paddle.incubate.sparse.nn.ReLU(),
             False,
         )
-        self.assert_raises_on_dense_tensor(paddle.sparse.ReLU())
+        self.assert_raises_on_dense_tensor(paddle.incubate.sparse.nn.ReLU())

     def test_sparse_sqrt(self):
         x = [[0, 16, 0, 0], [0, 0, 0, 0], [0, 4, 2, 0]]

@@ -84,15 +84,15 @@ class TestSparseUnary(unittest.TestCase):
             x,
             lambda x: x.to_sparse_coo(sparse_dim),
             paddle.sqrt,
-            paddle.sparse.sqrt,
+            paddle.incubate.sparse.sqrt,
             True,
         )
         self.compare_with_dense(
             x,
             lambda x: x.to_sparse_csr(),
             paddle.sqrt,
-            paddle.sparse.sqrt,
+            paddle.incubate.sparse.sqrt,
             False,
         )
-        self.assert_raises_on_dense_tensor(paddle.sparse.sqrt)
+        self.assert_raises_on_dense_tensor(paddle.incubate.sparse.sqrt)

     def test_sparse_sin(self):
         x = [[0, 16, 0, 0], [0, 0, 0, 0], [0, 4, 2, 0]]

@@ -101,15 +101,15 @@ class TestSparseUnary(unittest.TestCase):
             x,
             lambda x: x.to_sparse_coo(sparse_dim),
             paddle.sin,
-            paddle.sparse.sin,
+            paddle.incubate.sparse.sin,
             True,
         )
         self.compare_with_dense(
             x,
             lambda x: x.to_sparse_csr(),
             paddle.sin,
-            paddle.sparse.sin,
+            paddle.incubate.sparse.sin,
             False,
         )
-        self.assert_raises_on_dense_tensor(paddle.sparse.sin)
+        self.assert_raises_on_dense_tensor(paddle.incubate.sparse.sin)

     def test_sparse_tanh(self):
         x = [[0, 16, 0, 0], [0, 0, 0, 0], [0, -4, 2, 0]]

@@ -118,15 +118,15 @@ class TestSparseUnary(unittest.TestCase):
             x,
             lambda x: x.to_sparse_coo(sparse_dim),
             paddle.tanh,
-            paddle.sparse.tanh,
+            paddle.incubate.sparse.tanh,
             True,
         )
         self.compare_with_dense(
             x,
             lambda x: x.to_sparse_csr(),
             paddle.tanh,
-            paddle.sparse.tanh,
+            paddle.incubate.sparse.tanh,
             False,
         )
-        self.assert_raises_on_dense_tensor(paddle.sparse.tanh)
+        self.assert_raises_on_dense_tensor(paddle.incubate.sparse.tanh)

 if __name__ == "__main__":
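Each of these cases feeds the same data through a dense operator and its sparse counterpart via compare_with_dense, so the expectation being exercised is simply that the sparse op reproduces the dense result once densified. A condensed sketch of that equivalence for sqrt, reusing the input matrix from test_sparse_sqrt above (assuming eager mode; the sparse_dim value is an assumption, the real one sits in folded context):

    import numpy as np
    import paddle
    from paddle.fluid.framework import _test_eager_guard

    with _test_eager_guard():
        x = paddle.to_tensor([[0., 16., 0., 0.], [0., 0., 0., 0.], [0., 4., 2., 0.]])
        sparse_x = x.to_sparse_coo(2)   # sparse_dim assumed to be 2 here
        dense_out = paddle.sqrt(x)
        sparse_out = paddle.incubate.sparse.sqrt(sparse_x)
        # the sparse op only touches stored values; densifying recovers the
        # dense result because sqrt(0) == 0
        assert np.allclose(dense_out.numpy(), sparse_out.to_dense().numpy())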
python/paddle/fluid/tests/unittests/test_sparse_utils_op.py

@@ -30,7 +30,7 @@ class TestSparseCreate(unittest.TestCase):
             dense_shape = [3, 4]
             dense_indices = paddle.to_tensor(indices)
             dense_elements = paddle.to_tensor(values, dtype='float32')
-            coo = paddle.sparse.sparse_coo_tensor(
+            coo = paddle.incubate.sparse.sparse_coo_tensor(
                 dense_indices, dense_elements, dense_shape, stop_gradient=False)
             # test the to_string.py
             print(coo)

@@ -42,7 +42,8 @@ class TestSparseCreate(unittest.TestCase):
             indices = [[0, 1, 2], [1, 2, 0]]
             values = [1.0, 2.0, 3.0]
             dense_shape = [3, 3]
-            coo = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape)
+            coo = paddle.incubate.sparse.sparse_coo_tensor(indices, values, dense_shape)
             assert np.array_equal(indices, coo.indices().numpy())
             assert np.array_equal(values, coo.values().numpy())

@@ -56,7 +57,7 @@ class TestSparseCreate(unittest.TestCase):
             dense_cols = paddle.to_tensor(cols)
             dense_elements = paddle.to_tensor(values, dtype='float32')
             stop_gradient = False
-            csr = paddle.sparse.sparse_csr_tensor(
+            csr = paddle.incubate.sparse.sparse_csr_tensor(
                 dense_crows,
                 dense_cols,
                 dense_elements,

@@ -69,8 +70,8 @@ class TestSparseCreate(unittest.TestCase):
             cols = [1, 3, 2, 0, 1]
             values = [1, 2, 3, 4, 5]
             dense_shape = [3, 4]
-            csr = paddle.sparse.sparse_csr_tensor(crows, cols, values, dense_shape)
+            csr = paddle.incubate.sparse.sparse_csr_tensor(crows, cols, values, dense_shape)
             # test the to_string.py
             print(csr)
             assert np.array_equal(crows, csr.crows().numpy())

@@ -83,7 +84,7 @@ class TestSparseCreate(unittest.TestCase):
             indices = [[0, 1], [0, 1]]
             values = [1.0, 2.0]
             dense_shape = [2, 2]
-            coo = paddle.sparse.sparse_coo_tensor(
+            coo = paddle.incubate.sparse.sparse_coo_tensor(
                 indices, values, dense_shape, place=place)
             assert coo.place.is_cpu_place()
             assert coo.values().place.is_cpu_place()

@@ -92,7 +93,7 @@ class TestSparseCreate(unittest.TestCase):
             crows = [0, 2, 3, 5]
             cols = [1, 3, 2, 0, 1]
             values = [1.0, 2.0, 3.0, 4.0, 5.0]
-            csr = paddle.sparse.sparse_csr_tensor(
+            csr = paddle.incubate.sparse.sparse_csr_tensor(
                 crows, cols, values, [3, 5], place=place)
             assert csr.place.is_cpu_place()
             assert csr.crows().place.is_cpu_place()

@@ -106,14 +107,14 @@ class TestSparseCreate(unittest.TestCase):
             dense_shape = [2, 2]
             indices = paddle.to_tensor(indices, dtype='int32')
             values = paddle.to_tensor(values, dtype='float32')
-            coo = paddle.sparse.sparse_coo_tensor(
+            coo = paddle.incubate.sparse.sparse_coo_tensor(
                 indices, values, dense_shape, dtype='float64')
             assert coo.dtype == paddle.float64

             crows = [0, 2, 3, 5]
             cols = [1, 3, 2, 0, 1]
             values = [1.0, 2.0, 3.0, 4.0, 5.0]
-            csr = paddle.sparse.sparse_csr_tensor(
+            csr = paddle.incubate.sparse.sparse_csr_tensor(
                 crows, cols, values, [3, 5], dtype='float16')
             assert csr.dtype == paddle.float16

@@ -123,7 +124,7 @@ class TestSparseCreate(unittest.TestCase):
             values = [1.0, 2.0]
             indices = paddle.to_tensor(indices, dtype='int32')
             values = paddle.to_tensor(values, dtype='float32')
-            coo = paddle.sparse.sparse_coo_tensor(indices, values)
+            coo = paddle.incubate.sparse.sparse_coo_tensor(indices, values)
             assert [2, 2] == coo.shape
@@ -140,7 +141,7 @@ class TestSparseConvert(unittest.TestCase):
             #test to_sparse_coo_grad backward
             out_grad_indices = [[0, 1], [0, 1]]
             out_grad_values = [2.0, 3.0]
-            out_grad = paddle.sparse.sparse_coo_tensor(
+            out_grad = paddle.incubate.sparse.sparse_coo_tensor(
                 paddle.to_tensor(out_grad_indices),
                 paddle.to_tensor(out_grad_values),
                 shape=out.shape,

@@ -153,7 +154,7 @@ class TestSparseConvert(unittest.TestCase):
         with _test_eager_guard():
             indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
             values = [1.0, 2.0, 3.0, 4.0, 5.0]
-            sparse_x = paddle.sparse.sparse_coo_tensor(
+            sparse_x = paddle.incubate.sparse.sparse_coo_tensor(
                 paddle.to_tensor(indices),
                 paddle.to_tensor(values),
                 shape=[3, 4],

@@ -169,7 +170,7 @@ class TestSparseConvert(unittest.TestCase):
                             sparse_x.grad.values().numpy())
             paddle.device.set_device("cpu")
-            sparse_x_cpu = paddle.sparse.sparse_coo_tensor(
+            sparse_x_cpu = paddle.incubate.sparse.sparse_coo_tensor(
                 paddle.to_tensor(indices),
                 paddle.to_tensor(values),
                 shape=[3, 4],

@@ -198,7 +199,7 @@ class TestSparseConvert(unittest.TestCase):
         with _test_eager_guard():
             indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
             values = [1.0, 2.0, 3.0, 4.0, 5.0]
-            sparse_x = paddle.sparse.sparse_coo_tensor(
+            sparse_x = paddle.incubate.sparse.sparse_coo_tensor(
                 paddle.to_tensor(indices),
                 paddle.to_tensor(values),
                 shape=[3, 4],

@@ -211,7 +212,7 @@ class TestSparseConvert(unittest.TestCase):
             indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
             values = [[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0], [5.0, 5.0]]
-            sparse_x = paddle.sparse.sparse_coo_tensor(
+            sparse_x = paddle.incubate.sparse.sparse_coo_tensor(
                 paddle.to_tensor(indices),
                 paddle.to_tensor(values),
                 shape=[3, 4, 2],

@@ -234,13 +235,13 @@ class TestSparseConvert(unittest.TestCase):
             indices = paddle.to_tensor(indices, dtype='int32')
             values = paddle.to_tensor(values, dtype='float32', stop_gradient=False)
-            sparse_x = paddle.sparse.sparse_coo_tensor(
+            sparse_x = paddle.incubate.sparse.sparse_coo_tensor(
                 indices, values, shape=[2, 2], stop_gradient=False)
             grad_indices = [[0, 1], [1, 1]]
             grad_values = [2, 3]
             grad_indices = paddle.to_tensor(grad_indices, dtype='int32')
             grad_values = paddle.to_tensor(grad_values, dtype='float32')
-            sparse_out_grad = paddle.sparse.sparse_coo_tensor(
+            sparse_out_grad = paddle.incubate.sparse.sparse_coo_tensor(
                 grad_indices, grad_values, shape=[2, 2])
             sparse_x.backward(sparse_out_grad)
             correct_values_grad = [0, 3]

@@ -251,11 +252,11 @@ class TestSparseConvert(unittest.TestCase):
             values = [[1, 1], [2, 2]]
             values = paddle.to_tensor(values, dtype='float32', stop_gradient=False)
-            sparse_x = paddle.sparse.sparse_coo_tensor(
+            sparse_x = paddle.incubate.sparse.sparse_coo_tensor(
                 indices, values, shape=[2, 2, 2], stop_gradient=False)
             grad_values = [[2, 2], [3, 3]]
             grad_values = paddle.to_tensor(grad_values, dtype='float32')
-            sparse_out_grad = paddle.sparse.sparse_coo_tensor(
+            sparse_out_grad = paddle.incubate.sparse.sparse_coo_tensor(
                 grad_indices, grad_values, shape=[2, 2, 2])
             sparse_x.backward(sparse_out_grad)
             correct_values_grad = [[0, 0], [3, 3]]

@@ -273,7 +274,8 @@ class TestSparseConvert(unittest.TestCase):
             values = [1.0, 2.0, 3.0]
             indices = paddle.to_tensor(indices, dtype='int32')
             values = paddle.to_tensor(values, dtype='float32')
-            sparse_x = paddle.sparse.sparse_coo_tensor(indices, values)
+            sparse_x = paddle.incubate.sparse.sparse_coo_tensor(indices, values)
             indices_sorted = [[0, 1], [1, 0]]
             values_sorted = [5.0, 1.0]
             assert np.array_equal(indices_sorted,

@@ -284,7 +286,8 @@ class TestSparseConvert(unittest.TestCase):
             # test the non-zero values is a vector
             values = [[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]]
             values = paddle.to_tensor(values, dtype='float32')
-            sparse_x = paddle.sparse.sparse_coo_tensor(indices, values)
+            sparse_x = paddle.incubate.sparse.sparse_coo_tensor(indices, values)
             values_sorted = [[5.0, 5.0], [1.0, 1.0]]
             assert np.array_equal(indices_sorted,
                                   sparse_x.indices().numpy())
@@ -300,7 +303,7 @@ class TestCooError(unittest.TestCase):
                 values = [1, 2]
                 # 1. the shape too small
                 dense_shape = [2, 2]
-                sparse_x = paddle.sparse.sparse_coo_tensor(
+                sparse_x = paddle.incubate.sparse.sparse_coo_tensor(
                     indices, values, shape=dense_shape)

     def test_same_nnz(self):

@@ -309,7 +312,8 @@ class TestCooError(unittest.TestCase):
                 # 2. test the nnz of indices must same as nnz of values
                 indices = [[1, 2], [1, 0]]
                 values = [1, 2, 3]
-                sparse_x = paddle.sparse.sparse_coo_tensor(indices, values)
+                sparse_x = paddle.incubate.sparse.sparse_coo_tensor(indices, values)

     def test_same_dimensions(self):
         with _test_eager_guard():

@@ -317,7 +321,7 @@ class TestCooError(unittest.TestCase):
                 indices = [[1, 2], [1, 0]]
                 values = [1, 2, 3]
                 shape = [2, 3, 4]
-                sparse_x = paddle.sparse.sparse_coo_tensor(
+                sparse_x = paddle.incubate.sparse.sparse_coo_tensor(
                     indices, values, shape=shape)

     def test_indices_dtype(self):

@@ -325,7 +329,8 @@ class TestCooError(unittest.TestCase):
             with self.assertRaises(TypeError):
                 indices = [[1.0, 2.0], [0, 1]]
                 values = [1, 2]
-                sparse_x = paddle.sparse.sparse_coo_tensor(indices, values)
+                sparse_x = paddle.incubate.sparse.sparse_coo_tensor(indices, values)


 class TestCsrError(unittest.TestCase):

@@ -336,8 +341,8 @@ class TestCsrError(unittest.TestCase):
                 cols = [0, 1, 2]
                 values = [1, 2, 3]
                 shape = [3]
-                sparse_x = paddle.sparse.sparse_csr_tensor(crows, cols, values, shape)
+                sparse_x = paddle.incubate.sparse.sparse_csr_tensor(crows, cols, values, shape)

     def test_dimension2(self):
         with _test_eager_guard():

@@ -346,8 +351,8 @@ class TestCsrError(unittest.TestCase):
                 cols = [0, 1, 2]
                 values = [1, 2, 3]
                 shape = [3, 3, 3, 3]
-                sparse_x = paddle.sparse.sparse_csr_tensor(crows, cols, values, shape)
+                sparse_x = paddle.incubate.sparse.sparse_csr_tensor(crows, cols, values, shape)

     def test_same_shape1(self):
         with _test_eager_guard():

@@ -356,8 +361,8 @@ class TestCsrError(unittest.TestCase):
                 cols = [0, 1, 2, 3]
                 values = [1, 2, 3]
                 shape = [3, 4]
-                sparse_x = paddle.sparse.sparse_csr_tensor(crows, cols, values, shape)
+                sparse_x = paddle.incubate.sparse.sparse_csr_tensor(crows, cols, values, shape)

     def test_same_shape2(self):
         with _test_eager_guard():

@@ -366,8 +371,8 @@ class TestCsrError(unittest.TestCase):
                 cols = [0, 1, 2, 3]
                 values = [1, 2, 3, 4]
                 shape = [3, 4]
-                sparse_x = paddle.sparse.sparse_csr_tensor(crows, cols, values, shape)
+                sparse_x = paddle.incubate.sparse.sparse_csr_tensor(crows, cols, values, shape)

     def test_same_shape3(self):
         with _test_eager_guard():

@@ -376,8 +381,8 @@ class TestCsrError(unittest.TestCase):
                 cols = [0, 1, 2, 3, 0, 1, 2]
                 values = [1, 2, 3, 4, 0, 1, 2]
                 shape = [2, 3, 4]
-                sparse_x = paddle.sparse.sparse_csr_tensor(crows, cols, values, shape)
+                sparse_x = paddle.incubate.sparse.sparse_csr_tensor(crows, cols, values, shape)

     def test_crows_first_value(self):
         with _test_eager_guard():

@@ -386,8 +391,8 @@ class TestCsrError(unittest.TestCase):
                 cols = [0, 1, 2]
                 values = [1, 2, 3]
                 shape = [3, 4]
-                sparse_x = paddle.sparse.sparse_csr_tensor(crows, cols, values, shape)
+                sparse_x = paddle.incubate.sparse.sparse_csr_tensor(crows, cols, values, shape)

     def test_dtype(self):
         with _test_eager_guard():

@@ -396,8 +401,8 @@ class TestCsrError(unittest.TestCase):
                 cols = [0, 1, 2]
                 values = [1, 2, 3]
                 shape = [3]
-                sparse_x = paddle.sparse.sparse_csr_tensor(crows, cols, values, shape)
+                sparse_x = paddle.incubate.sparse.sparse_csr_tensor(crows, cols, values, shape)


 if __name__ == "__main__":
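The error-path tests (TestCooError, TestCsrError) only change which constructor they call; the validation itself is unchanged. For instance, the nnz-mismatch case above passes two coordinates but three values, which the new entry point still rejects. A minimal standalone sketch (the surrounding assertRaises block is in folded context, so a generic Exception is caught here rather than committing to a specific type):

    import paddle
    from paddle.fluid.framework import _test_eager_guard

    with _test_eager_guard():
        indices = [[1, 2], [1, 0]]   # 2 non-zero coordinates
        values = [1, 2, 3]           # 3 values: counts do not match
        try:
            paddle.incubate.sparse.sparse_coo_tensor(indices, values)
        except Exception as exc:
            print("rejected:", type(exc).__name__)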
python/paddle/incubate/__init__.py

@@ -30,6 +30,7 @@ from .tensor import segment_min
 from .passes import fuse_resnet_unit_pass

 import paddle.incubate.autograd
 import paddle.incubate.autotune
+import paddle.incubate.sparse

 from . import nn  #noqa: F401
 from . import asp  #noqa: F401
python/paddle/sparse/layer/__init__.py → python/paddle/incubate/sparse/__init__.py

@@ -12,10 +12,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from .unary import ReLU
-from .norm import BatchNorm
-from .conv import Conv3D
-from .conv import SubmConv3D
-from .pooling import MaxPool3D
-
-__all__ = []
+from .creation import sparse_coo_tensor
+from .creation import sparse_csr_tensor
+from .unary import sqrt
+from .unary import sin
+from .unary import tanh
+
+from . import nn
+
+__all__ = [
+    'sparse_coo_tensor',
+    'sparse_csr_tensor',
+    'sqrt',
+    'sin',
+    'tanh',
+]
python/paddle/sparse/creation.py → python/paddle/incubate/sparse/creation.py

@@ -14,11 +14,10 @@
 import paddle
 from paddle import _C_ops
-from ..framework import core, dygraph_only
-from ..framework import _current_expected_place, _get_paddle_place
-from ..tensor import to_tensor
-from ..tensor import max
-from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype, convert_dtype
+from paddle.fluid.framework import core, dygraph_only
+from paddle.fluid.framework import _current_expected_place, _get_paddle_place
+from paddle.tensor import to_tensor, max
+from paddle.fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype, convert_dtype
 import numpy as np

@@ -112,7 +111,7 @@ def sparse_coo_tensor(indices,
             indices = [[0, 1, 2], [1, 2, 0]]
             values = [1.0, 2.0, 3.0]
             dense_shape = [3, 3]
-            coo = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape)
+            coo = paddle.incubate.sparse.sparse_coo_tensor(indices, values, dense_shape)
             # print(coo)
             # Tensor(shape=[2, 3], dtype=paddle.float32, place=Place(gpu:0), stop_gradient=True,
             #        indices=[[0, 1, 2],

@@ -222,7 +221,7 @@ def sparse_csr_tensor(crows,
             cols = [1, 3, 2, 0, 1]
             values = [1, 2, 3, 4, 5]
             dense_shape = [3, 4]
-            csr = paddle.sparse.sparse_csr_tensor(crows, cols, values, dense_shape)
+            csr = paddle.incubate.sparse.sparse_csr_tensor(crows, cols, values, dense_shape)
             # print(csr)
             # Tensor(shape=[3, 4], dtype=paddle.int64, place=Place(gpu:0), stop_gradient=True,
             #        crows=[0, 2, 3, 5],
python/paddle/sparse/__init__.py → python/paddle/incubate/sparse/nn/__init__.py

@@ -12,21 +12,18 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from .creation import sparse_coo_tensor
-from .creation import sparse_csr_tensor
-from .layer import ReLU
-from .layer import BatchNorm
-from .layer import Conv3D
-from .layer import SubmConv3D
-from .layer import MaxPool3D
-from .functional import sqrt
-from .functional import sin
-from .functional import tanh
+from . import functional
+
+from .layer.activation import ReLU
+from .layer.norm import BatchNorm
+from .layer.conv import Conv3D
+from .layer.conv import SubmConv3D
+from .layer.pooling import MaxPool3D

 __all__ = [
-    'sparse_coo_tensor', 'sparse_csr_tensor', 'ReLU', 'Conv3D', 'SubmConv3D',
-    'BatchNorm', 'MaxPool3D', 'sqrt', 'sin', 'tanh'
+    'ReLU',
+    'BatchNorm',
+    'Conv3D',
+    'SubmConv3D',
+    'MaxPool3D',
 ]
python/paddle/sparse/functional/__init__.py → python/paddle/incubate/sparse/nn/functional/__init__.py

@@ -12,12 +12,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from .unary import relu  # noqa: F401
-from .unary import tanh  # noqa: F401
-from .unary import sqrt  # noqa: F401
-from .unary import sin  # noqa: F401
 from .conv import conv3d  # noqa: F401
 from .conv import subm_conv3d  # noqa: F401
 from .pooling import max_pool3d  # noqa: F401
+from .activation import relu  # noqa: F401

-__all__ = ['relu', 'tanh', 'conv3d', 'subm_conv3d', 'max_pool3d', 'sqrt', 'sin']
+__all__ = [
+    'conv3d',
+    'subm_conv3d',
+    'max_pool3d',
+    'relu',
+]
python/paddle/incubate/sparse/nn/functional/activation.py (new file, mode 100644)

# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

__all__ = []

from paddle import _C_ops, in_dynamic_mode


def relu(x, name=None):
    """
    sparse relu activation, requiring x to be a sparse coo or sparse csr tensor.

    .. math::

        out = max(x, 0)

    Parameters:
        x (Tensor): The input Sparse Tensor with data type float32, float64.
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        A Sparse Tensor with the same data type and shape as ``x`` .

    Examples:
        .. code-block:: python

            import paddle
            from paddle.fluid.framework import _test_eager_guard

            with _test_eager_guard():
                dense_x = paddle.to_tensor([-2, 0, 1], dtype='float32')
                sparse_x = dense_x.to_sparse_coo(1)
                out = paddle.incubate.sparse.nn.functional.relu(sparse_x)
    """
    assert in_dynamic_mode(), "Currently, Sparse API only support dynamic mode"

    if x.is_sparse_coo() or x.is_sparse_csr():
        return _C_ops.final_state_sparse_relu(x)
    else:
        raise ValueError(
            "Currently, sparse.relu only support the input of SparseCooTensor or SparseCsrTensor"
        )
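The guard in relu routes both COO and CSR inputs to the same final_state_sparse_relu kernel and rejects everything else; only the COO path is shown in the docstring. A short sketch of the CSR path, mirroring what test_sparse_unary_op.py exercises (assuming eager mode):

    import paddle
    from paddle.fluid.framework import _test_eager_guard

    with _test_eager_guard():
        dense_x = paddle.to_tensor([[-2., 0.], [1., 3.]])
        csr_x = dense_x.to_sparse_csr()
        out = paddle.incubate.sparse.nn.functional.relu(csr_x)  # negative values clamp to 0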
python/paddle/sparse/functional/conv.py → python/paddle/incubate/sparse/nn/functional/conv.py

@@ -15,9 +15,9 @@
 __all__ = []

 from paddle import _C_ops, in_dynamic_mode
-from ...fluid.layers.utils import convert_to_list
-from ...fluid.layers.nn import elementwise_add
-from .. import sparse_coo_tensor
+from paddle.fluid.layers.utils import convert_to_list
+from paddle.fluid.layers.nn import elementwise_add
+from ...creation import sparse_coo_tensor
 from paddle.nn.functional.conv import _update_padding_nd

@@ -180,9 +180,9 @@ def conv3d(x,
             indices = paddle.to_tensor(indices, dtype='int32')
             values = paddle.to_tensor(values, dtype='float32')
             dense_shape = [1, 1, 3, 4, 1]
-            sparse_x = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape, stop_gradient=True)
+            sparse_x = paddle.incubate.sparse.sparse_coo_tensor(indices, values, dense_shape, stop_gradient=True)
             weight = paddle.randn((1, 3, 3, 1, 1), dtype='float32')
-            y = paddle.sparse.functional.conv3d(sparse_x, weight)
+            y = paddle.incubate.sparse.nn.functional.conv3d(sparse_x, weight)
             print(y.shape)
             # (1, 1, 1, 2, 1)
         """

@@ -295,9 +295,9 @@ def subm_conv3d(x,
             indices = paddle.to_tensor(indices, dtype='int32')
             values = paddle.to_tensor(values, dtype='float32')
             dense_shape = [1, 1, 3, 4, 1]
-            sparse_x = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape, stop_gradient=True)
+            sparse_x = paddle.incubate.sparse.sparse_coo_tensor(indices, values, dense_shape, stop_gradient=True)
             weight = paddle.randn((1, 3, 3, 1, 1), dtype='float32')
-            y = paddle.sparse.functional.subm_conv3d(sparse_x, weight)
+            y = paddle.incubate.sparse.nn.functional.subm_conv3d(sparse_x, weight)
             print(y.shape)
             #(1, 1, 3, 4, 1)
         """
python/paddle/sparse/functional/pooling.py → python/paddle/incubate/sparse/nn/functional/pooling.py

@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from ...fluid.layers import utils
+from paddle.fluid.layers import utils
 from paddle import _C_ops, in_dynamic_mode
 from paddle.nn.functional.pooling import _update_padding_nd

@@ -70,7 +70,7 @@ def max_pool3d(x,
             kernel_sizes = [3, 3, 3]
             paddings = [0, 0, 0]
             strides = [1, 1, 1]
-            out = paddle.sparse.functional.max_pool3d(sparse_x, kernel_sizes, stride=strides, padding=paddings)
+            out = paddle.incubate.sparse.nn.functional.max_pool3d(sparse_x, kernel_sizes, stride=strides, padding=paddings)
             #[1, 2, 2, 2, 3]
         """
python/paddle/sparse/layer/unary.py → python/paddle/incubate/sparse/nn/layer/activation.py

@@ -44,7 +44,7 @@ class ReLU(Layer):
             dense_x = paddle.to_tensor(x, dtype='float32')
             sparse_dim = 2
             sparse_x = dense_x.to_sparse_coo(sparse_dim)
-            relu = paddle.sparse.ReLU()
+            relu = paddle.incubate.sparse.nn.ReLU()
             out = relu(sparse_x)
             #out.values: [0., 2., 0., 4., 5.]
         """
python/paddle/sparse/layer/conv.py → python/paddle/incubate/sparse/nn/layer/conv.py

@@ -16,8 +16,8 @@ import numpy as np
 from .. import functional as F
 from paddle.nn import Layer
 from paddle.nn.initializer import Normal
-from ..functional.conv import _update_padding_nd
-from ...fluid.layers import utils
+from paddle.nn.functional.conv import _update_padding_nd
+from paddle.fluid.layers import utils

 __all__ = []

@@ -213,8 +213,8 @@ class Conv3D(_Conv3D):
             indices = paddle.to_tensor(indices, dtype='int32')
             values = paddle.to_tensor(values, dtype='float32')
             dense_shape = [1, 1, 3, 4, 1]
-            sparse_x = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape, stop_gradient=True)
-            conv = paddle.sparse.Conv3D(1, 1, (1, 3, 3))
+            sparse_x = paddle.incubate.sparse.sparse_coo_tensor(indices, values, dense_shape, stop_gradient=True)
+            conv = paddle.incubate.sparse.nn.Conv3D(1, 1, (1, 3, 3))
             y = conv(sparse_x)
             print(y.shape)
             # (1, 1, 1, 2, 1)

@@ -346,8 +346,8 @@ class SubmConv3D(_Conv3D):
             dense_shape = [1, 1, 3, 4, 1]
             indices = paddle.to_tensor(indices, dtype='int32')
             values = paddle.to_tensor(values, dtype='float32')
-            sparse_x = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape, stop_gradient=True)
-            subm_conv = paddle.sparse.SubmConv3D(1, 1, (1, 3, 3))
+            sparse_x = paddle.incubate.sparse.sparse_coo_tensor(indices, values, dense_shape, stop_gradient=True)
+            subm_conv = paddle.incubate.sparse.nn.SubmConv3D(1, 1, (1, 3, 3))
             y = subm_conv(sparse_x)
             print(y.shape)
             # (1, 1, 3, 4, 1)
python/paddle/sparse/layer/norm.py → python/paddle/incubate/sparse/nn/layer/norm.py

@@ -100,7 +100,7 @@ class BatchNorm(paddle.nn.BatchNorm1D):
             x_data = paddle.randn((1, 6, 6, 6, channels)).astype('float32')
             dense_x = paddle.to_tensor(x_data)
             sparse_x = dense_x.to_sparse_coo(4)
-            batch_norm = paddle.sparse.BatchNorm(channels)
+            batch_norm = paddle.incubate.sparse.nn.BatchNorm(channels)
             batch_norm_out = batch_norm(sparse_x)
             print(batch_norm_out.shape)
             # [1, 6, 6, 6, 3]

@@ -153,7 +153,7 @@ class BatchNorm(paddle.nn.BatchNorm1D):
             data_format='NC',
             use_global_stats=self._use_global_stats)
-        return paddle.sparse.sparse_coo_tensor(
+        return paddle.incubate.sparse.sparse_coo_tensor(
             input.indices(),
             batch_norm_out,
             shape=input.shape,
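The forward change above is the one spot where a layer itself constructs a sparse tensor: batch norm runs on the input's values and then re-wraps the result with the input's own indices() via paddle.incubate.sparse.sparse_coo_tensor, so the output keeps the input's sparsity pattern. A small sketch of that observable behaviour (assuming eager mode and a build where the sparse batch-norm kernel is available):

    import numpy as np
    import paddle
    from paddle.fluid.framework import _test_eager_guard

    with _test_eager_guard():
        channels = 3
        dense_x = paddle.randn((1, 6, 6, 6, channels)).astype('float32')
        sparse_x = dense_x.to_sparse_coo(4)
        batch_norm = paddle.incubate.sparse.nn.BatchNorm(channels)
        out = batch_norm(sparse_x)
        # forward rebuilds the output from the input's indices, so the
        # sparsity pattern is unchanged
        assert np.array_equal(sparse_x.indices().numpy(), out.indices().numpy())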
python/paddle/sparse/layer/pooling.py → python/paddle/incubate/sparse/nn/layer/pooling.py

@@ -66,7 +66,7 @@ class MaxPool3D(Layer):
             with _test_eager_guard():
                 dense_x = paddle.randn((2, 3, 6, 6, 3))
                 sparse_x = dense_x.to_sparse_coo(4)
-                max_pool3d = paddle.sparse.MaxPool3D(
+                max_pool3d = paddle.incubate.sparse.nn.MaxPool3D(
                     kernel_size=3, data_format='NDHWC')
                 out = max_pool3d(sparse_x)
                 #shape=[2, 1, 2, 2, 3]
python/paddle/sparse/functional/unary.py → python/paddle/incubate/sparse/unary.py

@@ -17,44 +17,6 @@ __all__ = []
 from paddle import _C_ops, in_dynamic_mode


-def relu(x, name=None):
-    """
-    sparse relu activation, requiring x to be a sparse coo or sparse csr tensor.
-
-    .. math::
-
-        out = max(x, 0)
-
-    Parameters:
-        x (Tensor): The input Sparse Tensor with data type float32, float64.
-        name (str, optional): Name for the operation (optional, default is None).
-            For more information, please refer to :ref:`api_guide_Name`.
-
-    Returns:
-        A Sparse Tensor with the same data type and shape as ``x`` .
-
-    Examples:
-        .. code-block:: python
-
-            import paddle
-            from paddle.fluid.framework import _test_eager_guard
-
-            with _test_eager_guard():
-                dense_x = paddle.to_tensor([-2, 0, 1], dtype='float32')
-                sparse_x = dense_x.to_sparse_coo(1)
-                out = paddle.sparse.functional.relu(sparse_x)
-    """
-    assert in_dynamic_mode(), "Currently, Sparse API only support dynamic mode"
-
-    if x.is_sparse_coo() or x.is_sparse_csr():
-        return _C_ops.final_state_sparse_relu(x)
-    else:
-        raise ValueError(
-            "Currently, sparse.relu only support the input of SparseCooTensor or SparseCsrTensor"
-        )
-
-
 def tanh(x, name=None):
     """
     sparse tanh activation, requiring x to be a sparse coo or sparse csr tensor.

@@ -80,7 +42,7 @@ def tanh(x, name=None):
             with _test_eager_guard():
                 dense_x = paddle.to_tensor([-2, 0, 1], dtype='float32')
                 sparse_x = dense_x.to_sparse_coo(1)
-                out = paddle.sparse.tanh(sparse_x)
+                out = paddle.incubate.sparse.tanh(sparse_x)
     """
     assert in_dynamic_mode(), "Currently, Sparse API only support dynamic mode"

@@ -118,7 +80,7 @@ def sqrt(x, name=None):
             with _test_eager_guard():
                 dense_x = paddle.to_tensor([4, 0, 1], dtype='float32')
                 sparse_x = dense_x.to_sparse_coo(1)
-                out = paddle.sparse.sqrt(sparse_x)
+                out = paddle.incubate.sparse.sqrt(sparse_x)
     """
     assert in_dynamic_mode(), "Currently, Sparse API only support dynamic mode"

@@ -156,7 +118,7 @@ def sin(x, name=None):
             with _test_eager_guard():
                 dense_x = paddle.to_tensor([-2, 0, 3], dtype='float32')
                 sparse_x = dense_x.to_sparse_coo(1)
-                out = paddle.sparse.sin(sparse_x)
+                out = paddle.incubate.sparse.sin(sparse_x)
     """
     assert in_dynamic_mode(), "Currently, Sparse API only support dynamic mode"
python/setup.py.in

@@ -374,6 +374,10 @@ packages=['paddle',
           'paddle.incubate.distributed.models',
           'paddle.incubate.distributed.models.moe',
           'paddle.incubate.distributed.models.moe.gate',
+          'paddle.incubate.sparse',
+          'paddle.incubate.sparse.nn',
+          'paddle.incubate.sparse.nn.layer',
+          'paddle.incubate.sparse.nn.functional',
           'paddle.io',
           'paddle.optimizer',
           'paddle.nn',

@@ -394,9 +398,6 @@ packages=['paddle',
           'paddle.device.cuda',
           'paddle.version',
           'paddle.profiler',
-          'paddle.sparse',
-          'paddle.sparse.layer',
-          'paddle.sparse.functional',
          ]

 with open('@PADDLE_SOURCE_DIR@/python/requirements.txt') as f:
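The packaging change registers the four new paddle.incubate.sparse* subpackages and drops the three old paddle.sparse* ones, so wheels built from this revision ship only the new layout. A quick way to sanity-check a locally built wheel is to import every package added above; this is a hypothetical verification snippet, not part of the diff:

    import importlib

    for pkg in [
        'paddle.incubate.sparse',
        'paddle.incubate.sparse.nn',
        'paddle.incubate.sparse.nn.layer',
        'paddle.incubate.sparse.nn.functional',
    ]:
        importlib.import_module(pkg)  # raises ImportError if the package was not shipped
    print('all incubate.sparse subpackages import cleanly')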