BaiXuePrincess / Paddle, commit fd6b1a02 (forked from PaddlePaddle / Paddle)
Unverified commit fd6b1a02, authored on Jul 13, 2022 by zhangkaihuo and committed via GitHub on Jul 13, 2022.
Add sparse.coalesce (#44256)
* add sparse api coalesce
Parent: 77c010a0
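
For orientation, a minimal usage sketch of the API this commit introduces, adapted from the docstring added in python/paddle/incubate/sparse/unary.py later in this diff (eager mode assumed, as in the updated unit tests; printed results are the expected values from that docstring):

    import paddle
    from paddle.incubate import sparse
    from paddle.fluid.framework import _test_eager_guard

    with _test_eager_guard():
        # the index (0, 1) appears twice, with values 1.0 and 2.0
        indices = [[0, 0, 1], [1, 1, 2]]
        values = [1.0, 2.0, 3.0]
        sp_x = sparse.sparse_coo_tensor(indices, values)
        sp_x = sparse.coalesce(sp_x)   # sort indices and sum duplicates
        print(sp_x.indices())          # [[0, 1], [1, 2]]
        print(sp_x.values())           # [3.0, 3.0]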
Showing 11 changed files with 88 additions and 44 deletions (+88 −44).
Files changed:
  paddle/phi/api/yaml/sparse_api.yaml                            +7   -0
  paddle/phi/kernels/sparse/coalesce_kernel.h                    +10  -3
  paddle/phi/kernels/sparse/cpu/coalesce_kernel.cc               +11  -11
  paddle/phi/kernels/sparse/gpu/coalesce_kernel.cu               +13  -22
  paddle/phi/kernels/sparse/sparse_utils_kernel.h                +2   -4
  paddle/phi/tests/kernels/test_sparse_conv3d_dev_api.cc         +5   -2
  paddle/phi/tests/kernels/test_sparse_pool_dev_api.cc           +4   -2
  python/paddle/fluid/tests/unittests/test_sparse_conv_op.py     +1   -0
  python/paddle/fluid/tests/unittests/test_sparse_utils_op.py    +2   -0
  python/paddle/incubate/sparse/__init__.py                      +2   -0
  python/paddle/incubate/sparse/unary.py                         +31  -0
paddle/phi/api/yaml/sparse_api.yaml
@@ -266,6 +266,13 @@
     layout : x
   backward : values_grad
 
+- api : coalesce
+  args : (Tensor x)
+  output : Tensor(out)
+  kernel :
+    func : coalesce{sparse_coo -> sparse_coo}
+    layout : x
+
 - api : full_like
   args : (Tensor x, Scalar value, DataType dtype=DataType::UNDEFINED)
   output : Tensor(out)
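
This YAML entry drives API code generation: it declares a coalesce op that maps a sparse_coo input to a sparse_coo output and keeps the input layout. In dygraph mode the generated binding is what the Python wrapper later in this diff calls; a sketch of that call path (a sketch only, assuming eager mode and that sp_x is already a SparseCooTensor):

    from paddle import _C_ops

    # equivalent to calling paddle.incubate.sparse.coalesce(sp_x) in eager mode
    out = _C_ops.final_state_sparse_coalesce(sp_x)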
paddle/phi/kernels/sparse/coalesced_kernel.h → paddle/phi/kernels/sparse/coalesce_kernel.h
@@ -22,9 +22,16 @@ namespace phi {
 namespace sparse {
 
 template <typename T, typename Context>
-void CoalescedKernel(const Context& dev_ctx,
-                     const SparseCooTensor& x,
-                     SparseCooTensor* out);
+void CoalesceKernel(const Context& dev_ctx,
+                    const SparseCooTensor& x,
+                    SparseCooTensor* out);
+
+template <typename T, typename Context>
+SparseCooTensor Coalesce(const Context& dev_ctx, const SparseCooTensor& x) {
+  SparseCooTensor coo;
+  CoalesceKernel<T, Context>(dev_ctx, x, &coo);
+  return coo;
+}
 
 }  // namespace sparse
 }  // namespace phi
paddle/phi/kernels/sparse/cpu/coalesced_kernel.cc → paddle/phi/kernels/sparse/cpu/coalesce_kernel.cc
@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 
-#include "paddle/phi/kernels/sparse/coalesced_kernel.h"
+#include "paddle/phi/kernels/sparse/coalesce_kernel.h"
 
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/core/visit_type.h"
@@ -22,7 +22,7 @@ namespace phi {
 namespace sparse {
 
 template <typename T, typename IntT>
-void CoalescedCPUKernel(const CPUContext& dev_ctx,
+void CoalesceCPUKernel(const CPUContext& dev_ctx,
                        const SparseCooTensor& x,
                        SparseCooTensor* out) {
   const DenseTensor& x_indices = x.non_zero_indices();
@@ -95,22 +95,22 @@ void CoalescedCPUKernel(const CPUContext& dev_ctx,
 }
 
 template <typename T, typename Context>
-void CoalescedKernel(const Context& dev_ctx,
+void CoalesceKernel(const Context& dev_ctx,
                     const SparseCooTensor& x,
                     SparseCooTensor* out) {
   PD_VISIT_INTEGRAL_TYPES(
-      x.non_zero_indices().dtype(), "CoalescedCPUKernel", ([&] {
-        CoalescedCPUKernel<T, data_t>(dev_ctx, x, out);
+      x.non_zero_indices().dtype(), "CoalesceCPUKernel", ([&] {
+        CoalesceCPUKernel<T, data_t>(dev_ctx, x, out);
       }));
 }
 
 }  // namespace sparse
 }  // namespace phi
 
-PD_REGISTER_KERNEL(sort,
+PD_REGISTER_KERNEL(coalesce,
                    CPU,
                    ALL_LAYOUT,
-                   phi::sparse::CoalescedKernel,
+                   phi::sparse::CoalesceKernel,
                    float,
                    double,
                    phi::dtype::float16,
paddle/phi/kernels/sparse/gpu/coalesced_kernel.cu → paddle/phi/kernels/sparse/gpu/coalesce_kernel.cu
@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 
-#include "paddle/phi/kernels/sparse/coalesced_kernel.h"
+#include "paddle/phi/kernels/sparse/coalesce_kernel.h"
 
 #include "paddle/phi/backends/gpu/gpu_info.h"
 #include "paddle/phi/backends/gpu/gpu_launch_config.h"
@@ -27,7 +27,7 @@ namespace phi {
 namespace sparse {
 
 template <typename T, typename IntT>
-void CoalescedGPUKernel(const GPUContext& dev_ctx,
+void CoalesceGPUKernel(const GPUContext& dev_ctx,
                        const SparseCooTensor& x,
                        SparseCooTensor* out) {
   const DenseTensor& x_indices = x.non_zero_indices();
@@ -55,11 +55,7 @@ void CoalescedGPUKernel(const GPUContext& dev_ctx,
   phi::backends::gpu::GpuMemcpyAsync(d_sparse_offsets.data<IntT>(),
                                      sparse_offsets.data(),
                                      sizeof(IntT) * sparse_dim,
-#ifdef PADDLE_WITH_HIP
-                                     hipMemcpyHostToDevice,
-#else
-                                     cudaMemcpyHostToDevice,
-#endif
+                                     gpuMemcpyHostToDevice,
                                      dev_ctx.stream());
 
   // 1. flatten indices
@@ -117,11 +113,7 @@ void CoalescedGPUKernel(const GPUContext& dev_ctx,
   phi::backends::gpu::GpuMemcpyAsync(&out_nnz,
                                      out_indices.data<IntT>(),
                                      sizeof(IntT),
-#ifdef PADDLE_WITH_HIP
-                                     hipMemcpyDeviceToHost,
-#else
-                                     cudaMemcpyDeviceToHost,
-#endif
+                                     gpuMemcpyDeviceToHost,
                                      dev_ctx.stream());
   dev_ctx.Wait();
@@ -161,22 +153,21 @@ void CoalescedGPUKernel(const GPUContext& dev_ctx,
 }
 
 template <typename T, typename Context>
-void CoalescedKernel(const Context& dev_ctx,
+void CoalesceKernel(const Context& dev_ctx,
                     const SparseCooTensor& x,
                     SparseCooTensor* out) {
   PD_VISIT_INTEGRAL_TYPES(
-      x.non_zero_indices().dtype(), "CoalescedGPUKernel", ([&] {
-        CoalescedGPUKernel<T, data_t>(dev_ctx, x, out);
+      x.non_zero_indices().dtype(), "CoalesceGPUKernel", ([&] {
+        CoalesceGPUKernel<T, data_t>(dev_ctx, x, out);
       }));
 }
 
 }  // namespace sparse
 }  // namespace phi
 
-PD_REGISTER_KERNEL(sort,
+PD_REGISTER_KERNEL(coalesce,
                    GPU,
                    ALL_LAYOUT,
-                   phi::sparse::CoalescedKernel,
+                   phi::sparse::CoalesceKernel,
                    float,
                    double,
                    phi::dtype::float16,
paddle/phi/kernels/sparse/sparse_utils_kernel.h
@@ -19,7 +19,6 @@ limitations under the License. */
 #include "paddle/phi/core/sparse_coo_tensor.h"
 #include "paddle/phi/core/sparse_csr_tensor.h"
 #include "paddle/phi/kernels/empty_kernel.h"
-#include "paddle/phi/kernels/sparse/coalesced_kernel.h"
 
 namespace phi {
 namespace sparse {
@@ -154,9 +153,8 @@ void SparseCooTensorKernel(const Context& dev_ctx,
                            const DenseTensor& indices,
                            const IntArray& dense_shape,
                            SparseCooTensor* out) {
-  SparseCooTensor before_coalesced(
-      indices, values, phi::make_ddim(dense_shape.GetData()));
-  CoalescedKernel<T, Context>(dev_ctx, before_coalesced, out);
+  *out =
+      SparseCooTensor(indices, values, phi::make_ddim(dense_shape.GetData()));
 }
 
 }  // namespace sparse
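
With this change SparseCooTensorKernel simply wraps the given indices and values into a SparseCooTensor instead of coalescing them first, so duplicate indices now survive construction until coalesce is called explicitly; that is why the unit tests below add the explicit call. A small sketch of the resulting behavior, reusing the example values from the new docstring (a sketch only; eager mode assumed, and the comments state the expected results):

    import paddle
    from paddle.incubate import sparse
    from paddle.fluid.framework import _test_eager_guard

    with _test_eager_guard():
        sp_x = sparse.sparse_coo_tensor([[0, 0, 1], [1, 1, 2]], [1.0, 2.0, 3.0])
        print(sp_x.indices())                    # the duplicate (0, 1) entry is kept
        print(sparse.coalesce(sp_x).indices())   # [[0, 1], [1, 2]] after merging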
paddle/phi/tests/kernels/test_sparse_conv3d_dev_api.cc
@@ -22,6 +22,7 @@ limitations under the License. */
 #include "paddle/phi/common/place.h"
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/core/tensor_utils.h"
+#include "paddle/phi/kernels/sparse/coalesce_kernel.h"
 #include "paddle/phi/kernels/sparse/convolution_grad_kernel.h"
 #include "paddle/phi/kernels/sparse/convolution_kernel.h"
@@ -207,6 +208,8 @@ void TestConv3dBase(const std::vector<IntT>& indices,
                             subm,
                             &d_rulebook);
 
+    SparseCooTensor tmp_d_out = sparse::Coalesce<T>(dev_ctx_gpu, d_out);
+
     ASSERT_EQ(correct_out_dims.size(), d_out.dims().size());
     ASSERT_EQ((int64_t)correct_out_features.size() / out_channels, d_out.nnz());
     for (int i = 0; i < correct_out_dims.size(); i++) {
@@ -217,7 +220,7 @@ void TestConv3dBase(const std::vector<IntT>& indices,
         dev_ctx_cpu,
         DenseTensorMeta(indices_dtype, {4, d_out.nnz()}, DataLayout::NCHW));
     phi::Copy(dev_ctx_gpu,
-              d_out.non_zero_indices(),
+              tmp_d_out.non_zero_indices(),
               phi::CPUPlace(),
               true,
               &h_indices_tensor);
@@ -231,7 +234,7 @@ void TestConv3dBase(const std::vector<IntT>& indices,
         phi::EmptyLike<T>(dev_ctx_cpu, d_out.non_zero_elements());
     phi::Copy(dev_ctx_gpu,
-              d_out.non_zero_elements(),
+              tmp_d_out.non_zero_elements(),
               phi::CPUPlace(),
               true,
               &h_features_tensor);
paddle/phi/tests/kernels/test_sparse_pool_dev_api.cc
@@ -22,6 +22,7 @@ limitations under the License. */
 #include "paddle/phi/common/place.h"
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/core/tensor_utils.h"
+#include "paddle/phi/kernels/sparse/coalesce_kernel.h"
 #include "paddle/phi/kernels/sparse/sparse_pool_grad_kernel.h"
 #include "paddle/phi/kernels/sparse/sparse_pool_kernel.h"
@@ -157,6 +158,7 @@ void TestMaxPoolBase(const std::vector<IntT>& indices,
                             dilations,
                             strides,
                             &d_rulebook);
+    SparseCooTensor tmp_d_out = sparse::Coalesce<T>(dev_ctx_gpu, d_out);
 
     ASSERT_EQ(correct_out_dims.size(), d_out.dims().size());
     ASSERT_EQ((int64_t)correct_out_features.size() / out_channels, d_out.nnz());
@@ -168,7 +170,7 @@ void TestMaxPoolBase(const std::vector<IntT>& indices,
         dev_ctx_cpu,
         DenseTensorMeta(indices_dtype, {4, d_out.nnz()}, DataLayout::NCHW));
     phi::Copy(dev_ctx_gpu,
-              d_out.non_zero_indices(),
+              tmp_d_out.non_zero_indices(),
               phi::CPUPlace(),
               true,
               &h_indices_tensor);
@@ -182,7 +184,7 @@ void TestMaxPoolBase(const std::vector<IntT>& indices,
         phi::EmptyLike<T>(dev_ctx_cpu, d_out.non_zero_elements());
     phi::Copy(dev_ctx_gpu,
-              d_out.non_zero_elements(),
+              tmp_d_out.non_zero_elements(),
               phi::CPUPlace(),
              true,
              &h_features_tensor);
python/paddle/fluid/tests/unittests/test_sparse_conv_op.py
@@ -53,6 +53,7 @@ class TestSparseConv(unittest.TestCase):
                 groups=1,
                 data_format="NDHWC")
             out.backward(out)
+            out = paddle.incubate.sparse.coalesce(out)
             assert np.array_equal(correct_out_values, out.values().numpy())
 
     def test_subm_conv3d(self):
python/paddle/fluid/tests/unittests/test_sparse_utils_op.py
@@ -298,6 +298,7 @@ class TestSparseConvert(unittest.TestCase):
             values = paddle.to_tensor(values, dtype='float32')
             sparse_x = paddle.incubate.sparse.sparse_coo_tensor(indices, values)
+            sparse_x = paddle.incubate.sparse.coalesce(sparse_x)
             indices_sorted = [[0, 1], [1, 0]]
             values_sorted = [5.0, 1.0]
             assert np.array_equal(indices_sorted,
@@ -310,6 +311,7 @@ class TestSparseConvert(unittest.TestCase):
             values = paddle.to_tensor(values, dtype='float32')
             sparse_x = paddle.incubate.sparse.sparse_coo_tensor(indices, values)
+            sparse_x = paddle.incubate.sparse.coalesce(sparse_x)
             values_sorted = [[5.0, 5.0], [1.0, 1.0]]
             assert np.array_equal(indices_sorted,
                                   sparse_x.indices().numpy())
python/paddle/incubate/sparse/__init__.py
@@ -30,6 +30,7 @@ from .unary import abs
 from .unary import pow
 from .unary import cast
 from .unary import neg
+from .unary import coalesce
 
 from .binary import mv
 from .binary import matmul
@@ -66,4 +67,5 @@ __all__ = [
     'subtract',
     'multiply',
     'divide',
+    'coalesce',
 ]
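
Re-exporting coalesce from the package __init__ (and listing it in __all__) is what makes the spelling used in the updated tests above, paddle.incubate.sparse.coalesce, resolvable. A quick check, assuming a build that contains this commit (expected outputs noted in the comments):

    import paddle.incubate.sparse as sparse

    print('coalesce' in sparse.__all__)   # expected: True
    print(callable(sparse.coalesce))      # expected: True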
python/paddle/incubate/sparse/unary.py
@@ -472,3 +472,34 @@ def abs(x, name=None):
     """
     return _C_ops.final_state_sparse_abs(x)
+
+
+@dygraph_only
+def coalesce(x):
+    r"""
+    the coalesced operator include sorted and merge, after coalesced, the indices of x is sorted and unique.
+
+    Parameters:
+        x (Tensor): the input SparseCooTensor.
+
+    Returns:
+        Tensor: return the SparseCooTensor after coalesced.
+
+    Examples:
+        .. code-block:: python
+
+            import paddle
+            from paddle.incubate import sparse
+            from paddle.fluid.framework import _test_eager_guard
+
+            with _test_eager_guard():
+                indices = [[0, 0, 1], [1, 1, 2]]
+                values = [1.0, 2.0, 3.0]
+                sp_x = sparse.sparse_coo_tensor(indices, values)
+                sp_x = sparse.coalesce(sp_x)
+                print(sp_x.indices())
+                #[[0, 1], [1, 2]]
+                print(sp_x.values())
+                #[3.0, 3.0]
+    """
+    return _C_ops.final_state_sparse_coalesce(x)