PaddlePaddle / Paddle
Commit a8f86600 — authored on Mar 24, 2022 by zhangkaihuo; committed via GitHub on Mar 24, 2022
Add sparse conversion API and sparse creation API (#40780)
Parent: f95f3a65

Showing 6 changed files with 216 additions and 7 deletions (+216, -7)
paddle/fluid/pybind/eager_functions.cc                        +95  -0
paddle/fluid/pybind/eager_method.cc                           +49  -0
paddle/phi/api/include/tensor.h                               +24  -0
paddle/phi/api/lib/CMakeLists.txt                              +1  -1
paddle/phi/api/lib/tensor_method.cc                           +13  -0
python/paddle/fluid/tests/unittests/test_sparse_utils_op.py  +34  -6
paddle/fluid/pybind/eager_functions.cc @ a8f86600

@@ -40,6 +40,9 @@ limitations under the License. */
#include "paddle/phi/common/data_type.h"
#include "paddle/phi/core/compat/convert_utils.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/sparse_coo_tensor.h"
#include "paddle/phi/core/sparse_csr_tensor.h"

namespace paddle {
namespace pybind {
...
...
@@ -468,6 +471,90 @@ static PyObject* eager_api_run_costum_op(PyObject* self, PyObject* args,
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

static PyObject* eager_api_sparse_coo_tensor(PyObject* self, PyObject* args,
                                             PyObject* kwargs) {
  EAGER_TRY
  auto non_zero_indices = CastPyArg2Tensor(PyTuple_GET_ITEM(args, 0), 0);
  auto non_zero_elements = CastPyArg2Tensor(PyTuple_GET_ITEM(args, 1), 1);
  auto dense_shape = CastPyArg2VectorOfInt(PyTuple_GET_ITEM(args, 2), 2);
  auto stop_gradient = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 3), 3);
  PADDLE_ENFORCE(non_zero_indices.is_dense_tensor(),
                 paddle::platform::errors::Fatal(
                     "the non-zero indices must be a DenseTensor."));
  PADDLE_ENFORCE(non_zero_elements.is_dense_tensor(),
                 paddle::platform::errors::Fatal(
                     "the non-zero elements must be a DenseTensor."));
  auto dense_indices =
      std::dynamic_pointer_cast<phi::DenseTensor>(non_zero_indices.impl());
  auto dense_elements =
      std::dynamic_pointer_cast<phi::DenseTensor>(non_zero_elements.impl());
  // TODO(zhangkaihuo): After create SparseTensor, call coalesced() to sort and
  // merge duplicate indices
  std::shared_ptr<phi::SparseCooTensor> coo_tensor =
      std::make_shared<phi::SparseCooTensor>(*dense_indices, *dense_elements,
                                             phi::make_ddim(dense_shape));
  paddle::experimental::Tensor tensor;
  tensor.set_impl(coo_tensor);
  auto name =
      egr::Controller::Instance().GenerateUniqueName("generated_tensor");
  tensor.set_name(name);
  auto autograd_meta = egr::EagerUtils::autograd_meta(&tensor);
  autograd_meta->SetStopGradient(static_cast<bool>(stop_gradient));
  if (!autograd_meta->GetMutableGradNode()) {
    VLOG(3) << "Tensor(" << name
            << ") have not GradNode, add GradNodeAccumulation for it.";
    autograd_meta->SetGradNode(
        std::make_shared<egr::GradNodeAccumulation>(autograd_meta));
  }
  return ToPyObject(tensor);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}
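For context, a minimal Python sketch of how this binding is driven (it mirrors the unit test added later in this commit, and assumes an eager-mode build in which paddle.fluid.core exposes core.eager.sparse_coo_tensor); the positional order matches the argument parsing above:

# Usage sketch only — mirrors test_sparse_utils_op.py in this commit.
import paddle
from paddle.fluid import core
from paddle.fluid.framework import _test_eager_guard

with _test_eager_guard():
    # (non_zero_indices, non_zero_elements, dense_shape, stop_gradient)
    indices = paddle.to_tensor([[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]])  # 2 x nnz
    values = paddle.to_tensor([1, 2, 3, 4, 5], dtype='float32')     # nnz
    coo = core.eager.sparse_coo_tensor(indices, values, [3, 4], False)
    print(coo)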
static PyObject* eager_api_sparse_csr_tensor(PyObject* self, PyObject* args,
                                             PyObject* kwargs) {
  EAGER_TRY
  auto non_zero_crows = CastPyArg2Tensor(PyTuple_GET_ITEM(args, 0), 0);
  auto non_zero_cols = CastPyArg2Tensor(PyTuple_GET_ITEM(args, 1), 1);
  auto non_zero_elements = CastPyArg2Tensor(PyTuple_GET_ITEM(args, 2), 2);
  auto dense_shape = CastPyArg2VectorOfInt(PyTuple_GET_ITEM(args, 3), 3);
  auto stop_gradient = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 4), 4);
  PADDLE_ENFORCE(non_zero_crows.is_dense_tensor(),
                 paddle::platform::errors::Fatal(
                     "the compressed non-zero rows must be a DenseTensor."));
  PADDLE_ENFORCE(non_zero_cols.is_dense_tensor(),
                 paddle::platform::errors::Fatal(
                     "the non-zero cols must be a DenseTensor."));
  PADDLE_ENFORCE(non_zero_elements.is_dense_tensor(),
                 paddle::platform::errors::Fatal(
                     "the non-zero elements must be a DenseTensor."));
  auto dense_crows =
      std::dynamic_pointer_cast<phi::DenseTensor>(non_zero_crows.impl());
  auto dense_cols =
      std::dynamic_pointer_cast<phi::DenseTensor>(non_zero_cols.impl());
  auto dense_elements =
      std::dynamic_pointer_cast<phi::DenseTensor>(non_zero_elements.impl());
  std::shared_ptr<phi::SparseCsrTensor> csr_tensor =
      std::make_shared<phi::SparseCsrTensor>(*dense_crows, *dense_cols,
                                             *dense_elements,
                                             phi::make_ddim(dense_shape));
  paddle::experimental::Tensor tensor;
  tensor.set_impl(csr_tensor);
  auto name =
      egr::Controller::Instance().GenerateUniqueName("generated_tensor");
  tensor.set_name(name);
  auto autograd_meta = egr::EagerUtils::autograd_meta(&tensor);
  autograd_meta->SetStopGradient(static_cast<bool>(stop_gradient));
  if (!autograd_meta->GetMutableGradNode()) {
    VLOG(3) << "Tensor(" << name
            << ") have not GradNode, add GradNodeAccumulation for it.";
    autograd_meta->SetGradNode(
        std::make_shared<egr::GradNodeAccumulation>(autograd_meta));
  }
  return ToPyObject(tensor);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}
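The crows/cols/values triple parsed here follows the usual CSR layout: crows has one entry per row plus a trailing total, and row i owns values[crows[i]:crows[i+1]]. A small NumPy sketch, using the same data as the unit test below, shows how the triple expands back into a dense matrix:

import numpy as np

# Same CSR data as in test_sparse_utils_op.py below (dense shape [3, 4]).
crows = [0, 2, 3, 5]           # row i owns entries crows[i]:crows[i+1]
cols = [1, 3, 2, 0, 1]         # column index of each stored value
values = [1., 2., 3., 4., 5.]

dense = np.zeros((3, 4))
for i in range(3):
    for k in range(crows[i], crows[i + 1]):
        dense[i, cols[k]] = values[k]

# dense == [[0, 1, 0, 2],
#           [0, 0, 3, 0],
#           [4, 5, 0, 0]]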
PyMethodDef variable_functions[] = {
    // TODO(jiabin): Remove scale when we have final state tests
    {"scale", (PyCFunction)(void (*)(void)) eager_api_scale,
...
...
@@ -490,6 +577,14 @@ PyMethodDef variable_functions[] = {
    {"read_next_tensor_list",
     (PyCFunction)(void (*)(void)) eager_api_read_next_tensor_list,
     METH_VARARGS | METH_KEYWORDS, NULL},
    /**sparse functions**/
    {"sparse_coo_tensor",
     (PyCFunction)(void (*)(void)) eager_api_sparse_coo_tensor,
     METH_VARARGS | METH_KEYWORDS, NULL},
    {"sparse_csr_tensor",
     (PyCFunction)(void (*)(void)) eager_api_sparse_csr_tensor,
     METH_VARARGS | METH_KEYWORDS, NULL},
    /**sparse functions**/
    {NULL, NULL, 0, NULL}};
void BindFunctions(PyObject* module) {
...
...
paddle/fluid/pybind/eager_method.cc @ a8f86600
...
...
@@ -1097,6 +1097,49 @@ static PyObject* tensor_method_is_sparse_csr(TensorObject* self, PyObject* args,
EAGER_CATCH_AND_THROW_RETURN_NULL
}
static PyObject* tensor_method_to_sparse_coo(TensorObject* self,
                                             PyObject* args,
                                             PyObject* kwargs) {
  EAGER_TRY
  int64_t sparse_dim = CastPyArg2AttrLong(PyTuple_GET_ITEM(args, 0), 0);
  auto coo_tensor = self->tensor.to_sparse_coo(sparse_dim);
  egr::EagerUtils::autograd_meta(&coo_tensor)
      ->SetStopGradient(
          egr::EagerUtils::autograd_meta(&self->tensor)->StopGradient());
  egr::EagerUtils::autograd_meta(&coo_tensor)
      ->SetPersistable(
          egr::EagerUtils::autograd_meta(&(self->tensor))->Persistable());
  return ToPyObject(coo_tensor);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}
static PyObject* tensor_method_to_sparse_csr(TensorObject* self,
                                             PyObject* args,
                                             PyObject* kwargs) {
  EAGER_TRY
  auto csr_tensor = self->tensor.to_sparse_csr();
  egr::EagerUtils::autograd_meta(&csr_tensor)
      ->SetStopGradient(
          egr::EagerUtils::autograd_meta(&self->tensor)->StopGradient());
  egr::EagerUtils::autograd_meta(&csr_tensor)
      ->SetPersistable(
          egr::EagerUtils::autograd_meta(&(self->tensor))->Persistable());
  return ToPyObject(csr_tensor);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}
static PyObject* tensor_method_to_dense(TensorObject* self, PyObject* args,
                                        PyObject* kwargs) {
  EAGER_TRY
  auto dense_tensor = self->tensor.to_dense();
  egr::EagerUtils::autograd_meta(&dense_tensor)
      ->SetStopGradient(
          egr::EagerUtils::autograd_meta(&self->tensor)->StopGradient());
  egr::EagerUtils::autograd_meta(&dense_tensor)
      ->SetPersistable(
          egr::EagerUtils::autograd_meta(&(self->tensor))->Persistable());
  return ToPyObject(dense_tensor);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}
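Together these methods expose the conversions on Python-side tensors as to_sparse_coo(sparse_dim), to_sparse_csr() and to_dense(). A minimal round-trip sketch, mirroring the tests added below and assuming an eager-mode build where the methods are registered:

import numpy as np
import paddle
from paddle.fluid.framework import _test_eager_guard

with _test_eager_guard():
    x = [[0, 1, 0, 2], [0, 0, 3, 0], [4, 5, 0, 0]]
    dense_x = paddle.to_tensor(x)
    coo = dense_x.to_sparse_coo(2)      # keep 2 sparse dimensions
    csr = dense_x.to_sparse_csr()
    # Converting back should reproduce the original dense values.
    assert np.array_equal(coo.to_dense().numpy(), np.array(x))
    assert np.array_equal(csr.to_dense().numpy(), np.array(x))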
static PyObject* tensor__inplace_version(TensorObject* self, PyObject* args,
                                         PyObject* kwargs) {
  EAGER_TRY
...
...
@@ -1185,6 +1228,12 @@ PyMethodDef variable_methods[] = {
     METH_VARARGS | METH_KEYWORDS, NULL},
    {"is_sparse_csr",
     (PyCFunction)(void (*)(void)) tensor_method_is_sparse_csr,
     METH_VARARGS | METH_KEYWORDS, NULL},
    {"to_sparse_coo",
     (PyCFunction)(void (*)(void)) tensor_method_to_sparse_coo,
     METH_VARARGS | METH_KEYWORDS, NULL},
    {"to_sparse_csr",
     (PyCFunction)(void (*)(void)) tensor_method_to_sparse_csr,
     METH_VARARGS | METH_KEYWORDS, NULL},
    {"to_dense", (PyCFunction)(void (*)(void)) tensor_method_to_dense,
     METH_VARARGS | METH_KEYWORDS, NULL},
    /***the method of sparse tensor****/
    {"_inplace_version",
     (PyCFunction)(void (*)(void)) tensor__inplace_version,
     METH_VARARGS | METH_KEYWORDS, NULL},
...
...
paddle/phi/api/include/tensor.h @ a8f86600
...
...
@@ -518,6 +518,30 @@ class PADDLE_API Tensor final {
  /* Part 10: Auto generated Tensor methods */

  /* Part 11: Methods of converting SparseTensor and DenseTensor to each other */

  /**
   * @brief Convert DenseTensor or SparseCsrTensor to SparseCooTensor
   *
   * @param sparse_dim, The number of sparse dimensions
   * @return Tensor
   */
  Tensor to_sparse_coo(const int64_t sparse_dim) const;

  /**
   * @brief Convert DenseTensor or SparseCooTensor to SparseCsrTensor
   *
   * @return Tensor
   */
  Tensor to_sparse_csr() const;

  /**
   * @brief Convert SparseCooTensor or SparseCsrTensor to DenseTensor
   *
   * @return Tensor
   */
  Tensor to_dense() const;

 private:
  /**
   * [ Why use abstract TensorImpl interface here? ]
...
...
paddle/phi/api/lib/CMakeLists.txt @ a8f86600
...
...
@@ -149,4 +149,4 @@ cc_library(phi_bw_function_api SRCS ${bw_api_source_file} DEPS phi_tensor_raw ph
cc_library(sparse_api SRCS ${sparse_api_source_file} DEPS phi_tensor_raw phi kernel_dispatch api_gen_utils sparse_api_custom_impl)
cc_library(sparse_bw_api SRCS ${sparse_bw_api_source_file} DEPS phi_tensor_raw phi kernel_dispatch api_gen_utils sparse_api sparse_api_custom_impl)
cc_library(phi_tensor SRCS tensor_method.cc DEPS phi_tensor_raw phi_function_api api_gen_utils kernel_dispatch infermeta)
cc_library(phi_tensor SRCS tensor_method.cc DEPS phi_tensor_raw phi_function_api api_gen_utils kernel_dispatch infermeta sparse_api)
paddle/phi/api/lib/tensor_method.cc @ a8f86600
...
...
@@ -19,6 +19,7 @@ limitations under the License. */
#include "paddle/phi/core/compat/convert_utils.h"
#include "paddle/phi/core/tensor_base.h"
#include "paddle/phi/api/include/sparse_api.h"
#include "paddle/phi/api/lib/api_gen_utils.h"
#include "paddle/phi/api/lib/kernel_dispatch.h"
#include "paddle/phi/infermeta/unary.h"
...
...
@@ -183,5 +184,17 @@ void Tensor::copy_(const Tensor &src,
}
}
Tensor Tensor::to_sparse_coo(const int64_t sparse_dim) const {
  return experimental::sparse::to_sparse_coo(*this, sparse_dim);
}

Tensor Tensor::to_sparse_csr() const {
  return experimental::sparse::to_sparse_csr(*this);
}

Tensor Tensor::to_dense() const {
  return experimental::sparse::to_dense(*this);
}

}  // namespace experimental
}  // namespace paddle
python/paddle/fluid/tests/unittests/test_sparse_utils_op.py @ a8f86600
...
...
@@ -17,25 +17,53 @@ import unittest
import numpy as np
import paddle
from paddle import _C_ops
from paddle.fluid import core
from paddle.fluid.framework import _test_eager_guard


class TestSparseUtils(unittest.TestCase):
    def test_create_sparse_coo_tensor(self):
        with _test_eager_guard():
            non_zero_indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
            non_zero_elements = [1, 2, 3, 4, 5]
            dense_shape = [3, 4]
            dense_indices = paddle.to_tensor(non_zero_indices)
            dense_elements = paddle.to_tensor(non_zero_elements, dtype='float32')
            stop_gradient = False
            coo = core.eager.sparse_coo_tensor(dense_indices, dense_elements,
                                               dense_shape, stop_gradient)
            print(coo)

    def test_create_sparse_csr_tensor(self):
        with _test_eager_guard():
            non_zero_crows = [0, 2, 3, 5]
            non_zero_cols = [1, 3, 2, 0, 1]
            non_zero_elements = [1, 2, 3, 4, 5]
            dense_shape = [3, 4]
            dense_crows = paddle.to_tensor(non_zero_crows)
            dense_cols = paddle.to_tensor(non_zero_cols)
            dense_elements = paddle.to_tensor(non_zero_elements, dtype='float32')
            stop_gradient = False
            csr = core.eager.sparse_csr_tensor(dense_crows, dense_cols,
                                               dense_elements, dense_shape,
                                               stop_gradient)
            print(csr)

    def test_to_sparse_coo(self):
        with _test_eager_guard():
            x = [[0, 1, 0, 2], [0, 0, 3, 0], [4, 5, 0, 0]]
            non_zero_indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
            non_zero_elements = [1, 2, 3, 4, 5]
            dense_x = paddle.to_tensor(x)
            #TODO(zhangkaihuo): change to test the corresponding API
            out = _C_ops.final_state_to_sparse_coo(dense_x, 2)
            print(out)
            out = dense_x.to_sparse_coo(2)
            assert np.array_equal(out.non_zero_indices().numpy(),
                                  non_zero_indices)
            assert np.array_equal(out.non_zero_elements().numpy(),
                                  non_zero_elements)
            dense_tensor = _C_ops.final_state_to_dense(out)
            dense_tensor = out.to_dense()
            assert np.array_equal(dense_tensor.numpy(), x)

    def test_to_sparse_csr(self):
...
...
@@ -45,14 +73,14 @@ class TestSparseUtils(unittest.TestCase):
            non_zero_cols = [1, 3, 2, 0, 1]
            non_zero_elements = [1, 2, 3, 4, 5]
            dense_x = paddle.to_tensor(x)
            out = _C_ops.final_state_to_sparse_csr(dense_x)
            out = dense_x.to_sparse_csr()
            print(out)
            assert np.array_equal(out.non_zero_crows().numpy(), non_zero_crows)
            assert np.array_equal(out.non_zero_cols().numpy(), non_zero_cols)
            assert np.array_equal(out.non_zero_elements().numpy(),
                                  non_zero_elements)
            dense_tensor = _C_ops.final_state_to_dense(out)
            dense_tensor = out.to_dense()
            assert np.array_equal(dense_tensor.numpy(), x)
...
...