Unverified commit a8f86600, authored by zhangkaihuo, committed by GitHub

Add sparse conversion API and sparse creation API (#40780)

Parent f95f3a65
@@ -40,6 +40,9 @@ limitations under the License. */
#include "paddle/phi/common/data_type.h"
#include "paddle/phi/core/compat/convert_utils.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/sparse_coo_tensor.h"
#include "paddle/phi/core/sparse_csr_tensor.h"

namespace paddle {
namespace pybind {
@@ -468,6 +471,90 @@ static PyObject* eager_api_run_costum_op(PyObject* self, PyObject* args,
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

static PyObject* eager_api_sparse_coo_tensor(PyObject* self, PyObject* args,
                                             PyObject* kwargs) {
  EAGER_TRY
  auto non_zero_indices = CastPyArg2Tensor(PyTuple_GET_ITEM(args, 0), 0);
  auto non_zero_elements = CastPyArg2Tensor(PyTuple_GET_ITEM(args, 1), 1);
  auto dense_shape = CastPyArg2VectorOfInt(PyTuple_GET_ITEM(args, 2), 2);
  auto stop_gradient = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 3), 3);
  PADDLE_ENFORCE(non_zero_indices.is_dense_tensor(),
                 paddle::platform::errors::Fatal(
                     "the non-zero indices must be a DenseTensor."));
  PADDLE_ENFORCE(non_zero_elements.is_dense_tensor(),
                 paddle::platform::errors::Fatal(
                     "the non-zero elements must be a DenseTensor."));
  auto dense_indices =
      std::dynamic_pointer_cast<phi::DenseTensor>(non_zero_indices.impl());
  auto dense_elements =
      std::dynamic_pointer_cast<phi::DenseTensor>(non_zero_elements.impl());
  // TODO(zhangkaihuo): After creating the SparseCooTensor, call coalesced() to
  // sort and merge duplicate indices.
  std::shared_ptr<phi::SparseCooTensor> coo_tensor =
      std::make_shared<phi::SparseCooTensor>(*dense_indices, *dense_elements,
                                             phi::make_ddim(dense_shape));
  paddle::experimental::Tensor tensor;
  tensor.set_impl(coo_tensor);
  auto name =
      egr::Controller::Instance().GenerateUniqueName("generated_tensor");
  tensor.set_name(name);
  auto autograd_meta = egr::EagerUtils::autograd_meta(&tensor);
  autograd_meta->SetStopGradient(static_cast<bool>(stop_gradient));
  if (!autograd_meta->GetMutableGradNode()) {
    VLOG(3) << "Tensor(" << name
            << ") has no GradNode, adding GradNodeAccumulation for it.";
    autograd_meta->SetGradNode(
        std::make_shared<egr::GradNodeAccumulation>(autograd_meta));
  }
  return ToPyObject(tensor);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

static PyObject* eager_api_sparse_csr_tensor(PyObject* self, PyObject* args,
                                             PyObject* kwargs) {
  EAGER_TRY
  auto non_zero_crows = CastPyArg2Tensor(PyTuple_GET_ITEM(args, 0), 0);
  auto non_zero_cols = CastPyArg2Tensor(PyTuple_GET_ITEM(args, 1), 1);
  auto non_zero_elements = CastPyArg2Tensor(PyTuple_GET_ITEM(args, 2), 2);
  auto dense_shape = CastPyArg2VectorOfInt(PyTuple_GET_ITEM(args, 3), 3);
  auto stop_gradient = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 4), 4);
  PADDLE_ENFORCE(non_zero_crows.is_dense_tensor(),
                 paddle::platform::errors::Fatal(
                     "the compressed non-zero rows must be a DenseTensor."));
  PADDLE_ENFORCE(non_zero_cols.is_dense_tensor(),
                 paddle::platform::errors::Fatal(
                     "the non-zero cols must be a DenseTensor."));
  PADDLE_ENFORCE(non_zero_elements.is_dense_tensor(),
                 paddle::platform::errors::Fatal(
                     "the non-zero elements must be a DenseTensor."));
  auto dense_crows =
      std::dynamic_pointer_cast<phi::DenseTensor>(non_zero_crows.impl());
  auto dense_cols =
      std::dynamic_pointer_cast<phi::DenseTensor>(non_zero_cols.impl());
  auto dense_elements =
      std::dynamic_pointer_cast<phi::DenseTensor>(non_zero_elements.impl());
  std::shared_ptr<phi::SparseCsrTensor> csr_tensor =
      std::make_shared<phi::SparseCsrTensor>(*dense_crows, *dense_cols,
                                             *dense_elements,
                                             phi::make_ddim(dense_shape));
  paddle::experimental::Tensor tensor;
  tensor.set_impl(csr_tensor);
  auto name =
      egr::Controller::Instance().GenerateUniqueName("generated_tensor");
  tensor.set_name(name);
  auto autograd_meta = egr::EagerUtils::autograd_meta(&tensor);
  autograd_meta->SetStopGradient(static_cast<bool>(stop_gradient));
  if (!autograd_meta->GetMutableGradNode()) {
    VLOG(3) << "Tensor(" << name
            << ") has no GradNode, adding GradNodeAccumulation for it.";
    autograd_meta->SetGradNode(
        std::make_shared<egr::GradNodeAccumulation>(autograd_meta));
  }
  return ToPyObject(tensor);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

PyMethodDef variable_functions[] = {
    // TODO(jiabin): Remove scale when we have final state tests
    {"scale", (PyCFunction)(void (*)(void))eager_api_scale,
@@ -490,6 +577,14 @@ PyMethodDef variable_functions[] = {
    {"read_next_tensor_list",
     (PyCFunction)(void (*)(void))eager_api_read_next_tensor_list,
     METH_VARARGS | METH_KEYWORDS, NULL},
    /**sparse functions**/
    {"sparse_coo_tensor",
     (PyCFunction)(void (*)(void))eager_api_sparse_coo_tensor,
     METH_VARARGS | METH_KEYWORDS, NULL},
    {"sparse_csr_tensor",
     (PyCFunction)(void (*)(void))eager_api_sparse_csr_tensor,
     METH_VARARGS | METH_KEYWORDS, NULL},
    /**sparse functions**/
    {NULL, NULL, 0, NULL}};

void BindFunctions(PyObject* module) {
......
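For context, the two bindings registered above are exposed on the Python side as core.eager.sparse_coo_tensor and core.eager.sparse_csr_tensor, and the unit tests at the end of this diff call them in exactly this way. A minimal sketch of the call pattern, assuming eager mode is enabled via _test_eager_guard():

import paddle
from paddle.fluid import core
from paddle.fluid.framework import _test_eager_guard

with _test_eager_guard():
    # COO: a 2 x nnz indices tensor, an nnz values tensor, the dense shape,
    # and the stop_gradient flag, in that order.
    indices = paddle.to_tensor([[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]])
    values = paddle.to_tensor([1, 2, 3, 4, 5], dtype='float32')
    coo = core.eager.sparse_coo_tensor(indices, values, [3, 4], False)

    # CSR: compressed row offsets, column indices, values, dense shape,
    # and stop_gradient.
    crows = paddle.to_tensor([0, 2, 3, 5])
    cols = paddle.to_tensor([1, 3, 2, 0, 1])
    csr = core.eager.sparse_csr_tensor(crows, cols, values, [3, 4], False)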
@@ -1097,6 +1097,49 @@ static PyObject* tensor_method_is_sparse_csr(TensorObject* self, PyObject* args,
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

static PyObject* tensor_method_to_sparse_coo(TensorObject* self, PyObject* args,
                                             PyObject* kwargs) {
  EAGER_TRY
  int64_t sparse_dim = CastPyArg2AttrLong(PyTuple_GET_ITEM(args, 0), 0);
  auto coo_tensor = self->tensor.to_sparse_coo(sparse_dim);
  egr::EagerUtils::autograd_meta(&coo_tensor)
      ->SetStopGradient(
          egr::EagerUtils::autograd_meta(&self->tensor)->StopGradient());
  egr::EagerUtils::autograd_meta(&coo_tensor)
      ->SetPersistable(
          egr::EagerUtils::autograd_meta(&(self->tensor))->Persistable());
  return ToPyObject(coo_tensor);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

static PyObject* tensor_method_to_sparse_csr(TensorObject* self, PyObject* args,
                                             PyObject* kwargs) {
  EAGER_TRY
  auto csr_tensor = self->tensor.to_sparse_csr();
  egr::EagerUtils::autograd_meta(&csr_tensor)
      ->SetStopGradient(
          egr::EagerUtils::autograd_meta(&self->tensor)->StopGradient());
  egr::EagerUtils::autograd_meta(&csr_tensor)
      ->SetPersistable(
          egr::EagerUtils::autograd_meta(&(self->tensor))->Persistable());
  return ToPyObject(csr_tensor);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

static PyObject* tensor_method_to_dense(TensorObject* self, PyObject* args,
                                        PyObject* kwargs) {
  EAGER_TRY
  auto dense_tensor = self->tensor.to_dense();
  egr::EagerUtils::autograd_meta(&dense_tensor)
      ->SetStopGradient(
          egr::EagerUtils::autograd_meta(&self->tensor)->StopGradient());
  egr::EagerUtils::autograd_meta(&dense_tensor)
      ->SetPersistable(
          egr::EagerUtils::autograd_meta(&(self->tensor))->Persistable());
  return ToPyObject(dense_tensor);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

static PyObject* tensor__inplace_version(TensorObject* self, PyObject* args,
                                         PyObject* kwargs) {
  EAGER_TRY
@@ -1185,6 +1228,12 @@ PyMethodDef variable_methods[] = {
     METH_VARARGS | METH_KEYWORDS, NULL},
    {"is_sparse_csr", (PyCFunction)(void (*)(void))tensor_method_is_sparse_csr,
     METH_VARARGS | METH_KEYWORDS, NULL},
    {"to_sparse_coo", (PyCFunction)(void (*)(void))tensor_method_to_sparse_coo,
     METH_VARARGS | METH_KEYWORDS, NULL},
    {"to_sparse_csr", (PyCFunction)(void (*)(void))tensor_method_to_sparse_csr,
     METH_VARARGS | METH_KEYWORDS, NULL},
    {"to_dense", (PyCFunction)(void (*)(void))tensor_method_to_dense,
     METH_VARARGS | METH_KEYWORDS, NULL},
    /***the method of sparse tensor****/
    {"_inplace_version", (PyCFunction)(void (*)(void))tensor__inplace_version,
     METH_VARARGS | METH_KEYWORDS, NULL},
......
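The three method bindings registered above surface on eager Tensors as to_sparse_coo(sparse_dim), to_sparse_csr(), and to_dense(), which is the path the updated tests at the end of this diff exercise. A minimal round-trip sketch, assuming a small 2-D dense tensor and eager mode:

import paddle
from paddle.fluid.framework import _test_eager_guard

with _test_eager_guard():
    dense_x = paddle.to_tensor([[0, 1, 0, 2], [0, 0, 3, 0], [4, 5, 0, 0]])
    coo = dense_x.to_sparse_coo(2)   # sparse_dim=2: both dimensions stay sparse
    csr = dense_x.to_sparse_csr()
    back = coo.to_dense()            # round-trips to the original dense values
    print(coo.non_zero_indices(), coo.non_zero_elements())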
@@ -518,6 +518,30 @@ class PADDLE_API Tensor final {
  /* Part 10: Auto generated Tensor methods */

  /* Part 11: Methods of converting SparseTensor and DenseTensor to each other
   */

  /**
   * @brief Convert DenseTensor or SparseCsrTensor to SparseCooTensor
   *
   * @param sparse_dim The number of sparse dimensions
   * @return Tensor
   */
  Tensor to_sparse_coo(const int64_t sparse_dim) const;

  /**
   * @brief Convert DenseTensor or SparseCooTensor to SparseCsrTensor
   *
   * @return Tensor
   */
  Tensor to_sparse_csr() const;

  /**
   * @brief Convert SparseCooTensor or SparseCsrTensor to DenseTensor
   *
   * @return Tensor
   */
  Tensor to_dense() const;

 private:
  /**
   * [ Why use abstract TensorImpl interface here? ]
......
@@ -149,4 +149,4 @@ cc_library(phi_bw_function_api SRCS ${bw_api_source_file} DEPS phi_tensor_raw ph
cc_library(sparse_api SRCS ${sparse_api_source_file} DEPS phi_tensor_raw phi kernel_dispatch api_gen_utils sparse_api_custom_impl)
cc_library(sparse_bw_api SRCS ${sparse_bw_api_source_file} DEPS phi_tensor_raw phi kernel_dispatch api_gen_utils sparse_api sparse_api_custom_impl)
cc_library(phi_tensor SRCS tensor_method.cc DEPS phi_tensor_raw phi_function_api api_gen_utils kernel_dispatch infermeta sparse_api)
@@ -19,6 +19,7 @@ limitations under the License. */
#include "paddle/phi/core/compat/convert_utils.h"
#include "paddle/phi/core/tensor_base.h"

#include "paddle/phi/api/include/sparse_api.h"
#include "paddle/phi/api/lib/api_gen_utils.h"
#include "paddle/phi/api/lib/kernel_dispatch.h"
#include "paddle/phi/infermeta/unary.h"
@@ -183,5 +184,17 @@ void Tensor::copy_(const Tensor &src,
  }
}

Tensor Tensor::to_sparse_coo(const int64_t sparse_dim) const {
  return experimental::sparse::to_sparse_coo(*this, sparse_dim);
}

Tensor Tensor::to_sparse_csr() const {
  return experimental::sparse::to_sparse_csr(*this);
}

Tensor Tensor::to_dense() const {
  return experimental::sparse::to_dense(*this);
}

}  // namespace experimental
}  // namespace paddle
@@ -17,25 +17,53 @@ import unittest
import numpy as np
import paddle
from paddle import _C_ops
from paddle.fluid import core
from paddle.fluid.framework import _test_eager_guard


class TestSparseUtils(unittest.TestCase):
    def test_create_sparse_coo_tensor(self):
        with _test_eager_guard():
            non_zero_indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
            non_zero_elements = [1, 2, 3, 4, 5]
            dense_shape = [3, 4]
            dense_indices = paddle.to_tensor(non_zero_indices)
            dense_elements = paddle.to_tensor(
                non_zero_elements, dtype='float32')
            stop_gradient = False
            coo = core.eager.sparse_coo_tensor(dense_indices, dense_elements,
                                               dense_shape, stop_gradient)
            print(coo)

    def test_create_sparse_csr_tensor(self):
        with _test_eager_guard():
            non_zero_crows = [0, 2, 3, 5]
            non_zero_cols = [1, 3, 2, 0, 1]
            non_zero_elements = [1, 2, 3, 4, 5]
            dense_shape = [3, 4]
            dense_crows = paddle.to_tensor(non_zero_crows)
            dense_cols = paddle.to_tensor(non_zero_cols)
            dense_elements = paddle.to_tensor(
                non_zero_elements, dtype='float32')
            stop_gradient = False
            csr = core.eager.sparse_csr_tensor(dense_crows, dense_cols,
                                               dense_elements, dense_shape,
                                               stop_gradient)
            print(csr)

    def test_to_sparse_coo(self):
        with _test_eager_guard():
            x = [[0, 1, 0, 2], [0, 0, 3, 0], [4, 5, 0, 0]]
            non_zero_indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
            non_zero_elements = [1, 2, 3, 4, 5]
            dense_x = paddle.to_tensor(x)
            out = dense_x.to_sparse_coo(2)
            print(out)
            assert np.array_equal(out.non_zero_indices().numpy(),
                                  non_zero_indices)
            assert np.array_equal(out.non_zero_elements().numpy(),
                                  non_zero_elements)
            dense_tensor = out.to_dense()
            assert np.array_equal(dense_tensor.numpy(), x)

    def test_to_sparse_csr(self):
@@ -45,14 +73,14 @@ class TestSparseUtils(unittest.TestCase):
            non_zero_cols = [1, 3, 2, 0, 1]
            non_zero_elements = [1, 2, 3, 4, 5]
            dense_x = paddle.to_tensor(x)
            out = dense_x.to_sparse_csr()
            print(out)
            assert np.array_equal(out.non_zero_crows().numpy(), non_zero_crows)
            assert np.array_equal(out.non_zero_cols().numpy(), non_zero_cols)
            assert np.array_equal(out.non_zero_elements().numpy(),
                                  non_zero_elements)
            dense_tensor = out.to_dense()
            assert np.array_equal(dense_tensor.numpy(), x)
......