diff --git a/paddle/fluid/pybind/eager_functions.cc b/paddle/fluid/pybind/eager_functions.cc
index f3c48309e69fe8b40099b076f673e4ba8c8bcabd..30c34bb55f460417866e54520860df19921a335a 100644
--- a/paddle/fluid/pybind/eager_functions.cc
+++ b/paddle/fluid/pybind/eager_functions.cc
@@ -40,6 +40,9 @@ limitations under the License. */
 #include "paddle/phi/common/data_type.h"
 #include "paddle/phi/core/compat/convert_utils.h"
 #include "paddle/phi/core/dense_tensor.h"
+#include "paddle/phi/core/sparse_coo_tensor.h"
+#include "paddle/phi/core/sparse_csr_tensor.h"
+
 
 namespace paddle {
 namespace pybind {
@@ -468,6 +471,90 @@ static PyObject* eager_api_run_costum_op(PyObject* self, PyObject* args,
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
 
+static PyObject* eager_api_sparse_coo_tensor(PyObject* self, PyObject* args,
+                                             PyObject* kwargs) {
+  EAGER_TRY
+  auto non_zero_indices = CastPyArg2Tensor(PyTuple_GET_ITEM(args, 0), 0);
+  auto non_zero_elements = CastPyArg2Tensor(PyTuple_GET_ITEM(args, 1), 1);
+  auto dense_shape = CastPyArg2VectorOfInt(PyTuple_GET_ITEM(args, 2), 2);
+  auto stop_gradient = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 3), 3);
+  PADDLE_ENFORCE(non_zero_indices.is_dense_tensor(),
+                 paddle::platform::errors::Fatal(
+                     "The non-zero indices must be a DenseTensor."));
+  PADDLE_ENFORCE(non_zero_elements.is_dense_tensor(),
+                 paddle::platform::errors::Fatal(
+                     "The non-zero elements must be a DenseTensor."));
+  auto dense_indices =
+      std::dynamic_pointer_cast<phi::DenseTensor>(non_zero_indices.impl());
+  auto dense_elements =
+      std::dynamic_pointer_cast<phi::DenseTensor>(non_zero_elements.impl());
+  // TODO(zhangkaihuo): After creating the SparseTensor, call coalesced() to
+  // sort and merge duplicate indices.
+  std::shared_ptr<phi::SparseCooTensor> coo_tensor =
+      std::make_shared<phi::SparseCooTensor>(*dense_indices, *dense_elements,
+                                             phi::make_ddim(dense_shape));
+  paddle::experimental::Tensor tensor;
+  tensor.set_impl(coo_tensor);
+  auto name =
+      egr::Controller::Instance().GenerateUniqueName("generated_tensor");
+  tensor.set_name(name);
+  auto autograd_meta = egr::EagerUtils::autograd_meta(&tensor);
+  autograd_meta->SetStopGradient(static_cast<bool>(stop_gradient));
+  if (!autograd_meta->GetMutableGradNode()) {
+    VLOG(3) << "Tensor(" << name
+            << ") has no GradNode, adding a GradNodeAccumulation for it.";
+    autograd_meta->SetGradNode(
+        std::make_shared<egr::GradNodeAccumulation>(autograd_meta));
+  }
+  return ToPyObject(tensor);
+  EAGER_CATCH_AND_THROW_RETURN_NULL
+}
+
+static PyObject* eager_api_sparse_csr_tensor(PyObject* self, PyObject* args,
+                                             PyObject* kwargs) {
+  EAGER_TRY
+  auto non_zero_crows = CastPyArg2Tensor(PyTuple_GET_ITEM(args, 0), 0);
+  auto non_zero_cols = CastPyArg2Tensor(PyTuple_GET_ITEM(args, 1), 1);
+  auto non_zero_elements = CastPyArg2Tensor(PyTuple_GET_ITEM(args, 2), 2);
+  auto dense_shape = CastPyArg2VectorOfInt(PyTuple_GET_ITEM(args, 3), 3);
+  auto stop_gradient = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 4), 4);
+  PADDLE_ENFORCE(non_zero_crows.is_dense_tensor(),
+                 paddle::platform::errors::Fatal(
+                     "The compressed non-zero rows must be a DenseTensor."));
+  PADDLE_ENFORCE(non_zero_cols.is_dense_tensor(),
+                 paddle::platform::errors::Fatal(
+                     "The non-zero cols must be a DenseTensor."));
+  PADDLE_ENFORCE(non_zero_elements.is_dense_tensor(),
+                 paddle::platform::errors::Fatal(
+                     "The non-zero elements must be a DenseTensor."));
+
+  auto dense_crows =
+      std::dynamic_pointer_cast<phi::DenseTensor>(non_zero_crows.impl());
+  auto dense_cols =
+      std::dynamic_pointer_cast<phi::DenseTensor>(non_zero_cols.impl());
+  auto dense_elements =
+      std::dynamic_pointer_cast<phi::DenseTensor>(non_zero_elements.impl());
+  std::shared_ptr<phi::SparseCsrTensor> csr_tensor =
+      std::make_shared<phi::SparseCsrTensor>(*dense_crows, *dense_cols,
+                                             *dense_elements,
+                                             phi::make_ddim(dense_shape));
+  paddle::experimental::Tensor tensor;
+  tensor.set_impl(csr_tensor);
+  auto name =
+      egr::Controller::Instance().GenerateUniqueName("generated_tensor");
+  tensor.set_name(name);
+  auto autograd_meta = egr::EagerUtils::autograd_meta(&tensor);
+  autograd_meta->SetStopGradient(static_cast<bool>(stop_gradient));
+  if (!autograd_meta->GetMutableGradNode()) {
+    VLOG(3) << "Tensor(" << name
+            << ") has no GradNode, adding a GradNodeAccumulation for it.";
+    autograd_meta->SetGradNode(
+        std::make_shared<egr::GradNodeAccumulation>(autograd_meta));
+  }
+  return ToPyObject(tensor);
+  EAGER_CATCH_AND_THROW_RETURN_NULL
+}
+
 PyMethodDef variable_functions[] = {
     // TODO(jiabin): Remove scale when we have final state tests
     {"scale", (PyCFunction)(void (*)(void))eager_api_scale,
@@ -490,6 +577,14 @@ PyMethodDef variable_functions[] = {
     {"read_next_tensor_list",
      (PyCFunction)(void (*)(void))eager_api_read_next_tensor_list,
      METH_VARARGS | METH_KEYWORDS, NULL},
+    /**sparse functions**/
+    {"sparse_coo_tensor",
+     (PyCFunction)(void (*)(void))eager_api_sparse_coo_tensor,
+     METH_VARARGS | METH_KEYWORDS, NULL},
+    {"sparse_csr_tensor",
+     (PyCFunction)(void (*)(void))eager_api_sparse_csr_tensor,
+     METH_VARARGS | METH_KEYWORDS, NULL},
+    /**sparse functions**/
     {NULL, NULL, 0, NULL}};
 
 void BindFunctions(PyObject* module) {
diff --git a/paddle/fluid/pybind/eager_method.cc b/paddle/fluid/pybind/eager_method.cc
index 9f74bcff77d4a6323c18e05f7cf3f06deed08c73..cd47e04a3e9c24e78e5e36107bfa1d085ff16b19 100644
--- a/paddle/fluid/pybind/eager_method.cc
+++ b/paddle/fluid/pybind/eager_method.cc
@@ -1097,6 +1097,49 @@ static PyObject* tensor_method_is_sparse_csr(TensorObject* self, PyObject* args,
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
 
+static PyObject* tensor_method_to_sparse_coo(TensorObject* self, PyObject* args,
+                                             PyObject* kwargs) {
+  EAGER_TRY
+  int64_t sparse_dim = CastPyArg2AttrLong(PyTuple_GET_ITEM(args, 0), 0);
+  auto coo_tensor = self->tensor.to_sparse_coo(sparse_dim);
+  egr::EagerUtils::autograd_meta(&coo_tensor)
+      ->SetStopGradient(
+          egr::EagerUtils::autograd_meta(&self->tensor)->StopGradient());
+  egr::EagerUtils::autograd_meta(&coo_tensor)
+      ->SetPersistable(
+          egr::EagerUtils::autograd_meta(&(self->tensor))->Persistable());
+  return ToPyObject(coo_tensor);
+  EAGER_CATCH_AND_THROW_RETURN_NULL
+}
+
+static PyObject* tensor_method_to_sparse_csr(TensorObject* self, PyObject* args,
+                                             PyObject* kwargs) {
+  EAGER_TRY
+  auto csr_tensor = self->tensor.to_sparse_csr();
+  egr::EagerUtils::autograd_meta(&csr_tensor)
+      ->SetStopGradient(
+          egr::EagerUtils::autograd_meta(&self->tensor)->StopGradient());
+  egr::EagerUtils::autograd_meta(&csr_tensor)
+      ->SetPersistable(
+          egr::EagerUtils::autograd_meta(&(self->tensor))->Persistable());
+  return ToPyObject(csr_tensor);
+  EAGER_CATCH_AND_THROW_RETURN_NULL
+}
+
+static PyObject* tensor_method_to_dense(TensorObject* self, PyObject* args,
+                                        PyObject* kwargs) {
+  EAGER_TRY
+  auto dense_tensor = self->tensor.to_dense();
+  egr::EagerUtils::autograd_meta(&dense_tensor)
+      ->SetStopGradient(
+          egr::EagerUtils::autograd_meta(&self->tensor)->StopGradient());
+  egr::EagerUtils::autograd_meta(&dense_tensor)
+      ->SetPersistable(
+          egr::EagerUtils::autograd_meta(&(self->tensor))->Persistable());
+  return ToPyObject(dense_tensor);
+  EAGER_CATCH_AND_THROW_RETURN_NULL
+}
+
 static PyObject* tensor__inplace_version(TensorObject* self, PyObject* args,
                                          PyObject* kwargs) {
   EAGER_TRY
@@ -1185,6 +1228,12 @@ PyMethodDef variable_methods[] = {
     METH_VARARGS | METH_KEYWORDS, NULL},
    {"is_sparse_csr", (PyCFunction)(void (*)(void))tensor_method_is_sparse_csr,
     METH_VARARGS | METH_KEYWORDS, NULL},
+    {"to_sparse_coo", (PyCFunction)(void (*)(void))tensor_method_to_sparse_coo,
+     METH_VARARGS | METH_KEYWORDS, NULL},
+    {"to_sparse_csr", (PyCFunction)(void (*)(void))tensor_method_to_sparse_csr,
+     METH_VARARGS | METH_KEYWORDS, NULL},
+    {"to_dense", (PyCFunction)(void (*)(void))tensor_method_to_dense,
+     METH_VARARGS | METH_KEYWORDS, NULL},
     /***the method of sparse tensor****/
     {"_inplace_version", (PyCFunction)(void (*)(void))tensor__inplace_version,
      METH_VARARGS | METH_KEYWORDS, NULL},
diff --git a/paddle/phi/api/include/tensor.h b/paddle/phi/api/include/tensor.h
index b881b5bac21ca81a00a1d0bbe12b4ac9592ee6b0..934c89fc927e8cfcec1b3af717172e7aabfca4a2 100644
--- a/paddle/phi/api/include/tensor.h
+++ b/paddle/phi/api/include/tensor.h
@@ -518,6 +518,30 @@ class PADDLE_API Tensor final {
 
   /* Part 10: Auto generated Tensor methods */
 
+  /* Part 11: Methods of converting SparseTensor and DenseTensor to each other
+   */
+  /**
+   * @brief Convert a DenseTensor or SparseCsrTensor to a SparseCooTensor
+   *
+   * @param sparse_dim The number of sparse dimensions
+   * @return Tensor
+   */
+  Tensor to_sparse_coo(const int64_t sparse_dim) const;
+
+  /**
+   * @brief Convert a DenseTensor or SparseCooTensor to a SparseCsrTensor
+   *
+   * @return Tensor
+   */
+  Tensor to_sparse_csr() const;
+
+  /**
+   * @brief Convert a SparseCooTensor or SparseCsrTensor to a DenseTensor
+   *
+   * @return Tensor
+   */
+  Tensor to_dense() const;
+
  private:
   /**
    * [ Why use abstract TensorImpl interface here? ]
diff --git a/paddle/phi/api/lib/CMakeLists.txt b/paddle/phi/api/lib/CMakeLists.txt
index 50c267f653564ebee770c058fdf5fb3af14e9c23..90bea6d98025c7c581033d22df44e84e5509db49 100644
--- a/paddle/phi/api/lib/CMakeLists.txt
+++ b/paddle/phi/api/lib/CMakeLists.txt
@@ -149,4 +149,4 @@ cc_library(phi_bw_function_api SRCS ${bw_api_source_file} DEPS phi_tensor_raw ph
 cc_library(sparse_api SRCS ${sparse_api_source_file} DEPS phi_tensor_raw phi kernel_dispatch api_gen_utils sparse_api_custom_impl)
 cc_library(sparse_bw_api SRCS ${sparse_bw_api_source_file} DEPS phi_tensor_raw phi kernel_dispatch api_gen_utils sparse_api sparse_api_custom_impl)
 
-cc_library(phi_tensor SRCS tensor_method.cc DEPS phi_tensor_raw phi_function_api api_gen_utils kernel_dispatch infermeta)
+cc_library(phi_tensor SRCS tensor_method.cc DEPS phi_tensor_raw phi_function_api api_gen_utils kernel_dispatch infermeta sparse_api)
diff --git a/paddle/phi/api/lib/tensor_method.cc b/paddle/phi/api/lib/tensor_method.cc
index c502747c4f9fe6f67d027f82085074d06142fbfb..dde9980d0b951421c3c69b8a9b0506e56939af7b 100644
--- a/paddle/phi/api/lib/tensor_method.cc
+++ b/paddle/phi/api/lib/tensor_method.cc
@@ -19,6 +19,7 @@ limitations under the License.
 */
 #include "paddle/phi/core/compat/convert_utils.h"
 #include "paddle/phi/core/tensor_base.h"
+#include "paddle/phi/api/include/sparse_api.h"
 #include "paddle/phi/api/lib/api_gen_utils.h"
 #include "paddle/phi/api/lib/kernel_dispatch.h"
 #include "paddle/phi/infermeta/unary.h"
@@ -183,5 +184,17 @@ void Tensor::copy_(const Tensor &src,
   }
 }
 
+Tensor Tensor::to_sparse_coo(const int64_t sparse_dim) const {
+  return experimental::sparse::to_sparse_coo(*this, sparse_dim);
+}
+
+Tensor Tensor::to_sparse_csr() const {
+  return experimental::sparse::to_sparse_csr(*this);
+}
+
+Tensor Tensor::to_dense() const {
+  return experimental::sparse::to_dense(*this);
+}
+
 }  // namespace experimental
 }  // namespace paddle
diff --git a/python/paddle/fluid/tests/unittests/test_sparse_utils_op.py b/python/paddle/fluid/tests/unittests/test_sparse_utils_op.py
index 8284771920e81db10d22f08cc96ecc58c422833d..010c049c16be5287dce98f2be69eb8a3a7f7dd22 100644
--- a/python/paddle/fluid/tests/unittests/test_sparse_utils_op.py
+++ b/python/paddle/fluid/tests/unittests/test_sparse_utils_op.py
@@ -17,25 +17,53 @@ import unittest
 import numpy as np
 import paddle
 from paddle import _C_ops
+from paddle.fluid import core
 from paddle.fluid.framework import _test_eager_guard
 
 
 class TestSparseUtils(unittest.TestCase):
+    def test_create_sparse_coo_tensor(self):
+        with _test_eager_guard():
+            non_zero_indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
+            non_zero_elements = [1, 2, 3, 4, 5]
+            dense_shape = [3, 4]
+            dense_indices = paddle.to_tensor(non_zero_indices)
+            dense_elements = paddle.to_tensor(
+                non_zero_elements, dtype='float32')
+            stop_gradient = False
+            coo = core.eager.sparse_coo_tensor(dense_indices, dense_elements,
+                                               dense_shape, stop_gradient)
+            print(coo)
+
+    def test_create_sparse_csr_tensor(self):
+        with _test_eager_guard():
+            non_zero_crows = [0, 2, 3, 5]
+            non_zero_cols = [1, 3, 2, 0, 1]
+            non_zero_elements = [1, 2, 3, 4, 5]
+            dense_shape = [3, 4]
+            dense_crows = paddle.to_tensor(non_zero_crows)
+            dense_cols = paddle.to_tensor(non_zero_cols)
+            dense_elements = paddle.to_tensor(
+                non_zero_elements, dtype='float32')
+            stop_gradient = False
+            csr = core.eager.sparse_csr_tensor(dense_crows, dense_cols,
+                                               dense_elements, dense_shape,
+                                               stop_gradient)
+            print(csr)
+
     def test_to_sparse_coo(self):
         with _test_eager_guard():
             x = [[0, 1, 0, 2], [0, 0, 3, 0], [4, 5, 0, 0]]
             non_zero_indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
             non_zero_elements = [1, 2, 3, 4, 5]
             dense_x = paddle.to_tensor(x)
-            #TODO(zhangkaihuo): change to test the corresponding API
-            out = _C_ops.final_state_to_sparse_coo(dense_x, 2)
-            print(out)
+            out = dense_x.to_sparse_coo(2)
             assert np.array_equal(out.non_zero_indices().numpy(),
                                   non_zero_indices)
             assert np.array_equal(out.non_zero_elements().numpy(),
                                   non_zero_elements)
 
-            dense_tensor = _C_ops.final_state_to_dense(out)
+            dense_tensor = out.to_dense()
             assert np.array_equal(dense_tensor.numpy(), x)
 
     def test_to_sparse_csr(self):
@@ -45,14 +73,14 @@ class TestSparseUtils(unittest.TestCase):
             non_zero_cols = [1, 3, 2, 0, 1]
             non_zero_elements = [1, 2, 3, 4, 5]
             dense_x = paddle.to_tensor(x)
-            out = _C_ops.final_state_to_sparse_csr(dense_x)
+            out = dense_x.to_sparse_csr()
             print(out)
             assert np.array_equal(out.non_zero_crows().numpy(),
                                   non_zero_crows)
             assert np.array_equal(out.non_zero_cols().numpy(),
                                   non_zero_cols)
             assert np.array_equal(out.non_zero_elements().numpy(),
                                   non_zero_elements)
 
-            dense_tensor = _C_ops.final_state_to_dense(out)
+            dense_tensor = out.to_dense()
             assert np.array_equal(dense_tensor.numpy(), x)
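
Usage sketch (not part of the patch): the snippet below is distilled from the
unit tests above and shows the end-to-end flow this change enables. It assumes
an eager-mode build in which core.eager.sparse_coo_tensor and the new
to_sparse_coo / to_sparse_csr / to_dense tensor methods are all available
inside _test_eager_guard().

    import numpy as np
    import paddle
    from paddle.fluid import core
    from paddle.fluid.framework import _test_eager_guard

    with _test_eager_guard():
        # COO form of the 3x4 matrix
        # [[0, 1, 0, 2],
        #  [0, 0, 3, 0],
        #  [4, 5, 0, 0]]
        indices = paddle.to_tensor([[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]])
        elements = paddle.to_tensor([1, 2, 3, 4, 5], dtype='float32')
        # Last argument is stop_gradient.
        coo = core.eager.sparse_coo_tensor(indices, elements, [3, 4], False)

        # Round-trip through the new Tensor methods: dense -> COO -> dense.
        dense_x = paddle.to_tensor(
            [[0, 1, 0, 2], [0, 0, 3, 0], [4, 5, 0, 0]], dtype='float32')
        out = dense_x.to_sparse_coo(2)  # 2 sparse dimensions
        assert np.array_equal(out.non_zero_elements().numpy(),
                              [1, 2, 3, 4, 5])
        assert np.array_equal(out.to_dense().numpy(), dense_x.numpy())

        # CSR view of the same matrix.
        csr = dense_x.to_sparse_csr()
        assert np.array_equal(csr.non_zero_crows().numpy(), [0, 2, 3, 5])
        assert np.array_equal(csr.non_zero_cols().numpy(), [1, 3, 2, 0, 1])

Note that the conversion methods copy the source tensor's StopGradient and
Persistable flags onto the result (see the pybind wrappers in eager_method.cc
above), so gradient settings survive the format change.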