未验证 提交 b9da48da 编写于 作者: Z zhangkaihuo 提交者: GitHub

Optimize the compilation of sparse kernels (#41086)

上级 ac5548a2
......@@ -116,6 +116,9 @@ function(kernel_library TARGET)
if ("${kernel_library_SUB_DIR}" STREQUAL "selected_rows")
set(target_suffix "_sr")
endif()
if ("${kernel_library_SUB_DIR}" STREQUAL "sparse")
set(target_suffix "_sp")
endif()
list(LENGTH kernel_library_SRCS kernel_library_SRCS_len)
# one kernel only match one impl file in each backend
......
......@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/phi/kernels/sparse/sparse_activation_grad_kernel.h"
#include "paddle/phi/kernels/sparse/activation_grad_kernel.h"
#include "paddle/phi/kernels/activation_grad_kernel.h"
#include "paddle/phi/kernels/copy_kernel.h"
#include "paddle/phi/kernels/empty_kernel.h"
......
......@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/phi/kernels/sparse/sparse_activation_kernel.h"
#include "paddle/phi/kernels/sparse/activation_kernel.h"
#include "paddle/phi/kernels/copy_kernel.h"
#include "paddle/phi/kernels/empty_kernel.h"
......
......@@ -24,8 +24,8 @@ limitations under the License. */
#include "paddle/phi/kernels/activation_grad_kernel.h"
#include "paddle/phi/kernels/activation_kernel.h"
#include "paddle/phi/kernels/empty_kernel.h"
#include "paddle/phi/kernels/sparse/sparse_activation_grad_kernel.h"
#include "paddle/phi/kernels/sparse/sparse_activation_kernel.h"
#include "paddle/phi/kernels/sparse/activation_grad_kernel.h"
#include "paddle/phi/kernels/sparse/activation_kernel.h"
#include "paddle/phi/kernels/sparse/sparse_utils_kernel.h"
namespace phi {
......
......@@ -33,6 +33,7 @@ class TestSparseUtils(unittest.TestCase):
stop_gradient = False
coo = core.eager.sparse_coo_tensor(dense_indices, dense_elements,
dense_shape, stop_gradient)
print(coo)
def test_create_sparse_csr_tensor(self):
......@@ -49,6 +50,7 @@ class TestSparseUtils(unittest.TestCase):
csr = core.eager.sparse_csr_tensor(dense_crows, dense_cols,
dense_elements, dense_shape,
stop_gradient)
print(csr)
def test_to_sparse_coo(self):
......@@ -58,6 +60,7 @@ class TestSparseUtils(unittest.TestCase):
non_zero_elements = [1, 2, 3, 4, 5]
dense_x = paddle.to_tensor(x)
out = dense_x.to_sparse_coo(2)
print(out)
assert np.array_equal(out.non_zero_indices().numpy(),
non_zero_indices)
assert np.array_equal(out.non_zero_elements().numpy(),
......@@ -81,6 +84,7 @@ class TestSparseUtils(unittest.TestCase):
non_zero_elements)
dense_tensor = out.to_dense()
print(dense_tensor)
assert np.array_equal(dense_tensor.numpy(), x)
......
......@@ -286,30 +286,45 @@ def _format_dense_tensor(tensor, indent):
def sparse_tensor_to_string(tensor, prefix='Tensor'):
    """Return a human-readable string representation of a sparse Tensor.

    Handles the two sparse layouts exposed by the eager API:
      * COO  — printed as labeled ``indices=`` and ``values=`` dense blocks.
      * CSR  — printed as labeled ``crows=``, ``cols=`` and ``values=`` blocks.

    Args:
        tensor: a sparse Tensor (COO or CSR layout).
        prefix (str): leading label; also determines the continuation indent.

    Returns:
        str: formatted multi-line representation.
    """
    # Continuation lines are indented past "<prefix>(" so the fields align.
    indent = len(prefix) + 1
    if tensor.is_sparse_coo():
        _template = "{prefix}(shape={shape}, dtype={dtype}, place={place}, stop_gradient={stop_gradient}, \n{indent}{indices}, \n{indent}{values})"
        indices_tensor = tensor.non_zero_indices()
        elements_tensor = tensor.non_zero_elements()
        # Offset each dense block by its label width so wrapped rows line up
        # under the first row rather than under the label.
        indices_data = 'indices=' + _format_dense_tensor(indices_tensor,
                                                         indent + len('indices='))
        values_data = 'values=' + _format_dense_tensor(elements_tensor,
                                                       indent + len('values='))
        return _template.format(
            prefix=prefix,
            shape=tensor.shape,
            dtype=tensor.dtype,
            place=tensor._place_str,
            stop_gradient=tensor.stop_gradient,
            indent=' ' * indent,
            indices=indices_data,
            values=values_data)
    else:
        # CSR layout: compressed row pointers, column indices, and values.
        _template = "{prefix}(shape={shape}, dtype={dtype}, place={place}, stop_gradient={stop_gradient}, \n{indent}{crows}, \n{indent}{cols}, \n{indent}{values})"
        crows_tensor = tensor.non_zero_crows()
        cols_tensor = tensor.non_zero_cols()
        elements_tensor = tensor.non_zero_elements()
        crows_data = 'crows=' + _format_dense_tensor(crows_tensor,
                                                     indent + len('crows='))
        cols_data = 'cols=' + _format_dense_tensor(cols_tensor,
                                                   indent + len('cols='))
        values_data = 'values=' + _format_dense_tensor(elements_tensor,
                                                       indent + len('values='))
        return _template.format(
            prefix=prefix,
            shape=tensor.shape,
            dtype=tensor.dtype,
            place=tensor._place_str,
            stop_gradient=tensor.stop_gradient,
            indent=' ' * indent,
            crows=crows_data,
            cols=cols_data,
            values=values_data)
def tensor_to_string(tensor, prefix='Tensor'):
......@@ -317,11 +332,11 @@ def tensor_to_string(tensor, prefix='Tensor'):
_template = "{prefix}(shape={shape}, dtype={dtype}, place={place}, stop_gradient={stop_gradient},\n{indent}{data})"
if not tensor._is_initialized():
return "Tensor(Not initialized)"
if tensor.is_sparse():
return sparse_tensor_to_string(tensor, prefix)
if not tensor._is_dense_tensor_hold_allocation():
return "Tensor(Not initialized)"
else:
data = _format_dense_tensor(tensor, indent)
return _template.format(
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册