diff --git a/paddle/phi/api/lib/api_gen_utils.cc b/paddle/phi/api/lib/api_gen_utils.cc
index 162b40547b851ecafa713fefd1ca4658796a282b..6d72db7fa10d6b045ad3fb71b64fb62a10823156 100644
--- a/paddle/phi/api/lib/api_gen_utils.cc
+++ b/paddle/phi/api/lib/api_gen_utils.cc
@@ -250,6 +250,9 @@ phi::SelectedRows* SetSelectedRowsKernelOutput(Tensor* out) {
 }
 
 phi::TensorBase* SetSparseKernelOutput(Tensor* out, TensorType type) {
+  if (!out) {
+    return nullptr;
+  }
   if (!out->initialized()) {
     if (type == TensorType::SPARSE_COO) {
       auto sparse_tensor = std::make_shared<phi::SparseCooTensor>(
diff --git a/paddle/phi/api/yaml/sparse_backward.yaml b/paddle/phi/api/yaml/sparse_backward.yaml
index 41816898c3a50a592aa9725628eacd0f4803e4e1..8347ee200e815c505478b977d3058c15234263dc 100644
--- a/paddle/phi/api/yaml/sparse_backward.yaml
+++ b/paddle/phi/api/yaml/sparse_backward.yaml
@@ -36,11 +36,12 @@
   args : (Tensor x, Tensor y, Tensor out_grad)
   output : Tensor(x_grad), Tensor(y_grad)
   infer_meta :
-    func : GeneralBinaryGradInferMeta
+    func : GeneralBinaryGradInferMeta
     param : [x, y]
   kernel :
     func : add_coo_coo_grad{sparse_coo, sparse_coo, sparse_coo -> sparse_coo, sparse_coo},
-           add_csr_csr_grad{sparse_csr, sparse_csr, sparse_csr -> sparse_csr, sparse_csr}
+           add_csr_csr_grad{sparse_csr, sparse_csr, sparse_csr -> sparse_csr, sparse_csr},
+           add_coo_dense_grad{sparse_coo, dense, sparse_coo -> sparse_coo, dense}
 
 - backward_op : addmm_grad
   forward : addmm(Tensor input, Tensor x, Tensor y, float alpha=1.0, float beta=1.0) -> Tensor(out)
@@ -104,7 +105,7 @@
   args : (Tensor x, Tensor out_grad, DataType value_dtype)
   output : Tensor(x_grad)
   infer_meta :
-    func : UnchangedInferMeta
+    func : UnchangedInferMeta
     param: [x]
   kernel :
     func : cast_coo_grad {sparse_coo, sparse_coo -> sparse_coo},
@@ -126,7 +127,7 @@
   args : (Tensor x, Tensor y, Tensor out, Tensor out_grad)
   output : Tensor(x_grad), Tensor(y_grad)
   infer_meta :
-    func : GeneralBinaryGradInferMeta
+    func : GeneralBinaryGradInferMeta
     param : [x, y]
   kernel :
     func : divide_coo_coo_grad{sparse_coo, sparse_coo, sparse_coo, sparse_coo -> sparse_coo, sparse_coo},
@@ -209,7 +210,7 @@
   args : (Tensor x, Tensor y, Tensor out_grad)
   output : Tensor(x_grad), Tensor(y_grad)
   infer_meta :
-    func : GeneralBinaryGradInferMeta
+    func : GeneralBinaryGradInferMeta
     param : [x, y]
   kernel :
     func : multiply_coo_coo_grad{sparse_coo, sparse_coo, sparse_coo -> sparse_coo, sparse_coo},
@@ -337,7 +338,7 @@
   args : (Tensor x, Tensor y, Tensor out_grad)
   output : Tensor(x_grad), Tensor(y_grad)
   infer_meta :
-    func : GeneralBinaryGradInferMeta
+    func : GeneralBinaryGradInferMeta
     param : [x, y]
   kernel :
     func : subtract_coo_coo_grad{sparse_coo, sparse_coo, sparse_coo -> sparse_coo, sparse_coo},
@@ -399,7 +400,7 @@
   args: (Tensor query, Tensor key, Tensor value, Tensor softmax, Tensor out_grad)
   output : Tensor(query_grad), Tensor(key_grad), Tensor(value_grad)
   infer_meta :
-    func : sparse::FusedAttentionGradInferMeta
+    func : sparse::FusedAttentionGradInferMeta
   kernel :
     func : fused_attention_csr_grad{dense, dense, dense, sparse_csr, dense -> dense, dense, dense}
   layout : softmax
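The new `add_coo_dense_grad{sparse_coo, dense, sparse_coo -> sparse_coo, dense}` entry wires the backward pass for sparse-plus-dense add: the output reuses `x`'s sparsity pattern, so `x_grad` takes `out_grad`'s values directly, while the dense bias gradient reduces `out_grad`'s values over the nonzero dimension. A minimal NumPy sketch of that rule (illustrative only; `coo_add_bias_grad` is a hypothetical name, not a Paddle API):

```python
import numpy as np

def coo_add_bias_grad(dout_values):
    """dout_values: [nnz, C] values of the upstream COO gradient."""
    dx_values = dout_values          # d out / d x.values is the identity
    dbias = dout_values.sum(axis=0)  # every nonzero row saw the bias once
    return dx_values, dbias
```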
diff --git a/paddle/phi/api/yaml/sparse_ops.yaml b/paddle/phi/api/yaml/sparse_ops.yaml
index 043c12615fb7f76e7ad6e833cc2ab042e87674c0..a917012b2f7916b8e6ea8b23cc6e096e4134870d 100644
--- a/paddle/phi/api/yaml/sparse_ops.yaml
+++ b/paddle/phi/api/yaml/sparse_ops.yaml
@@ -35,10 +35,11 @@
   args : (Tensor x, Tensor y)
   output : Tensor(out)
   infer_meta :
-    func : ElementwiseInferMeta
+    func : ElementwiseInferMeta
   kernel :
     func : add_coo_coo{sparse_coo, sparse_coo -> sparse_coo},
-           add_csr_csr{sparse_csr, sparse_csr -> sparse_csr}
+           add_csr_csr{sparse_csr, sparse_csr -> sparse_csr},
+           add_coo_dense{sparse_coo, dense -> sparse_coo},
   layout : x
   backward : add_grad
 
@@ -114,7 +115,7 @@
   args : (Tensor x, Tensor y)
   output : Tensor(out)
   infer_meta :
-    func : ElementwiseInferMeta
+    func : ElementwiseInferMeta
   kernel :
     func : divide_coo_coo{sparse_coo, sparse_coo -> sparse_coo},
            divide_csr_csr{sparse_csr, sparse_csr -> sparse_csr}
diff --git a/paddle/phi/kernels/sparse/cpu/elementwise_grad_kernel.cc b/paddle/phi/kernels/sparse/cpu/elementwise_grad_kernel.cc
index 58ed3f2d6b0b6a73bb0358d9483a4f9cd35f8d11..98afed84d6643836c5a36779dc05a646315d4150 100644
--- a/paddle/phi/kernels/sparse/cpu/elementwise_grad_kernel.cc
+++ b/paddle/phi/kernels/sparse/cpu/elementwise_grad_kernel.cc
@@ -415,3 +415,14 @@ PD_REGISTER_KERNEL(divide_coo_coo_grad,
   kernel->InputAt(2).SetDataLayout(phi::DataLayout::SPARSE_COO);
   kernel->InputAt(3).SetDataLayout(phi::DataLayout::SPARSE_COO);
 }
+
+PD_REGISTER_KERNEL(add_coo_dense_grad,
+                   CPU,
+                   ALL_LAYOUT,
+                   phi::sparse::ElementWiseAddDenseGradKernel,
+                   float,
+                   double,
+                   int,
+                   int64_t) {
+  kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
+}
diff --git a/paddle/phi/kernels/sparse/cpu/elementwise_kernel.cc b/paddle/phi/kernels/sparse/cpu/elementwise_kernel.cc
index 4e0eb90d7816df6cb2f6b7f0d502a85e1af5ac5b..0e46efc0e867377c9a59cd7b42c080367e6e21ca 100644
--- a/paddle/phi/kernels/sparse/cpu/elementwise_kernel.cc
+++ b/paddle/phi/kernels/sparse/cpu/elementwise_kernel.cc
@@ -156,6 +156,21 @@ void ElementWiseCooKernelImpl(const Context& dev_ctx,
                         "shape = [%s], Y's shape = [%s].",
                         x.dims(),
                         y.dims()));
+
+  // temporary policy: for broadcast add
+  // TODO(zhangkaihuo): implement a correct function
+  const bool is_add = std::is_same<Functor, funcs::AddFunctor<T>>::value;
+  if (is_add && x.indices().numel() == y.indices().numel()) {
+    int compare_indices = memcmp(x.indices().data<IntT>(),
+                                 y.indices().data<IntT>(),
+                                 sizeof(IntT) * x.indices().numel());
+    if (compare_indices == 0) {
+      EmptyLikeCooKernel<T>(dev_ctx, x, out);
+      phi::AddKernel<T>(
+          dev_ctx, x.values(), y.values(), out->mutable_values());
+      return;
+    }
+  }
   int64_t element_size = 1;
   for (auto j = 1; j < x.values().dims().size(); ++j) {
     element_size *= x.values().dims()[j];
@@ -435,3 +450,14 @@ PD_REGISTER_KERNEL(divide_coo_coo,
   kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
   kernel->InputAt(1).SetDataLayout(phi::DataLayout::SPARSE_COO);
 }
+
+PD_REGISTER_KERNEL(add_coo_dense,
+                   CPU,
+                   ALL_LAYOUT,
+                   phi::sparse::ElementWiseAddDenseKernel,
+                   float,
+                   double,
+                   int,
+                   int64_t) {
+  kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
+}
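The CPU fast path added to `ElementWiseCooKernelImpl` only fires for add when both COO operands carry byte-identical indices buffers (the `memcmp` check); it then keeps the indices and adds the values elementwise, skipping the general merge. Roughly, in NumPy terms (a sketch under that same-indices assumption; `coo_add_same_indices` is a made-up helper, not Paddle code):

```python
import numpy as np

def coo_add_same_indices(x_indices, x_values, y_indices, y_values):
    # mirrors the memcmp shortcut: identical indices => reuse them
    if x_indices.shape == y_indices.shape and np.array_equal(
            x_indices, y_indices):
        return x_indices, x_values + y_values
    raise NotImplementedError("general COO merge path not sketched here")

indices = np.array([[0, 1], [0, 3]])
out = coo_add_same_indices(indices, np.array([[1.0, 1.0], [2.0, 2.0]]),
                           indices, np.array([[0.5, 0.5], [0.5, 0.5]]))
print(out[1])  # [[1.5 1.5] [2.5 2.5]]
```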
diff --git a/paddle/phi/kernels/sparse/elementwise_grad_kernel.h b/paddle/phi/kernels/sparse/elementwise_grad_kernel.h
index 86eb3b4381dc070c98a8cd3d81fb74db6900f9b3..f16e2f95d47eb2002c0fd17d5a340b7687fc363b 100644
--- a/paddle/phi/kernels/sparse/elementwise_grad_kernel.h
+++ b/paddle/phi/kernels/sparse/elementwise_grad_kernel.h
@@ -14,6 +14,9 @@ limitations under the License. */
 
 #pragma once
 
+#include "paddle/phi/kernels/elementwise_add_grad_kernel.h"
+#include "paddle/phi/kernels/sparse/empty_kernel.h"
+
 #include "paddle/phi/core/dense_tensor.h"
 #include "paddle/phi/core/sparse_coo_tensor.h"
 #include "paddle/phi/core/sparse_csr_tensor.h"
@@ -119,5 +122,27 @@ std::vector<SparseCooTensor> ElementWiseDivideCooGrad(
   return std::vector<SparseCooTensor>{dx, dy};
 }
 
+template <typename T, typename Context>
+void ElementWiseAddDenseGradKernel(const Context& dev_ctx,
+                                   const SparseCooTensor& x,
+                                   const DenseTensor& y,
+                                   const SparseCooTensor& dout,
+                                   SparseCooTensor* dx,
+                                   DenseTensor* dy) {
+  DenseTensor* x_values_grad = nullptr;
+  DenseTensor* y_grad = nullptr;
+  if (dx) {
+    EmptyLikeCooKernel<T>(dev_ctx, x, dx);
+    x_values_grad = dx->mutable_values();
+  }
+
+  if (dy) {
+    *dy = phi::EmptyLike<T>(dev_ctx, y);
+    y_grad = dy;
+  }
+  phi::AddGradKernel<T>(
+      dev_ctx, x.values(), y, dout.values(), -1, x_values_grad, y_grad);
+}
+
 }  // namespace sparse
 }  // namespace phi
diff --git a/paddle/phi/kernels/sparse/elementwise_kernel.h b/paddle/phi/kernels/sparse/elementwise_kernel.h
index 59a554348cfea8a4e77b7ceeacbb711417cbcb18..515644d4fcfce299ff25bd475bfb959aa5970c23 100644
--- a/paddle/phi/kernels/sparse/elementwise_kernel.h
+++ b/paddle/phi/kernels/sparse/elementwise_kernel.h
@@ -14,6 +14,10 @@ limitations under the License. */
 
 #pragma once
 
+#include "paddle/phi/kernels/elementwise_add_kernel.h"
+#include "paddle/phi/kernels/sparse/elementwise_kernel.h"
+#include "paddle/phi/kernels/sparse/empty_kernel.h"
+
 #include "paddle/phi/core/dense_tensor.h"
 #include "paddle/phi/core/sparse_coo_tensor.h"
 #include "paddle/phi/core/sparse_csr_tensor.h"
@@ -78,5 +82,21 @@ DEFINE_ELEMENTWISE_KERNEL_FUNC(Subtract)
 DEFINE_ELEMENTWISE_KERNEL_FUNC(Multiply)
 DEFINE_ELEMENTWISE_KERNEL_FUNC(Divide)
 
+template <typename T, typename Context>
+void ElementWiseAddDenseKernel(const Context& dev_ctx,
+                               const SparseCooTensor& x,
+                               const DenseTensor& y,
+                               SparseCooTensor* out) {
+  // TODO(zhangkaihuo): to support universal sparse + dense
+  if (y.dims().size() == 1 && y.dims()[0] == x.dims()[x.dims().size() - 1]) {
+    EmptyLikeCooKernel<T>(dev_ctx, x, out);
+    phi::AddKernel<T>(dev_ctx, x.values(), y, out->mutable_values());
+    out->SetIndicesDict(x.GetIndicesDict());
+  } else {
+    PADDLE_THROW(errors::Unimplemented(
+        "Sparse + Dense now only supports a 1-D bias y"));
+  }
+}
+
 }  // namespace sparse
 }  // namespace phi
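`ElementWiseAddDenseKernel` accepts only a 1-D `y` whose length equals `x`'s last dimension, i.e. a per-channel bias; the output keeps `x`'s indices and adds the bias to every row of values. A hedged usage sketch (assumes a Paddle build that contains this patch):

```python
import paddle
from paddle.incubate import sparse

x = sparse.sparse_coo_tensor(indices=[[0, 1], [0, 3]],
                             values=[[1.0, 1.0], [2.0, 2.0]],
                             shape=[2, 4, 2])
bias = paddle.to_tensor([1.0, 2.0])  # 1-D, size == x.shape[-1]
out = sparse.add(x, bias)            # keeps x's indices
print(out.values())                  # [[2., 3.], [3., 4.]]
```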
diff --git a/paddle/phi/kernels/sparse/gpu/elementwise_grad_kernel.cu b/paddle/phi/kernels/sparse/gpu/elementwise_grad_kernel.cu
index e434dad588e134f2528294bc88152ce3c828e05f..e7f0c9d96e9205c1bf65b9df3d730aa9c93b1a6e 100644
--- a/paddle/phi/kernels/sparse/gpu/elementwise_grad_kernel.cu
+++ b/paddle/phi/kernels/sparse/gpu/elementwise_grad_kernel.cu
@@ -15,6 +15,9 @@ limitations under the License. */
 #include "paddle/phi/kernels/sparse/elementwise_grad_kernel.h"
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/core/tensor_utils.h"
+#include "paddle/phi/kernels/empty_kernel.h"
+#include "paddle/phi/kernels/funcs/elementwise_grad_base.h"
+#include "paddle/phi/kernels/funcs/reduce_function.h"
 #include "paddle/phi/kernels/sparse/empty_kernel.h"
 
 namespace phi {
@@ -54,3 +57,15 @@ PD_REGISTER_KERNEL(add_coo_coo_grad,
   kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
   kernel->InputAt(1).SetDataLayout(phi::DataLayout::SPARSE_COO);
 }
+
+PD_REGISTER_KERNEL(add_coo_dense_grad,
+                   GPU,
+                   ALL_LAYOUT,
+                   phi::sparse::ElementWiseAddDenseGradKernel,
+                   float,
+                   double,
+                   int,
+                   int64_t,
+                   phi::dtype::float16) {
+  kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
+}
diff --git a/paddle/phi/kernels/sparse/gpu/elementwise_kernel.cu b/paddle/phi/kernels/sparse/gpu/elementwise_kernel.cu
index 7496f47de894887fc8ee923d00d5cc1966b60f79..47daa1eae19edafa5f404b0ae3c2837d6cb45f6f 100644
--- a/paddle/phi/kernels/sparse/gpu/elementwise_kernel.cu
+++ b/paddle/phi/kernels/sparse/gpu/elementwise_kernel.cu
@@ -31,6 +31,7 @@ void ElementWiseAddCooGPUKernel(const GPUContext& dev_ctx,
                                 const SparseCooTensor& x,
                                 const SparseCooTensor& y,
                                 SparseCooTensor* out) {
+  // TODO(zhangkaihuo): to support universal sparse + sparse
   const auto& x_indices = x.indices();
   const auto& y_indices = y.indices();
   PADDLE_ENFORCE_EQ(
@@ -57,6 +58,7 @@ void ElementWiseAddCooGPUKernel(const GPUContext& dev_ctx,
   EmptyLikeCooKernel<T>(dev_ctx, x, out);
   phi::AddKernel<T>(
       dev_ctx, x.values(), y.values(), out->mutable_values());
+  out->SetIndicesDict(x.GetIndicesDict());
 }
 
 template <typename T, typename Context>
@@ -86,3 +88,15 @@ PD_REGISTER_KERNEL(add_coo_coo,
   kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
   kernel->InputAt(1).SetDataLayout(phi::DataLayout::SPARSE_COO);
 }
+
+PD_REGISTER_KERNEL(add_coo_dense,
+                   GPU,
+                   ALL_LAYOUT,
+                   phi::sparse::ElementWiseAddDenseKernel,
+                   float,
+                   double,
+                   int,
+                   int64_t,
+                   phi::dtype::float16) {
+  kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
+}
diff --git a/python/paddle/fluid/tests/unittests/test_sparse_elementwise_op.py b/python/paddle/fluid/tests/unittests/test_sparse_elementwise_op.py
index 20f66e5f9a65e060ff2a2bda78fb11ca2ca3e245..9acad42a9b8ee15bd652b192b38daf58697b9a8a 100644
--- a/python/paddle/fluid/tests/unittests/test_sparse_elementwise_op.py
+++ b/python/paddle/fluid/tests/unittests/test_sparse_elementwise_op.py
@@ -163,6 +163,32 @@ class TestSparseElementWiseAPI(unittest.TestCase):
             np.testing.assert_allclose(sp_b.grad.values().numpy(),
                                        values2.grad.numpy())
 
+    def test_add_bias(self):
+        indices_data = [[0, 1], [0, 3]]
+        values_data = [[1.0, 1.0], [2.0, 2.0]]
+        shape = [2, 4, 2]
+
+        sp_a = sparse.sparse_coo_tensor(indices_data,
+                                        values_data,
+                                        shape,
+                                        stop_gradient=False)
+
+        bias_values = [1.0, 2.0]
+
+        values1 = paddle.to_tensor(values_data, stop_gradient=False)
+        values2 = paddle.to_tensor(bias_values, stop_gradient=False)
+        values3 = paddle.to_tensor(bias_values, stop_gradient=False)
+
+        # c.values() = a.values() + b
+        sp_c = sparse.add(sp_a, values2)
+        sp_c.backward()
+        ref_c = values1 + values3
+        ref_c.backward()
+        np.testing.assert_allclose(sp_c.values().numpy(), ref_c.numpy())
+        np.testing.assert_allclose(sp_a.grad.values().numpy(),
+                                   values1.grad.numpy())
+        np.testing.assert_allclose(values2.grad.numpy(), values3.grad.numpy())
+
 
 if __name__ == "__main__":
     paddle.device.set_device('cpu')
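For reference, the numbers `test_add_bias` asserts can be checked by hand: `sp_c.backward()` seeds a gradient of ones, so `sp_a.grad.values()` is all ones and the bias gradient is the column sum of those ones (a NumPy recomputation of the expected values, not part of the test):

```python
import numpy as np

values = np.array([[1.0, 1.0], [2.0, 2.0]])
bias = np.array([1.0, 2.0])

out = values + bias        # [[2., 3.], [3., 4.]] == sp_c.values()
dout = np.ones_like(out)   # gradient seeded by sp_c.backward()
d_values = dout            # == sp_a.grad.values()
d_bias = dout.sum(axis=0)  # [2., 2.] == values2.grad
```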
diff --git a/python/paddle/incubate/sparse/binary.py b/python/paddle/incubate/sparse/binary.py
index b09c991800dbad550f246633eaea3a2ea825aa15..5a6a77490383285046a0fb78e361a12148e09d6f 100644
--- a/python/paddle/incubate/sparse/binary.py
+++ b/python/paddle/incubate/sparse/binary.py
@@ -253,7 +253,7 @@ def add(x, y, name=None):
     """
 
     if y.dtype != x.dtype:
-        y = _C_ops.sparse_cast(y, None, x.dtype)
+        y = cast(y, None, x.dtype)
     return _C_ops.sparse_add(x, y)
 
 
diff --git a/python/paddle/incubate/sparse/nn/functional/conv.py b/python/paddle/incubate/sparse/nn/functional/conv.py
index 0512b83d842e68ea7588f73b907ca84da5c45238..284ce1020bbf242bd0ad8c6fbc88fd5d4f373b86 100644
--- a/python/paddle/incubate/sparse/nn/functional/conv.py
+++ b/python/paddle/incubate/sparse/nn/functional/conv.py
@@ -18,6 +18,8 @@ from paddle import _C_ops, _legacy_C_ops, in_dynamic_mode
 from paddle.fluid.layers.utils import convert_to_list
 from paddle.fluid.layers.nn import elementwise_add
 from ...creation import sparse_coo_tensor
+from ...binary import add
+from paddle.tensor import arange
 from paddle.nn.functional.conv import _update_padding_nd
 
 
@@ -67,12 +69,7 @@ def _conv3d(x,
                                           groups, subm,
                                           key if key is not None else "")
     if bias is not None:
-        values = pre_bias.values()
-        add_bias = elementwise_add(values, bias, axis=1)
-        return sparse_coo_tensor(pre_bias.indices(),
-                                 add_bias,
-                                 shape=pre_bias.shape,
-                                 stop_gradient=pre_bias.stop_gradient)
+        return add(pre_bias, bias)
     else:
         return pre_bias
 
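The `_conv3d` change replaces the unpack/dense-add/re-wrap sequence with the fused sparse add, which also preserves the tensor's cached indices dict for later submanifold convolutions (see `SetIndicesDict` in the kernel changes above). A before/after equivalence sketch (hedged; uses only APIs visible in this diff, with illustrative data):

```python
import paddle
from paddle.incubate import sparse

pre_bias = sparse.sparse_coo_tensor(indices=[[0, 1], [0, 3]],
                                    values=[[1.0, 1.0], [2.0, 2.0]],
                                    shape=[2, 4, 2])
bias = paddle.to_tensor([0.5, 0.5])

# old path: unpack values, dense add, rebuild the COO tensor
old = sparse.sparse_coo_tensor(pre_bias.indices(),
                               pre_bias.values() + bias,
                               shape=pre_bias.shape)
# new path: one sparse + dense add, indices (and indices dict) reused
new = sparse.add(pre_bias, bias)
assert (old.values().numpy() == new.values().numpy()).all()
```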