From 3a7b1810045457d1e08358c8a1db7685e599f68d Mon Sep 17 00:00:00 2001
From: zhangkaihuo
Date: Tue, 23 Aug 2022 14:56:05 +0800
Subject: [PATCH] [Sparse] Use shorter function names (#45325)

* rename the member functions of SparseTensor

* use shorter function names
---
 .../phi/kernels/sparse/gpu/coalesce_kernel.cu | 13 ++++----
 paddle/phi/kernels/sparse/gpu/conv.cu.h       | 10 +++----
 .../kernels/sparse/gpu/conv_grad_kernel.cu    | 14 ++++-----
 paddle/phi/kernels/sparse/gpu/conv_kernel.cu  | 29 +++++++++---------
 .../phi/kernels/sparse/gpu/convolution.cu.h   | 10 +++----
 paddle/phi/kernels/sparse/gpu/full_kernel.cu  | 23 +++++---------
 .../sparse/gpu/fused_attention_grad_kernel.cu |  2 +-
 .../sparse/gpu/fused_attention_kernel.cu      |  4 +--
 paddle/phi/kernels/sparse/gpu/mask_kernel.cu  | 10 +++----
 .../phi/kernels/sparse/gpu/mv_grad_kernel.cu  | 30 +++++++++----------
 .../kernels/sparse/gpu/pool_grad_kernel.cu    | 12 +++-----
 paddle/phi/kernels/sparse/gpu/pool_kernel.cu  |  2 +-
 .../kernels/sparse/gpu/softmax_grad_kernel.cu |  4 +--
 .../phi/kernels/sparse/gpu/softmax_kernel.cu  | 18 +++++------
 .../kernels/sparse/gpu/sparse_utils_kernel.cu | 27 ++++++++---------
 15 files changed, 92 insertions(+), 116 deletions(-)

diff --git a/paddle/phi/kernels/sparse/gpu/coalesce_kernel.cu b/paddle/phi/kernels/sparse/gpu/coalesce_kernel.cu
index d4792804f0..d369c0ecd9 100644
--- a/paddle/phi/kernels/sparse/gpu/coalesce_kernel.cu
+++ b/paddle/phi/kernels/sparse/gpu/coalesce_kernel.cu
@@ -30,13 +30,13 @@ template <typename T, typename IntT>
 void CoalesceGPUKernel(const GPUContext& dev_ctx,
                        const SparseCooTensor& x,
                        SparseCooTensor* out) {
-  const DenseTensor& x_indices = x.non_zero_indices();
+  const DenseTensor& x_indices = x.indices();
   const DenseTensor& x_values = x.values();
   DenseTensor out_indices = phi::EmptyLike<IntT>(dev_ctx, x_indices);
   DenseTensor out_values = phi::EmptyLike<T>(dev_ctx, x_values);
 
   const int64_t nnz = x.nnz();
-  const int64_t sparse_dim = x.non_zero_indices().dims()[0];
+  const int64_t sparse_dim = x.indices().dims()[0];
   std::vector<IntT> sparse_offsets(sparse_dim);
 
   phi::funcs::sparse::CalcOffsetsPerDim(
@@ -64,7 +64,7 @@ void CoalesceGPUKernel(const GPUContext& dev_ctx,
                          config.thread_per_block,
                          0,
                          dev_ctx.stream()>>>(
-      x.non_zero_indices().data<IntT>(),
+      x.indices().data<IntT>(),
       d_sparse_offsets.data<IntT>(),
       indexs.numel(),
       sparse_dim,
@@ -175,10 +175,9 @@ template <typename T, typename Context>
 void CoalesceKernel(const Context& dev_ctx,
                     const SparseCooTensor& x,
                     SparseCooTensor* out) {
-  PD_VISIT_BASE_INTEGRAL_TYPES(
-      x.non_zero_indices().dtype(), "CoalesceGPUKernel", ([&] {
-        CoalesceGPUKernel<T, data_t>(dev_ctx, x, out);
-      }));
+  PD_VISIT_BASE_INTEGRAL_TYPES(x.indices().dtype(), "CoalesceGPUKernel", ([&] {
+                                 CoalesceGPUKernel<T, data_t>(dev_ctx, x, out);
+                               }));
 }
 }  // namespace sparse
 }  // namespace phi

diff --git a/paddle/phi/kernels/sparse/gpu/conv.cu.h b/paddle/phi/kernels/sparse/gpu/conv.cu.h
index 1dc3686c41..2a524eb465 100644
--- a/paddle/phi/kernels/sparse/gpu/conv.cu.h
+++ b/paddle/phi/kernels/sparse/gpu/conv.cu.h
@@ -589,8 +589,8 @@ int ProductRuleBook(const Context& dev_ctx,
                     int* h_offsets) {
   auto indices_dtype = paddle::experimental::CppTypeToDataType<IntT>::Type();
   const int64_t non_zero_num = x.nnz();
-  const auto& non_zero_indices = x.non_zero_indices();
-  const IntT* indices_ptr = non_zero_indices.data<IntT>();
+  const auto& indices = x.indices();
+  const IntT* indices_ptr = indices.data<IntT>();
   int* counter_ptr = counter_per_kernel->data<int>();
   int* offsets_ptr = offsets_per_kernel->data<int>();
   int kernel_size = kernel_sizes[0] * kernel_sizes[1] * kernel_sizes[2];
@@ -629,12 +629,10 @@ int ProductRuleBook(const Context& dev_ctx,
   if (subm) {
     DenseTensor tmp_rulebook = phi::Empty(dev_ctx, std::move(rulebook_meta));
     IntT* rulebook_ptr = tmp_rulebook.data<IntT>();
-    DenseTensor out_indices =
-        phi::EmptyLike<IntT>(dev_ctx, x.non_zero_indices());
+    DenseTensor out_indices = phi::EmptyLike<IntT>(dev_ctx, x.indices());
     DenseTensor out_values = phi::Empty<T>(dev_ctx, {x.nnz(), kernel_sizes[4]});
 
-    phi::Copy(
-        dev_ctx, x.non_zero_indices(), dev_ctx.GetPlace(), false, &out_indices);
+    phi::Copy(dev_ctx, x.indices(), dev_ctx.GetPlace(), false, &out_indices);
 
     phi::backends::gpu::GpuMemsetAsync(
         out_index_table_ptr, 0, sizeof(int) * table_size, dev_ctx.stream());
diff --git a/paddle/phi/kernels/sparse/gpu/conv_grad_kernel.cu b/paddle/phi/kernels/sparse/gpu/conv_grad_kernel.cu
index 9461cec7b2..5d57afab40 100644
--- a/paddle/phi/kernels/sparse/gpu/conv_grad_kernel.cu
+++ b/paddle/phi/kernels/sparse/gpu/conv_grad_kernel.cu
@@ -79,8 +79,7 @@ void Conv3dCooGradGPUKernel(const GPUContext& dev_ctx,
   int half_kernel_size = kernel_size / 2;
   auto blas = phi::funcs::GetBlas<GPUContext, T>(dev_ctx);
-  DenseTensor x_grad_indices =
-      phi::EmptyLike<IntT>(dev_ctx, x.non_zero_indices());
+  DenseTensor x_grad_indices = phi::EmptyLike<IntT>(dev_ctx, x.indices());
   DenseTensor x_grad_values = phi::EmptyLike<T>(dev_ctx, x.values());
   T* x_grad_values_ptr = x_grad_values.data<T>();
   phi::backends::gpu::GpuMemsetAsync(x_grad_values_ptr,
@@ -89,11 +88,8 @@ void Conv3dCooGradGPUKernel(const GPUContext& dev_ctx,
                                      dev_ctx.stream());
   phi::backends::gpu::GpuMemsetAsync(
       d_x_features_ptr, 0, sizeof(T) * d_x_features.numel(), dev_ctx.stream());
-  phi::Copy(dev_ctx,
-            x.non_zero_indices(),
-            dev_ctx.GetPlace(),
-            false,
-            &x_grad_indices);
+  phi::Copy(
+      dev_ctx, x.indices(), dev_ctx.GetPlace(), false, &x_grad_indices);
   x_grad->SetMember(x_grad_indices, x_grad_values, x.dims(), true);
 
   std::vector<int> offsets(kernel_size + 1);
@@ -142,7 +138,7 @@ void Conv3dCooGradGPUKernel(const GPUContext& dev_ctx,
                          <<<config.block_per_grid,
                             config.thread_per_block,
                             0,
-                            dev_ctx.stream()>>>(x.non_zero_indices().data<IntT>(),
+                            dev_ctx.stream()>>>(x.indices().data<IntT>(),
                                                 x.nnz(),
                                                 d_x_dims,
                                                 nullptr,
@@ -269,7 +265,7 @@ void Conv3dCooGradKernel(const Context& dev_ctx,
                          SparseCooTensor* x_grad,
                          DenseTensor* kernel_grad) {
   PD_VISIT_BASE_INTEGRAL_TYPES(
-      x.non_zero_indices().dtype(), "Conv3dCooGradGPUKernel", ([&] {
+      x.indices().dtype(), "Conv3dCooGradGPUKernel", ([&] {
         Conv3dCooGradGPUKernel<T, data_t>(dev_ctx,
                                           x,
                                           kernel,
diff --git a/paddle/phi/kernels/sparse/gpu/conv_kernel.cu b/paddle/phi/kernels/sparse/gpu/conv_kernel.cu
index dc6253b725..e5727c4faa 100644
--- a/paddle/phi/kernels/sparse/gpu/conv_kernel.cu
+++ b/paddle/phi/kernels/sparse/gpu/conv_kernel.cu
@@ -202,21 +202,20 @@ void Conv3dCooKernel(const Context& dev_ctx,
                      SparseCooTensor* out,
                      DenseTensor* rulebook,
                      DenseTensor* counter) {
-  PD_VISIT_BASE_INTEGRAL_TYPES(
-      x.non_zero_indices().dtype(), "Conv3dCooGPUKernel", ([&] {
-        Conv3dCooGPUKernel<T, data_t>(dev_ctx,
-                                      x,
-                                      kernel,
-                                      paddings,
-                                      dilations,
-                                      strides,
-                                      groups,
-                                      subm,
-                                      key,
-                                      out,
-                                      rulebook,
-                                      counter);
-      }));
+  PD_VISIT_BASE_INTEGRAL_TYPES(x.indices().dtype(), "Conv3dCooGPUKernel", ([&] {
+                                 Conv3dCooGPUKernel<T, data_t>(dev_ctx,
+                                                               x,
+                                                               kernel,
+                                                               paddings,
+                                                               dilations,
+                                                               strides,
+                                                               groups,
+                                                               subm,
+                                                               key,
+                                                               out,
+                                                               rulebook,
+                                                               counter);
+                               }));
 }
 }  // namespace sparse
diff --git a/paddle/phi/kernels/sparse/gpu/convolution.cu.h b/paddle/phi/kernels/sparse/gpu/convolution.cu.h
index 6df1969aad..03f230c079 100644
--- a/paddle/phi/kernels/sparse/gpu/convolution.cu.h
+++ b/paddle/phi/kernels/sparse/gpu/convolution.cu.h
@@ -302,8 +302,8 @@ int ProductRuleBook(const Context& dev_ctx,
                     std::vector<int>* h_offsets) {
   auto indices_dtype = paddle::experimental::CppTypeToDataType<IntT>::Type();
   const int64_t non_zero_num = x.nnz();
-  const auto& non_zero_indices = x.non_zero_indices();
-  const IntT* indices_ptr = non_zero_indices.data<IntT>();
+  const auto& indices = x.indices();
+  const IntT* indices_ptr = indices.data<IntT>();
   DenseTensor in_indexs = phi::Empty(
       dev_ctx, DenseTensorMeta(indices_dtype, {x.nnz()}, DataLayout::NCHW));
   int* counter_ptr = counter_per_kernel->data<int>();
@@ -538,14 +538,12 @@ int ProductRuleBook(const Context& dev_ctx,
                                 rulebook_ptr + 2 * rulebook_len);
     out->SetMember(out_indices, out_values, out_dims, true);
   } else {
-    DenseTensor out_indices =
-        phi::EmptyLike<IntT>(dev_ctx, x.non_zero_indices());
+    DenseTensor out_indices = phi::EmptyLike<IntT>(dev_ctx, x.indices());
     DenseTensor out_values = phi::Empty(
         dev_ctx,
         DenseTensorMeta(
             x.dtype(), {x.nnz(), kernel_sizes[4]}, x.values().layout()));
-    phi::Copy(
-        dev_ctx, x.non_zero_indices(), dev_ctx.GetPlace(), false, &out_indices);
+    phi::Copy(dev_ctx, x.indices(), dev_ctx.GetPlace(), false, &out_indices);
     out->SetMember(out_indices, out_values, out_dims, true);
   }
   return rulebook_len;
diff --git a/paddle/phi/kernels/sparse/gpu/full_kernel.cu b/paddle/phi/kernels/sparse/gpu/full_kernel.cu
index acc4d1398c..d5ccfc95c6 100644
--- a/paddle/phi/kernels/sparse/gpu/full_kernel.cu
+++ b/paddle/phi/kernels/sparse/gpu/full_kernel.cu
@@ -42,11 +42,8 @@ void CooFullLikeKernel(const Context& dev_ctx,
                        const Scalar& val,
                        DataType dtype,
                        SparseCooTensor* out) {
-  phi::Copy(dev_ctx,
-            x.non_zero_indices(),
-            dev_ctx.GetPlace(),
-            false,
-            out->mutable_non_zero_indices());
+  phi::Copy(
+      dev_ctx, x.indices(), dev_ctx.GetPlace(), false, out->mutable_indices());
 
   DenseTensor* values = out->mutable_values();
   values->Resize(x.values().dims());
@@ -68,17 +65,11 @@ void CsrFullLikeKernel(const Context& dev_ctx,
                        const Scalar& val,
                        DataType dtype,
                        SparseCsrTensor* out) {
-  phi::Copy(dev_ctx,
-            x.non_zero_crows(),
-            dev_ctx.GetPlace(),
-            false,
-            out->mutable_non_zero_crows());
-
-  phi::Copy(dev_ctx,
-            x.non_zero_cols(),
-            dev_ctx.GetPlace(),
-            false,
-            out->mutable_non_zero_cols());
+  phi::Copy(
+      dev_ctx, x.crows(), dev_ctx.GetPlace(), false, out->mutable_crows());
+
+  phi::Copy(
+      dev_ctx, x.cols(), dev_ctx.GetPlace(), false, out->mutable_cols());
 
   DenseTensor* values = out->mutable_values();
   values->Resize(x.values().dims());
diff --git a/paddle/phi/kernels/sparse/gpu/fused_attention_grad_kernel.cu b/paddle/phi/kernels/sparse/gpu/fused_attention_grad_kernel.cu
index f3fabb0592..ab3a75f897 100644
--- a/paddle/phi/kernels/sparse/gpu/fused_attention_grad_kernel.cu
+++ b/paddle/phi/kernels/sparse/gpu/fused_attention_grad_kernel.cu
@@ -93,7 +93,7 @@ void FusedAttentionCsrGradKernel(const Context& dev_ctx,
   dim3 block(WARP_SIZE, 8);
 
   AttnSoftmaxGpuGradKernel<T><<<grid, block, 0, dev_ctx.stream()>>>(
-      softmax.non_zero_crows().data<int64_t>(),
+      softmax.crows().data<int64_t>(),
       softmax.values().data<T>(),
       dsoftmax.mutable_values()->data<T>(),
       d_sdd_result.mutable_values()->data<T>(),
diff --git a/paddle/phi/kernels/sparse/gpu/fused_attention_kernel.cu b/paddle/phi/kernels/sparse/gpu/fused_attention_kernel.cu
index f98d913b95..ec100eae3a 100644
--- a/paddle/phi/kernels/sparse/gpu/fused_attention_kernel.cu
+++ b/paddle/phi/kernels/sparse/gpu/fused_attention_kernel.cu
@@ -200,8 +200,8 @@ void FusedAttentionCsrKernel(
   int batch_nnz = sdd_result.nnz() / batch_num;
 
   AttnSoftmaxGpuKernel<T><<<grid, block, 0, dev_ctx.stream()>>>(
-      sdd_result.non_zero_crows().data<int64_t>(),
-      sdd_result.non_zero_cols().data<int64_t>(),
+      sdd_result.crows().data<int64_t>(),
+      sdd_result.cols().data<int64_t>(),
       sdd_result.values().data<T>(),
       kp_mask_ptr ? kp_mask_ptr->data<T>() : nullptr,
       attn_mask_ptr ? attn_mask_ptr->data<T>() : nullptr,
diff --git a/paddle/phi/kernels/sparse/gpu/mask_kernel.cu b/paddle/phi/kernels/sparse/gpu/mask_kernel.cu
index 3865400dcf..c4d2a691a4 100644
--- a/paddle/phi/kernels/sparse/gpu/mask_kernel.cu
+++ b/paddle/phi/kernels/sparse/gpu/mask_kernel.cu
@@ -58,7 +58,7 @@ void SparseMaskGPUKernel(const GPUContext& dev_ctx,
       x.dims(),
       mask.dims(),
       phi::errors::InvalidArgument("the input x and mask must have the shape"));
-  const DenseTensor& indices = mask.non_zero_indices();
+  const DenseTensor& indices = mask.indices();
   const DenseTensor& values = mask.values();
   const int sparse_dim = mask.sparse_dim();
   DenseTensor sparse_offsets = phi::Empty(
@@ -103,7 +103,7 @@ void SparseMaskGPUKernel(const GPUContext& dev_ctx,
 
 /**
  * @brief Filter the DenseTensor x by the
- * mask.non_zero_indices() and output a SparseCooTensor
+ * mask.indices() and output a SparseCooTensor
  * x and mask must have the same shape.
 **/
 template <typename T, typename Context>
@@ -112,7 +112,7 @@ void SparseMaskKernel(const Context& dev_ctx,
                       const SparseCooTensor& mask,
                       SparseCooTensor* out) {
   PD_VISIT_BASE_INTEGRAL_TYPES(
-      mask.non_zero_indices().dtype(), "SparseMaskGPUKernel", ([&] {
+      mask.indices().dtype(), "SparseMaskGPUKernel", ([&] {
         SparseMaskGPUKernel<T, data_t>(dev_ctx, x, mask, out);
       }));
 }
@@ -197,7 +197,7 @@ void SparseMaskHelperGPUKernel(const GPUContext& dev_ctx,
                          config.thread_per_block,
                          0,
                          dev_ctx.stream()>>>(
-      x.non_zero_indices().data<IntT>(),
+      x.indices().data<IntT>(),
       d_sparse_offsets.data<IntT>(),
       x_indexs.numel(),
       sparse_dim,
@@ -270,7 +270,7 @@ void SparseMaskHelperKernel(const Context& dev_ctx,
                             const DenseTensor& mask_indices,
                             DenseTensor* out) {
   PD_VISIT_BASE_INTEGRAL_TYPES(
-      x.non_zero_indices().dtype(), "SparseMaskHelperGPUKernel", ([&] {
+      x.indices().dtype(), "SparseMaskHelperGPUKernel", ([&] {
        SparseMaskHelperGPUKernel<T, data_t>(dev_ctx, x, mask_indices, out);
       }));
 }
diff --git a/paddle/phi/kernels/sparse/gpu/mv_grad_kernel.cu b/paddle/phi/kernels/sparse/gpu/mv_grad_kernel.cu
index c4169c275e..8c7a8fa827 100644
--- a/paddle/phi/kernels/sparse/gpu/mv_grad_kernel.cu
+++ b/paddle/phi/kernels/sparse/gpu/mv_grad_kernel.cu
@@ -71,14 +71,14 @@ void MvCooGradKernel(const Context &dev_ctx,
   EmptyLikeCooKernel<T, Context>(dev_ctx, x, dx);
   auto config = phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, dx->nnz());
   PD_VISIT_BASE_INTEGRAL_TYPES(
-      dx->non_zero_indices().dtype(), "MvCooGradKernel", ([&] {
+      dx->indices().dtype(), "MvCooGradKernel", ([&] {
        MvCooGradGpuKernel<T, data_t>
           <<<config.block_per_grid.x,
              config.thread_per_block.x,
              0,
             dev_ctx.stream()>>>(dout.data<T>(),
                                 vec.data<T>(),
-                                dx->non_zero_indices().data<data_t>(),
+                                dx->indices().data<data_t>(),
                                 dx->mutable_values()->data<T>(),
                                 dx->nnz());
       }));
@@ -117,19 +117,19 @@ void MvCsrGradKernel(const Context &dev_ctx,
   int col_number = dx->dims()[1];
   auto config = phi::backends::gpu::GetGpuLaunchConfig2D(
       dev_ctx, col_number, row_number);
-  PD_VISIT_BASE_INTEGRAL_TYPES(
-      dx->non_zero_crows().dtype(), "MvCsrGradKernel", ([&] {
-        MvCsrGradGpuKernel<T, data_t>
-            <<<config.block_per_grid,
-               config.thread_per_block,
-               0,
-               dev_ctx.stream()>>>(dout.data<T>(),
-                                   vec.data<T>(),
-                                   dx->non_zero_crows().data<data_t>(),
-                                   dx->non_zero_cols().data<data_t>(),
-                                   dx->mutable_values()->data<T>(),
-                                   row_number);
-      }));
+  PD_VISIT_BASE_INTEGRAL_TYPES(dx->crows().dtype(), "MvCsrGradKernel", ([&] {
+                                 MvCsrGradGpuKernel<T, data_t>
+                                     <<<config.block_per_grid,
+                                        config.thread_per_block,
+                                        0,
+                                        dev_ctx.stream()>>>(
+                                         dout.data<T>(),
+                                         vec.data<T>(),
+                                         dx->crows().data<data_t>(),
+                                         dx->cols().data<data_t>(),
+                                         dx->mutable_values()->data<T>(),
+                                         row_number);
+                               }));
 }
 
 // dvec{Dense} = x'{SparseCsr} * dout{Dense}
diff --git a/paddle/phi/kernels/sparse/gpu/pool_grad_kernel.cu b/paddle/phi/kernels/sparse/gpu/pool_grad_kernel.cu
index 5902fc6bad..bb89b29a8d 100644
--- a/paddle/phi/kernels/sparse/gpu/pool_grad_kernel.cu
+++ b/paddle/phi/kernels/sparse/gpu/pool_grad_kernel.cu
@@ -72,18 +72,14 @@ void MaxPoolCooGradGPUKernel(const GPUContext& dev_ctx,
   const T* out_features_ptr = out.values().data<T>();
   const T* out_grad_ptr = out_grad.values().data<T>();
   // TODO(zhangkaihuo): call phi::sparse::EmptyLike
-  DenseTensor x_grad_indices =
-      phi::EmptyLike<IntT>(dev_ctx, x.non_zero_indices());
+  DenseTensor x_grad_indices = phi::EmptyLike<IntT>(dev_ctx, x.indices());
   DenseTensor x_grad_values = phi::EmptyLike<T>(dev_ctx, x.values());
   x_grad->SetMember(x_grad_indices, x_grad_values, x.dims(), true);
   T* x_grad_ptr = x_grad_values.data<T>();
   phi::funcs::SetConstant<GPUContext, T> set_zero;
   set_zero(dev_ctx, &x_grad_values, static_cast<T>(0.0f));
-  phi::Copy(dev_ctx,
-            x.non_zero_indices(),
-            dev_ctx.GetPlace(),
-            false,
-            &x_grad_indices);
+  phi::Copy(
+      dev_ctx, x.indices(), dev_ctx.GetPlace(), false, &x_grad_indices);
 
   for (int i = 0; i < kernel_size; i++) {
     if (counter_ptr[i] <= 0) {
@@ -117,7 +113,7 @@ void MaxPoolCooGradKernel(const Context& dev_ctx,
                           const std::vector<int>& kernel_sizes,
                           SparseCooTensor* x_grad) {
   PD_VISIT_BASE_INTEGRAL_TYPES(
-      x.non_zero_indices().dtype(), "MaxPoolCooGradGPUKernel", ([&] {
+      x.indices().dtype(), "MaxPoolCooGradGPUKernel", ([&] {
        MaxPoolCooGradGPUKernel<T, data_t>(
            dev_ctx, x, rulebook, counter, out, out_grad, kernel_sizes, x_grad);
       }));
diff --git a/paddle/phi/kernels/sparse/gpu/pool_kernel.cu b/paddle/phi/kernels/sparse/gpu/pool_kernel.cu
index 9307513daa..3f0ec2c271 100644
--- a/paddle/phi/kernels/sparse/gpu/pool_kernel.cu
+++ b/paddle/phi/kernels/sparse/gpu/pool_kernel.cu
@@ -140,7 +140,7 @@ void MaxPoolCooKernel(const Context& dev_ctx,
                       DenseTensor* rulebook,
                       DenseTensor* counter) {
   PD_VISIT_BASE_INTEGRAL_TYPES(
-      x.non_zero_indices().dtype(), "MaxPoolCooGPUKernel", ([&] {
+      x.indices().dtype(), "MaxPoolCooGPUKernel", ([&] {
        MaxPoolCooGPUKernel<T, data_t>(dev_ctx,
                                       x,
                                       kernel_sizes,
diff --git a/paddle/phi/kernels/sparse/gpu/softmax_grad_kernel.cu b/paddle/phi/kernels/sparse/gpu/softmax_grad_kernel.cu
index 6ebd1e29a5..5a66786ebb 100644
--- a/paddle/phi/kernels/sparse/gpu/softmax_grad_kernel.cu
+++ b/paddle/phi/kernels/sparse/gpu/softmax_grad_kernel.cu
@@ -93,9 +93,9 @@ void SoftmaxCsrGradKernel(const Context& dev_ctx,
   dim3 block(32, 4);
 
   PD_VISIT_BASE_INTEGRAL_TYPES(
-      out.non_zero_crows().dtype(), "SoftmaxCsrGradKernel", ([&] {
+      out.crows().dtype(), "SoftmaxCsrGradKernel", ([&] {
        SoftmaxGradGpuKernel<T, data_t><<<grid, block, 0, dev_ctx.stream()>>>(
-            out.non_zero_crows().data<data_t>(),
+            out.crows().data<data_t>(),
             out.values().data<T>(),
             dout.values().data<T>(),
             dx->mutable_values()->data<T>(),
diff --git a/paddle/phi/kernels/sparse/gpu/softmax_kernel.cu b/paddle/phi/kernels/sparse/gpu/softmax_kernel.cu
index 3c283ed132..ef6b6d91e5 100644
--- a/paddle/phi/kernels/sparse/gpu/softmax_kernel.cu
+++ b/paddle/phi/kernels/sparse/gpu/softmax_kernel.cu
@@ -105,15 +105,15 @@ void SoftmaxCsrKernel(const Context& dev_ctx,
   dim3 grid((total_row_number + 3) / 4);
   dim3 block(32, 4);
 
-  PD_VISIT_BASE_INTEGRAL_TYPES(
-      x.non_zero_crows().dtype(), "CsrSoftmaxKernel", ([&] {
-        SoftmaxGpuKernel<T, data_t><<<grid, block, 0, dev_ctx.stream()>>>(
-            x.non_zero_crows().data<data_t>(),
-            x.values().data<T>(),
-            out->mutable_values()->data<T>(),
-            row_number,
-            total_row_number);
-      }));
+  PD_VISIT_BASE_INTEGRAL_TYPES(x.crows().dtype(), "CsrSoftmaxKernel", ([&] {
+                                 SoftmaxGpuKernel<T, data_t>
+                                     <<<grid, block, 0, dev_ctx.stream()>>>(
+                                         x.crows().data<data_t>(),
+                                         x.values().data<T>(),
+                                         out->mutable_values()->data<T>(),
+                                         row_number,
+                                         total_row_number);
+                               }));
 }
 }  // namespace sparse
diff --git a/paddle/phi/kernels/sparse/gpu/sparse_utils_kernel.cu b/paddle/phi/kernels/sparse/gpu/sparse_utils_kernel.cu
index e31f3af520..dbd2f30593 100644
--- a/paddle/phi/kernels/sparse/gpu/sparse_utils_kernel.cu
+++ b/paddle/phi/kernels/sparse/gpu/sparse_utils_kernel.cu
@@ -212,9 +212,9 @@ void SparseCsrToCooGPUKernel(const GPUContext& dev_ctx,
                              const SparseCsrTensor& x,
                              SparseCooTensor* out) {
   const DDim& x_dims = x.dims();
-  const int64_t non_zero_num = x.non_zero_cols().numel();
-  const auto& csr_crows = x.non_zero_crows();
-  const auto& csr_cols = x.non_zero_cols();
+  const int64_t non_zero_num = x.cols().numel();
+  const auto& csr_crows = x.crows();
+  const auto& csr_cols = x.cols();
   const auto& csr_values = x.values();
   const IntT* csr_crows_data = csr_crows.data<IntT>();
   const IntT* csr_cols_data = csr_cols.data<IntT>();
@@ -278,7 +278,7 @@ void SparseCsrToCooKernel(const Context& dev_ctx,
                           const SparseCsrTensor& x,
                           SparseCooTensor* out) {
   PD_VISIT_BASE_INTEGRAL_TYPES(
-      x.non_zero_crows().dtype(), "SparseCsrToCooGPUKernel", ([&] {
+      x.crows().dtype(), "SparseCsrToCooGPUKernel", ([&] {
        SparseCsrToCooGPUKernel<T, data_t>(dev_ctx, x, out);
       }));
 }
@@ -358,15 +358,14 @@ void SparseCooToCsrGPUKernel(const GPUContext& dev_ctx,
   int batchs = x_dims.size() == 2 ? 1 : x_dims[0];
   int rows = x_dims.size() == 2 ? x_dims[0] : x_dims[1];
 
-  phi::DenseTensor non_zero_crows =
-      phi::Empty<IntT>(dev_ctx, {batchs * (rows + 1)});
-  phi::DenseTensor non_zero_cols = phi::Empty<IntT>(dev_ctx, {non_zero_num});
+  phi::DenseTensor crows = phi::Empty<IntT>(dev_ctx, {batchs * (rows + 1)});
+  phi::DenseTensor cols = phi::Empty<IntT>(dev_ctx, {non_zero_num});
   phi::DenseTensor values = phi::EmptyLike<T, GPUContext>(dev_ctx, x.values());
-  IntT* csr_crows_data = non_zero_crows.data<IntT>();
-  IntT* csr_cols_data = non_zero_cols.data<IntT>();
+  IntT* csr_crows_data = crows.data<IntT>();
+  IntT* csr_cols_data = cols.data<IntT>();
   T* csr_values_data = values.data<T>();
 
-  const auto& coo_indices = x.non_zero_indices();
+  const auto& coo_indices = x.indices();
   const auto& coo_values = x.values();
   const IntT* batchs_ptr = coo_indices.data<IntT>();
   const IntT* coo_rows_data =
@@ -413,7 +412,7 @@ void SparseCooToCsrGPUKernel(const GPUContext& dev_ctx,
                                    sizeof(T) * non_zero_num,
                                    gpuMemcpyDeviceToDevice,
                                    dev_ctx.stream());
-  out->SetMember(non_zero_crows, non_zero_cols, values, x_dims);
+  out->SetMember(crows, cols, values, x_dims);
 }
 
 template <typename T, typename Context>
@@ -421,7 +420,7 @@ void SparseCooToCsrKernel(const Context& dev_ctx,
                           const SparseCooTensor& x,
                           SparseCsrTensor* out) {
   PD_VISIT_BASE_INTEGRAL_TYPES(
-      x.non_zero_indices().dtype(), "SparseCooToCsrGPUKernel", ([&] {
+      x.indices().dtype(), "SparseCooToCsrGPUKernel", ([&] {
        SparseCooToCsrGPUKernel<T, data_t>(dev_ctx, x, out);
       }));
 }
@@ -453,7 +452,7 @@ void SparseCooToDenseGPUKernel(const GPUContext& dev_ctx,
                                DenseTensor* out) {
   const auto non_zero_num = x.nnz();
   const auto dense_dims = x.dims();
-  const auto indices = x.non_zero_indices();
+  const auto indices = x.indices();
   const auto values = x.values();
   const auto indices_dims = indices.dims();
   int64_t sparse_dim = indices_dims[0];
@@ -509,7 +508,7 @@ void SparseCooToDenseKernel(const Context& dev_ctx,
                             const SparseCooTensor& x,
                             DenseTensor* out) {
   PD_VISIT_BASE_INTEGRAL_TYPES(
-      x.non_zero_indices().dtype(), "SparseCooToDenseGPUKernel", ([&] {
+      x.indices().dtype(), "SparseCooToDenseGPUKernel", ([&] {
        SparseCooToDenseGPUKernel<T, data_t>(dev_ctx, x, out);
       }));
 }
-- 
GitLab