From 016b94c253d07cfd9af7dd63d1b35cc124696743 Mon Sep 17 00:00:00 2001
From: zhangkaihuo
Date: Mon, 22 Aug 2022 16:35:13 +0800
Subject: [PATCH] rename the member function of SparseTensor (#45291)

---
 paddle/phi/core/sparse_coo_tensor.h           | 14 ++++++++++
 paddle/phi/core/sparse_csr_tensor.cc          |  6 ++---
 paddle/phi/core/sparse_csr_tensor.h           | 21 +++++++++++++++
 .../phi/kernels/sparse/gpu/coalesce_kernel.cu |  4 +--
 .../kernels/sparse/gpu/conv_grad_kernel.cu    | 25 +++++++++--------
 paddle/phi/kernels/sparse/gpu/conv_kernel.cu  |  4 +--
 .../phi/kernels/sparse/gpu/convolution.cu.h   | 14 +++++-----
 paddle/phi/kernels/sparse/gpu/full_kernel.cu  |  8 +++---
 .../sparse/gpu/fused_attention_grad_kernel.cu |  6 ++---
 .../sparse/gpu/fused_attention_kernel.cu      |  4 +--
 paddle/phi/kernels/sparse/gpu/mask_kernel.cu  | 27 +++++++++----------
 .../phi/kernels/sparse/gpu/mv_grad_kernel.cu  |  4 +--
 .../kernels/sparse/gpu/pool_grad_kernel.cu    |  8 +++---
 paddle/phi/kernels/sparse/gpu/pool_kernel.cu  |  6 ++---
 .../kernels/sparse/gpu/softmax_grad_kernel.cu |  6 ++---
 .../phi/kernels/sparse/gpu/softmax_kernel.cu  |  4 +--
 .../kernels/sparse/gpu/sparse_utils_kernel.cu | 18 ++++++-------
 paddle/phi/kernels/sparse/gpu/unary_kernel.cu |  8 +++---
 18 files changed, 108 insertions(+), 79 deletions(-)

diff --git a/paddle/phi/core/sparse_coo_tensor.h b/paddle/phi/core/sparse_coo_tensor.h
index 300ae8a0ab9..ba85a751dc0 100644
--- a/paddle/phi/core/sparse_coo_tensor.h
+++ b/paddle/phi/core/sparse_coo_tensor.h
@@ -63,10 +63,16 @@ class SparseCooTensor : public TensorBase,
   /// \brief Returns the indices of non zero elements in original dense tensor.
   /// \return The indices of non zero elements in original dense tensor.
+  const DenseTensor& indices() const { return non_zero_indices_; }
+
+  /// Note: This function will be removed soon. It is recommended to use indices()
   const DenseTensor& non_zero_indices() const { return non_zero_indices_; }
 
   /// \brief Returns the non zero elements in original dense tensor.
   /// \return The non zero elements in original dense tensor.
+  const DenseTensor& values() const { return non_zero_elements_; }
+
+  /// Note: This function will be removed soon. It is recommended to use values()
   const DenseTensor& non_zero_elements() const { return non_zero_elements_; }
 
   /// \brief Returns whether the indices have been coalesced
@@ -136,10 +142,18 @@ class SparseCooTensor : public TensorBase,
   /// \brief Get a mutable pointer of non_zero_indices_.
   /// return a mutable pointer of non_zero_indices_.
+  DenseTensor* mutable_indices() { return &non_zero_indices_; }
+
+  /// Note: This function will be removed soon. It is recommended to use
+  /// mutable_indices()
   DenseTensor* mutable_non_zero_indices() { return &non_zero_indices_; }
 
   /// \brief Get a mutable pointer of non_zero_elements.
   /// return a mutable pointer of non_zero_elements.
+  DenseTensor* mutable_values() { return &non_zero_elements_; }
+
+  /// Note: This function will be removed soon. It is recommended to use
+  /// mutable_values()
   DenseTensor* mutable_non_zero_elements() { return &non_zero_elements_; }
 
   /// \brief This function is not recommended
diff --git a/paddle/phi/core/sparse_csr_tensor.cc b/paddle/phi/core/sparse_csr_tensor.cc
index 447fab0e33c..45131f48338 100644
--- a/paddle/phi/core/sparse_csr_tensor.cc
+++ b/paddle/phi/core/sparse_csr_tensor.cc
@@ -72,9 +72,9 @@ SparseCsrTensor::SparseCsrTensor(const SparseCsrTensor& other)
 SparseCsrTensor& SparseCsrTensor::operator=(const SparseCsrTensor& other) {
   this->dims_ = other.dims();
-  this->non_zero_crows_ = other.non_zero_crows();
-  this->non_zero_cols_ = other.non_zero_cols();
-  this->non_zero_elements_ = other.non_zero_elements();
+  this->non_zero_crows_ = other.crows();
+  this->non_zero_cols_ = other.cols();
+  this->non_zero_elements_ = other.values();
   return *this;
 }
diff --git a/paddle/phi/core/sparse_csr_tensor.h b/paddle/phi/core/sparse_csr_tensor.h
index 0da69ee7ed1..ee47e39f97f 100644
--- a/paddle/phi/core/sparse_csr_tensor.h
+++ b/paddle/phi/core/sparse_csr_tensor.h
@@ -74,15 +74,24 @@ class SparseCsrTensor : public TensorBase,
   /// dense tensor.
   /// \return The compressed row index of non zero elements in original dense
   /// tensor.
+  const DenseTensor& crows() const { return non_zero_crows_; }
+
+  /// Note: This function will be removed soon. It is recommended to use crows()
   const DenseTensor& non_zero_crows() const { return non_zero_crows_; }
 
   /// \brief Returns the column index of non zero elements in original dense
   /// tensor.
   /// \return The column index of non zero elements in original dense tensor.
+  const DenseTensor& cols() const { return non_zero_cols_; }
+
+  /// Note: This function will be removed soon. It is recommended to use cols()
   const DenseTensor& non_zero_cols() const { return non_zero_cols_; }
 
   /// \brief Returns the non zero elements in original dense tensor.
   /// \return The non zero elements in original dense tensor.
+  const DenseTensor& values() const { return non_zero_elements_; }
+
+  /// Note: This function will be removed soon. It is recommended to use values()
   const DenseTensor& non_zero_elements() const { return non_zero_elements_; }
 
   /// \brief Returns the total number of non zero elements in original dense
@@ -138,14 +147,26 @@ class SparseCsrTensor : public TensorBase,
   /// \brief Get a mutable pointer of non_zero_crows.
   /// return a mutable pointer of non_zero_crows.
+  DenseTensor* mutable_crows() { return &non_zero_crows_; }
+
+  /// Note: This function will be removed soon. It is recommended to use
+  /// mutable_crows()
   DenseTensor* mutable_non_zero_crows() { return &non_zero_crows_; }
 
   /// \brief Get a mutable pointer of non_zero_cols.
   /// return a mutable pointer of non_zero_cols.
+  DenseTensor* mutable_cols() { return &non_zero_cols_; }
+
+  /// Note: This function will be removed soon. It is recommended to use
+  /// mutable_cols()
   DenseTensor* mutable_non_zero_cols() { return &non_zero_cols_; }
 
   /// \brief Get a mutable pointer of non_zero_elements.
   /// return a mutable pointer of non_zero_elements.
+  DenseTensor* mutable_values() { return &non_zero_elements_; }
+
+  /// Note: This function will be removed soon. It is recommended to use
+  /// mutable_values()
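[Editor's illustration, not part of the patch: how a call site migrates to the
renamed accessors. A minimal sketch assuming the two headers patched above and
already-populated tensors; the function name InspectSparse is hypothetical.]

#include "paddle/phi/core/sparse_coo_tensor.h"
#include "paddle/phi/core/sparse_csr_tensor.h"

// Each new accessor returns the same member as its old spelling, so call
// sites can migrate incrementally during the deprecation window.
void InspectSparse(const phi::SparseCooTensor& coo,
                   const phi::SparseCsrTensor& csr) {
  const phi::DenseTensor& indices = coo.indices();  // was coo.non_zero_indices()
  const phi::DenseTensor& values = coo.values();    // was coo.non_zero_elements()
  const phi::DenseTensor& crows = csr.crows();      // was csr.non_zero_crows()
  const phi::DenseTensor& cols = csr.cols();        // was csr.non_zero_cols()
  const phi::DenseTensor& vals = csr.values();      // was csr.non_zero_elements()
  (void)indices; (void)values; (void)crows; (void)cols; (void)vals;
}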
   DenseTensor* mutable_non_zero_elements() { return &non_zero_elements_; }
 
   /// \brief set the dims of original dense tensor
diff --git a/paddle/phi/kernels/sparse/gpu/coalesce_kernel.cu b/paddle/phi/kernels/sparse/gpu/coalesce_kernel.cu
index c7e7849083a..d4792804f0b 100644
--- a/paddle/phi/kernels/sparse/gpu/coalesce_kernel.cu
+++ b/paddle/phi/kernels/sparse/gpu/coalesce_kernel.cu
@@ -31,7 +31,7 @@ void CoalesceGPUKernel(const GPUContext& dev_ctx,
                        const SparseCooTensor& x,
                        SparseCooTensor* out) {
   const DenseTensor& x_indices = x.non_zero_indices();
-  const DenseTensor& x_values = x.non_zero_elements();
+  const DenseTensor& x_values = x.values();
   DenseTensor out_indices = phi::EmptyLike<IntT>(dev_ctx, x_indices);
   DenseTensor out_values = phi::EmptyLike<T>(dev_ctx, x_values);
@@ -73,7 +73,7 @@ void CoalesceGPUKernel(const GPUContext& dev_ctx,
   // 2. get the address of each non-zero values
   const T* x_values_ptr = x_values.data<T>();
   const int64_t stride =
-      x.dims().size() == sparse_dim ? 1 : x.non_zero_elements().dims()[1];
+      x.dims().size() == sparse_dim ? 1 : x.values().dims()[1];
   DenseTensor values_indexs = phi::Empty(
       dev_ctx, DenseTensorMeta(DataType::INT32, {nnz}, DataLayout::NCHW));
   int* values_indexs_ptr = values_indexs.data<int>();
diff --git a/paddle/phi/kernels/sparse/gpu/conv_grad_kernel.cu b/paddle/phi/kernels/sparse/gpu/conv_grad_kernel.cu
index 9cbd75ed4ea..9461cec7b26 100644
--- a/paddle/phi/kernels/sparse/gpu/conv_grad_kernel.cu
+++ b/paddle/phi/kernels/sparse/gpu/conv_grad_kernel.cu
@@ -81,7 +81,7 @@ void Conv3dCooGradGPUKernel(const GPUContext& dev_ctx,
   auto blas = phi::funcs::GetBlas<GPUContext, T>(dev_ctx);
   DenseTensor x_grad_indices = phi::EmptyLike<IntT>(dev_ctx, x.non_zero_indices());
-  DenseTensor x_grad_values = phi::EmptyLike<T>(dev_ctx, x.non_zero_elements());
+  DenseTensor x_grad_values = phi::EmptyLike<T>(dev_ctx, x.values());
   T* x_grad_values_ptr = x_grad_values.data<T>();
   phi::backends::gpu::GpuMemsetAsync(x_grad_values_ptr,
                                      0,
@@ -109,16 +109,15 @@ void Conv3dCooGradGPUKernel(const GPUContext& dev_ctx,
   offsets[kernel_size] = offset;
 
   if (subm) {
-    phi::funcs::sparse::SubmPreProcess<T, GPUContext>(
-        dev_ctx,
-        x,
-        kernel,
-        out_grad.non_zero_elements(),
-        in_channels,
-        out_channels,
-        half_kernel_size,
-        kernel_grad,
-        &x_grad_values);
+    phi::funcs::sparse::SubmPreProcess<T, GPUContext>(dev_ctx,
+                                                      x,
+                                                      kernel,
+                                                      out_grad.values(),
+                                                      in_channels,
+                                                      out_channels,
+                                                      half_kernel_size,
+                                                      kernel_grad,
+                                                      &x_grad_values);
     if (max_count == 0) {
       return;
     }
@@ -181,7 +180,7 @@ void Conv3dCooGradGPUKernel(const GPUContext& dev_ctx,
                           unique_value_ptr);
 
     GatherV2<T, IntT>(dev_ctx,
-                      x.non_zero_elements().data<T>(),
+                      x.values().data<T>(),
                       out_index_ptr,
                       unique_value_ptr,
                       x.nnz(),
                       in_features_ptr);
 
     Gather<T, IntT>(dev_ctx,
-                    out_grad.non_zero_elements().data<T>(),
+                    out_grad.values().data<T>(),
                     rulebook_ptr + rulebook_len,
                     rulebook_len,
                     out_channels,
diff --git a/paddle/phi/kernels/sparse/gpu/conv_kernel.cu b/paddle/phi/kernels/sparse/gpu/conv_kernel.cu
index 1a2b3134657..dc6253b725d 100644
--- a/paddle/phi/kernels/sparse/gpu/conv_kernel.cu
+++ b/paddle/phi/kernels/sparse/gpu/conv_kernel.cu
@@ -131,7 +131,7 @@ void Conv3dCooGPUKernel(const GPUContext& dev_ctx,
   set_zero(dev_ctx, &out_features, static_cast<T>(0.0f));
 
   Gather<T, IntT>(dev_ctx,
-                  x.non_zero_elements().data<T>(),
+                  x.values().data<T>(),
                   rulebook_ptr,
                   rulebook_len,
                   in_channels,
                   in_features_ptr);
   // 3. call gemm for every weight
   auto blas = phi::funcs::GetBlas<GPUContext, T>(dev_ctx);
-  auto* out_values = out->mutable_non_zero_elements();
+  auto* out_values = out->mutable_values();
   T* out_values_ptr = out_values->data<T>();
   set_zero(dev_ctx, out_values, static_cast<T>(0.0f));
diff --git a/paddle/phi/kernels/sparse/gpu/convolution.cu.h b/paddle/phi/kernels/sparse/gpu/convolution.cu.h
index 2591d24bfe4..6df1969aad3 100644
--- a/paddle/phi/kernels/sparse/gpu/convolution.cu.h
+++ b/paddle/phi/kernels/sparse/gpu/convolution.cu.h
@@ -515,9 +515,8 @@ int ProductRuleBook(const Context& dev_ctx,
     const int64_t sparse_dim = 4;
     DenseTensorMeta indices_meta(
         indices_dtype, {sparse_dim, out_non_zero_num}, DataLayout::NCHW);
-    DenseTensorMeta values_meta(x.dtype(),
-                                {out_non_zero_num, kernel_sizes[4]},
-                                x.non_zero_elements().layout());
+    DenseTensorMeta values_meta(
+        x.dtype(), {out_non_zero_num, kernel_sizes[4]}, x.values().layout());
     phi::DenseTensor out_indices = phi::Empty(dev_ctx, std::move(indices_meta));
     phi::DenseTensor out_values = phi::Empty(dev_ctx, std::move(values_meta));
@@ -541,11 +540,10 @@ int ProductRuleBook(const Context& dev_ctx,
   } else {
     DenseTensor out_indices = phi::EmptyLike<IntT>(dev_ctx, x.non_zero_indices());
-    DenseTensor out_values =
-        phi::Empty(dev_ctx,
-                   DenseTensorMeta(x.dtype(),
-                                   {x.nnz(), kernel_sizes[4]},
-                                   x.non_zero_elements().layout()));
+    DenseTensor out_values = phi::Empty(
+        dev_ctx,
+        DenseTensorMeta(
+            x.dtype(), {x.nnz(), kernel_sizes[4]}, x.values().layout()));
     phi::Copy(
         dev_ctx, x.non_zero_indices(), dev_ctx.GetPlace(), false, &out_indices);
     out->SetMember(out_indices, out_values, out_dims, true);
diff --git a/paddle/phi/kernels/sparse/gpu/full_kernel.cu b/paddle/phi/kernels/sparse/gpu/full_kernel.cu
index a3dc5a9534b..acc4d1398cb 100644
--- a/paddle/phi/kernels/sparse/gpu/full_kernel.cu
+++ b/paddle/phi/kernels/sparse/gpu/full_kernel.cu
@@ -48,8 +48,8 @@ void CooFullLikeKernel(const Context& dev_ctx,
             false,
             out->mutable_non_zero_indices());
 
-  DenseTensor* values = out->mutable_non_zero_elements();
-  values->Resize(x.non_zero_elements().dims());
+  DenseTensor* values = out->mutable_values();
+  values->Resize(x.values().dims());
   dev_ctx.template Alloc<T>(values);
 
   std::vector<const DenseTensor*> inputs = {};
@@ -80,8 +80,8 @@ void CsrFullLikeKernel(const Context& dev_ctx,
             false,
             out->mutable_non_zero_cols());
 
-  DenseTensor* values = out->mutable_non_zero_elements();
-  values->Resize(x.non_zero_elements().dims());
+  DenseTensor* values = out->mutable_values();
+  values->Resize(x.values().dims());
   dev_ctx.template Alloc<T>(values);
 
   std::vector<const DenseTensor*> inputs = {};
diff --git a/paddle/phi/kernels/sparse/gpu/fused_attention_grad_kernel.cu b/paddle/phi/kernels/sparse/gpu/fused_attention_grad_kernel.cu
index 5be45605983..f3fabb05928 100644
--- a/paddle/phi/kernels/sparse/gpu/fused_attention_grad_kernel.cu
+++ b/paddle/phi/kernels/sparse/gpu/fused_attention_grad_kernel.cu
@@ -94,9 +94,9 @@ void FusedAttentionCsrGradKernel(const Context& dev_ctx,
     AttnSoftmaxGpuGradKernel<T><<<grid, block, 0, dev_ctx.stream()>>>(
         softmax.non_zero_crows().data<int64_t>(),
-        softmax.non_zero_elements().data<T>(),
-        dsoftmax.mutable_non_zero_elements()->data<T>(),
-        d_sdd_result.mutable_non_zero_elements()->data<T>(),
+        softmax.values().data<T>(),
+        dsoftmax.mutable_values()->data<T>(),
+        d_sdd_result.mutable_values()->data<T>(),
         M,
         total_row_num,
         std::sqrt(N),
diff --git a/paddle/phi/kernels/sparse/gpu/fused_attention_kernel.cu b/paddle/phi/kernels/sparse/gpu/fused_attention_kernel.cu
index 8761319ee8d..f98d913b957 100644
--- a/paddle/phi/kernels/sparse/gpu/fused_attention_kernel.cu
+++ b/paddle/phi/kernels/sparse/gpu/fused_attention_kernel.cu
@@ -202,10 +202,10 @@ void FusedAttentionCsrKernel(
     AttnSoftmaxGpuKernel<T><<<grid, block, 0, dev_ctx.stream()>>>(
         sdd_result.non_zero_crows().data<int64_t>(),
         sdd_result.non_zero_cols().data<int64_t>(),
-        sdd_result.non_zero_elements().data<T>(),
+        sdd_result.values().data<T>(),
         kp_mask_ptr ? kp_mask_ptr->data<T>() : nullptr,
         attn_mask_ptr ? attn_mask_ptr->data<T>() : nullptr,
-        softmax->mutable_non_zero_elements()->data<T>(),
+        softmax->mutable_values()->data<T>(),
         M,
         total_row_num,
         q_dim[1],
diff --git a/paddle/phi/kernels/sparse/gpu/mask_kernel.cu b/paddle/phi/kernels/sparse/gpu/mask_kernel.cu
index 5b182637883..3865400dcfe 100644
--- a/paddle/phi/kernels/sparse/gpu/mask_kernel.cu
+++ b/paddle/phi/kernels/sparse/gpu/mask_kernel.cu
@@ -59,7 +59,7 @@ void SparseMaskGPUKernel(const GPUContext& dev_ctx,
       mask.dims(),
       phi::errors::InvalidArgument("the input x and mask must have the same shape"));
   const DenseTensor& indices = mask.non_zero_indices();
-  const DenseTensor& values = mask.non_zero_elements();
+  const DenseTensor& values = mask.values();
   const int sparse_dim = mask.sparse_dim();
   DenseTensor sparse_offsets = phi::Empty(
       dev_ctx,
@@ -224,8 +224,8 @@ void SparseMaskHelperGPUKernel(const GPUContext& dev_ctx,
   phi::backends::gpu::GpuMemsetAsync(
       table.data<int>(), 0, table_size * sizeof(int), dev_ctx.stream());
   const int64_t stride =
-      x.dims().size() == sparse_dim ? 1 : x.non_zero_elements().dims()[1];
-  *out = phi::EmptyLike<T>(dev_ctx, x.non_zero_elements());
+      x.dims().size() == sparse_dim ? 1 : x.values().dims()[1];
+  *out = phi::EmptyLike<T>(dev_ctx, x.values());
   phi::funcs::SetConstant<GPUContext, T> set_zero;
   set_zero(dev_ctx, out, static_cast<T>(0));
   T* out_ptr = out->data<T>();
@@ -242,16 +242,15 @@ void SparseMaskHelperGPUKernel(const GPUContext& dev_ctx,
   const int VecBytes = 16;
   const int VecSize = VecBytes / sizeof(T);
   if (stride % VecSize == 0) {
-    MaskCopy<T, IntT, VecSize>
-        <<<config.block_per_grid,
-           config.thread_per_block,
-           0,
-           dev_ctx.stream()>>>(mask_indexs_ptr,
-                               table.data<int>(),
-                               mask_indexs.numel(),
-                               stride,
-                               x.non_zero_elements().data<T>(),
-                               out_ptr);
+    MaskCopy<T, IntT, VecSize><<<config.block_per_grid,
+                                 config.thread_per_block,
+                                 0,
+                                 dev_ctx.stream()>>>(mask_indexs_ptr,
+                                                     table.data<int>(),
+                                                     mask_indexs.numel(),
+                                                     stride,
+                                                     x.values().data<T>(),
+                                                     out_ptr);
   } else {
     MaskCopy<T, IntT, 1><<<config.block_per_grid,
                            config.thread_per_block,
                            0,
                            dev_ctx.stream()>>>(mask_indexs_ptr,
                                                table.data<int>(),
                                                mask_indexs.numel(),
                                                stride,
-                                               x.non_zero_elements().data<T>(),
+                                               x.values().data<T>(),
                                                out_ptr);
   }
diff --git a/paddle/phi/kernels/sparse/gpu/mv_grad_kernel.cu b/paddle/phi/kernels/sparse/gpu/mv_grad_kernel.cu
index 2344325d951..c4169c275e4 100644
--- a/paddle/phi/kernels/sparse/gpu/mv_grad_kernel.cu
+++ b/paddle/phi/kernels/sparse/gpu/mv_grad_kernel.cu
@@ -79,7 +79,7 @@ void MvCooGradKernel(const Context &dev_ctx,
                          dev_ctx.stream()>>>(dout.data<T>(),
                                              vec.data<T>(),
                                              dx->non_zero_indices().data<data_t>(),
-                                             dx->mutable_non_zero_elements()->data<T>(),
+                                             dx->mutable_values()->data<T>(),
                                              dx->nnz());
     }));
 }
@@ -127,7 +127,7 @@ void MvCsrGradKernel(const Context &dev_ctx,
                                              vec.data<T>(),
                                              dx->non_zero_crows().data<data_t>(),
                                              dx->non_zero_cols().data<data_t>(),
-                                             dx->mutable_non_zero_elements()->data<T>(),
+                                             dx->mutable_values()->data<T>(),
                                              row_number);
     }));
 }
diff --git a/paddle/phi/kernels/sparse/gpu/pool_grad_kernel.cu b/paddle/phi/kernels/sparse/gpu/pool_grad_kernel.cu
index 39973e4230d..5902fc6bad8 100644
--- a/paddle/phi/kernels/sparse/gpu/pool_grad_kernel.cu
+++ b/paddle/phi/kernels/sparse/gpu/pool_grad_kernel.cu
@@ -68,13 +68,13 @@ void MaxPoolCooGradGPUKernel(const GPUContext& dev_ctx,
   const int* counter_ptr = counter.data<int>();
   phi::funcs::sparse::PrefixSum(counter_ptr, &offsets[0], kernel_size);
 
-  const T* in_features_ptr = x.non_zero_elements().data<T>();
-  const T* out_features_ptr = out.non_zero_elements().data<T>();
-  const T* out_grad_ptr = out_grad.non_zero_elements().data<T>();
+  const T* in_features_ptr = x.values().data<T>();
+  const T* out_features_ptr = out.values().data<T>();
+  const T* out_grad_ptr = out_grad.values().data<T>();
   // TODO(zhangkaihuo): call phi::sparse::EmptyLike
   DenseTensor x_grad_indices = phi::EmptyLike<IntT>(dev_ctx, x.non_zero_indices());
-  DenseTensor x_grad_values = phi::EmptyLike<T>(dev_ctx, x.non_zero_elements());
+  DenseTensor x_grad_values = phi::EmptyLike<T>(dev_ctx, x.values());
   x_grad->SetMember(x_grad_indices, x_grad_values, x.dims(), true);
   T* x_grad_ptr = x_grad_values.data<T>();
   phi::funcs::SetConstant<GPUContext, T> set_zero;
diff --git a/paddle/phi/kernels/sparse/gpu/pool_kernel.cu b/paddle/phi/kernels/sparse/gpu/pool_kernel.cu
index 2480a905c21..9307513daa8 100644
--- a/paddle/phi/kernels/sparse/gpu/pool_kernel.cu
+++ b/paddle/phi/kernels/sparse/gpu/pool_kernel.cu
@@ -95,8 +95,8 @@ void MaxPoolCooGPUKernel(const GPUContext& dev_ctx,
 
   const IntT* rulebook_ptr = rulebook->data<IntT>();
 
-  T* out_features_ptr = out->mutable_non_zero_elements()->data<T>();
-  const T* in_features_ptr = x.non_zero_elements().data<T>();
+  T* out_features_ptr = out->mutable_values()->data<T>();
+  const T* in_features_ptr = x.values().data<T>();
   counter->Resize({kernel_size});
   int* counter_ptr = dev_ctx.template HostAlloc<int>(counter);
   memcpy(counter_ptr, h_counter.data(), h_counter.size() * sizeof(int));
@@ -107,7 +107,7 @@ void MaxPoolCooGPUKernel(const GPUContext& dev_ctx,
   thrust::fill(thrust::cuda::par.on(dev_ctx.stream()),
 #endif
                out_features_ptr,
-               out_features_ptr + out->non_zero_elements().numel(),
+               out_features_ptr + out->values().numel(),
                static_cast<T>(0));
   // TODO(zhangkaihuo) Replacing multiple calls with one kernel may be faster
   for (int i = 0; i < kernel_size; i++) {
diff --git a/paddle/phi/kernels/sparse/gpu/softmax_grad_kernel.cu b/paddle/phi/kernels/sparse/gpu/softmax_grad_kernel.cu
index 33165e29359..6ebd1e29a59 100644
--- a/paddle/phi/kernels/sparse/gpu/softmax_grad_kernel.cu
+++ b/paddle/phi/kernels/sparse/gpu/softmax_grad_kernel.cu
@@ -96,9 +96,9 @@ void SoftmaxCsrGradKernel(const Context& dev_ctx,
       out.non_zero_crows().dtype(), "SoftmaxCsrGradKernel", ([&] {
         SoftmaxGradGpuKernel<T, data_t><<<grid, block, 0, dev_ctx.stream()>>>(
             out.non_zero_crows().data<data_t>(),
-            out.non_zero_elements().data<T>(),
-            dout.non_zero_elements().data<T>(),
-            dx->mutable_non_zero_elements()->data<T>(),
+            out.values().data<T>(),
+            dout.values().data<T>(),
+            dx->mutable_values()->data<T>(),
             row_number,
             total_row_number);
       }));
diff --git a/paddle/phi/kernels/sparse/gpu/softmax_kernel.cu b/paddle/phi/kernels/sparse/gpu/softmax_kernel.cu
index 05f200f9b02..3c283ed1324 100644
--- a/paddle/phi/kernels/sparse/gpu/softmax_kernel.cu
+++ b/paddle/phi/kernels/sparse/gpu/softmax_kernel.cu
@@ -109,8 +109,8 @@ void SoftmaxCsrKernel(const Context& dev_ctx,
       x.non_zero_crows().dtype(), "CsrSoftmaxKernel", ([&] {
         SoftmaxGpuKernel<T, data_t><<<grid, block, 0, dev_ctx.stream()>>>(
             x.non_zero_crows().data<data_t>(),
-            x.non_zero_elements().data<T>(),
-            out->mutable_non_zero_elements()->data<T>(),
+            x.values().data<T>(),
+            out->mutable_values()->data<T>(),
             row_number,
             total_row_number);
       }));
diff --git a/paddle/phi/kernels/sparse/gpu/sparse_utils_kernel.cu b/paddle/phi/kernels/sparse/gpu/sparse_utils_kernel.cu
index 97221c94892..e31f3af5200 100644
--- a/paddle/phi/kernels/sparse/gpu/sparse_utils_kernel.cu
+++ b/paddle/phi/kernels/sparse/gpu/sparse_utils_kernel.cu
@@ -215,7 +215,7 @@ void SparseCsrToCooGPUKernel(const GPUContext& dev_ctx,
   const int64_t non_zero_num = x.non_zero_cols().numel();
   const auto& csr_crows = x.non_zero_crows();
   const auto& csr_cols = x.non_zero_cols();
-  const auto& csr_values = x.non_zero_elements();
+  const auto& csr_values = x.values();
   const IntT* csr_crows_data = csr_crows.data<IntT>();
   const IntT* csr_cols_data = csr_cols.data<IntT>();
   const T* csr_values_data = csr_values.data<T>();
@@ -361,14 +361,13 @@ void SparseCooToCsrGPUKernel(const GPUContext& dev_ctx,
   phi::DenseTensor non_zero_crows =
       phi::Empty<IntT>(dev_ctx, {batchs * (rows + 1)});
   phi::DenseTensor non_zero_cols = phi::Empty<IntT>(dev_ctx, {non_zero_num});
-  phi::DenseTensor non_zero_elements =
-      phi::EmptyLike<T, GPUContext>(dev_ctx, x.non_zero_elements());
+  phi::DenseTensor values = phi::EmptyLike<T, GPUContext>(dev_ctx, x.values());
   IntT* csr_crows_data = non_zero_crows.data<IntT>();
   IntT* csr_cols_data = non_zero_cols.data<IntT>();
-  T* csr_values_data = non_zero_elements.data<T>();
+  T* csr_values_data = values.data<T>();
 
   const auto& coo_indices = x.non_zero_indices();
-  const auto& coo_values = x.non_zero_elements();
+  const auto& coo_values = x.values();
   const IntT* batchs_ptr = coo_indices.data<IntT>();
   const IntT* coo_rows_data =
       x_dims.size() == 2 ? batchs_ptr : batchs_ptr + non_zero_num;
@@ -414,7 +413,7 @@ void SparseCooToCsrGPUKernel(const GPUContext& dev_ctx,
                                    sizeof(T) * non_zero_num,
                                    gpuMemcpyDeviceToDevice,
                                    dev_ctx.stream());
-  out->SetMember(non_zero_crows, non_zero_cols, non_zero_elements, x_dims);
+  out->SetMember(non_zero_crows, non_zero_cols, values, x_dims);
 }
 
 template <typename T, typename IntT>
@@ -455,7 +454,7 @@ void SparseCooToDenseGPUKernel(const GPUContext& dev_ctx,
   const auto non_zero_num = x.nnz();
   const auto dense_dims = x.dims();
   const auto indices = x.non_zero_indices();
-  const auto values = x.non_zero_elements();
+  const auto values = x.values();
   const auto indices_dims = indices.dims();
   int64_t sparse_dim = indices_dims[0];
   if (indices_dims.size() == 1) {
@@ -465,9 +464,8 @@ void SparseCooToDenseGPUKernel(const GPUContext& dev_ctx,
   const auto place = dev_ctx.GetPlace();
   const T* x_data = values.data<T>();
-  *out = phi::Empty(dev_ctx,
-                    phi::DenseTensorMeta(
-                        x.dtype(), x.dims(), x.non_zero_elements().layout()));
+  *out = phi::Empty(
+      dev_ctx, phi::DenseTensorMeta(x.dtype(), x.dims(), x.values().layout()));
   T* out_data = out->data<T>();
   int64_t base_offset = 1;
   for (int64_t i = 0; i < dense_dim; i++) {
diff --git a/paddle/phi/kernels/sparse/gpu/unary_kernel.cu b/paddle/phi/kernels/sparse/gpu/unary_kernel.cu
index b03f508a325..c2d3dec047a 100644
--- a/paddle/phi/kernels/sparse/gpu/unary_kernel.cu
+++ b/paddle/phi/kernels/sparse/gpu/unary_kernel.cu
@@ -40,8 +40,8 @@ void DivCooScalarKernel(const Context& dev_ctx,
                         SparseCooTensor* out) {
   EmptyLikeCooKernel<T>(dev_ctx, x, out);
 
-  std::vector<const DenseTensor*> ins = {&(x.non_zero_elements())};
-  std::vector<DenseTensor*> outs = {out->mutable_non_zero_elements()};
+  std::vector<const DenseTensor*> ins = {&(x.values())};
+  std::vector<DenseTensor*> outs = {out->mutable_values()};
   DivScalarFunctor<T> func(static_cast<T>(scalar));
   funcs::ElementwiseKernel<T, DivScalarFunctor<T>>(dev_ctx, ins, &outs, func);
 }
@@ -53,8 +53,8 @@ void DivCsrScalarKernel(const Context& dev_ctx,
                         SparseCsrTensor* out) {
   EmptyLikeCsrKernel<T>(dev_ctx, x, out);
 
-  std::vector<const DenseTensor*> ins = {&(x.non_zero_elements())};
-  std::vector<DenseTensor*> outs = {out->mutable_non_zero_elements()};
+  std::vector<const DenseTensor*> ins = {&(x.values())};
+  std::vector<DenseTensor*> outs = {out->mutable_values()};
   DivScalarFunctor<T> func(static_cast<T>(scalar));
   funcs::ElementwiseKernel<T, DivScalarFunctor<T>>(dev_ctx, ins, &outs, func);
 }
-- 
GitLab
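[Editor's illustration, not part of the patch: the compatibility pattern the
patch relies on, reduced to a self-contained toy. Class and member names here
are stand-ins, not phi types.]

#include <cassert>

struct Tensor {};

class ToySparseTensor {
 public:
  // New, shorter spelling introduced by the rename.
  const Tensor& values() const { return non_zero_elements_; }
  Tensor* mutable_values() { return &non_zero_elements_; }
  // Old spelling kept as a thin alias until call sites finish migrating.
  const Tensor& non_zero_elements() const { return non_zero_elements_; }
  Tensor* mutable_non_zero_elements() { return &non_zero_elements_; }

 private:
  Tensor non_zero_elements_;
};

int main() {
  ToySparseTensor t;
  // Both spellings alias the same storage, so mixed old/new call sites agree.
  assert(&t.values() == &t.non_zero_elements());
  assert(t.mutable_values() == t.mutable_non_zero_elements());
  return 0;
}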