Unverified commit a9cc5482, authored by zhangkaihuo, committed by GitHub

[Sparse] Rename and fix doc (#46853)

Parent: fe716a0b
@@ -124,8 +124,8 @@
            cast_csr_grad {sparse_csr, sparse_csr -> sparse_csr}
     data_type : out_grad

-- backward_op : conv3d_coo_grad
-  forward : conv3d_coo (Tensor x, Tensor kernel, int[] paddings, int[] dilations, int[] strides, int groups, bool subm, str key) -> Tensor(out), Tensor(rulebook), Tensor(counter)
+- backward_op : conv3d_grad
+  forward : conv3d (Tensor x, Tensor kernel, int[] paddings, int[] dilations, int[] strides, int groups, bool subm, str key) -> Tensor(out), Tensor(rulebook), Tensor(counter)
   args : (Tensor x, Tensor kernel, Tensor out, Tensor rulebook, Tensor counter, Tensor out_grad, int[] paddings, int[] dilations, int[] strides, int groups, bool subm, str key)
   output : Tensor(x_grad), Tensor(kernel_grad)
   infer_meta :
...
@@ -119,7 +119,7 @@
     func : conv3d_coo{sparse_coo, dense -> sparse_coo, dense, dense}
     layout : x
   intermediate: rulebook, counter
-  backward : conv3d_coo_grad
+  backward : conv3d_grad

 - op : divide
   args : (Tensor x, Tensor y)
@@ -139,8 +139,8 @@
     func : UnchangedInferMeta
     param : [x]
   kernel :
-    func : divide_coo_scalar{sparse_coo -> sparse_coo},
-           divide_csr_scalar{sparse_csr -> sparse_csr}
+    func : divide_scalar_coo{sparse_coo -> sparse_coo},
+           divide_scalar_csr{sparse_csr -> sparse_csr}
   backward : divide_scalar_grad

 - op : expm1
@@ -393,7 +393,7 @@
   infer_meta :
     func : UnchangedInferMeta
   kernel :
-    func: coalesce{sparse_coo -> sparse_coo}
+    func: coalesce_coo{sparse_coo -> sparse_coo}
     layout : x

 - op: full_like
@@ -403,8 +403,8 @@
     func : CreateLikeInferMeta
     param : [x, dtype]
   kernel :
-    func : coo_full_like{sparse_coo -> sparse_coo},
-           csr_full_like{sparse_csr -> sparse_csr}
+    func : full_like_coo{sparse_coo -> sparse_coo},
+           full_like_csr{sparse_csr -> sparse_csr}
     layout : x
     data_type : dtype
...
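A note on why the yaml edits and the C++ renames below have to land in the same commit: each `func :` string above is the exact lookup key produced by a matching PD_REGISTER_KERNEL call, while op-level names (conv3d, conv3d_grad) drop the layout suffix that kernel-level names (conv3d_coo, divide_scalar_coo) keep. For reference, the CPU registration that pairs with the renamed divide_scalar_coo entry, copied from a later hunk of this same commit:

PD_REGISTER_KERNEL(divide_scalar_coo,  // must equal the yaml `func :` string
                   CPU,
                   ALL_LAYOUT,
                   phi::sparse::DivScalarCooKernel,
                   float,
                   double) {
  kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
}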
@@ -22,14 +22,14 @@ namespace phi {
 namespace sparse {

 template <typename T, typename Context>
-void CoalesceKernel(const Context& dev_ctx,
-                    const SparseCooTensor& x,
-                    SparseCooTensor* out);
+void CoalesceCooKernel(const Context& dev_ctx,
+                       const SparseCooTensor& x,
+                       SparseCooTensor* out);

 template <typename T, typename Context>
-SparseCooTensor Coalesce(const Context& dev_ctx, const SparseCooTensor& x) {
+SparseCooTensor CoalesceCoo(const Context& dev_ctx, const SparseCooTensor& x) {
   SparseCooTensor coo;
-  CoalesceKernel<T, Context>(dev_ctx, x, &coo);
+  CoalesceCooKernel<T, Context>(dev_ctx, x, &coo);
   return coo;
 }
...
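For callers, only the names change. A minimal usage sketch of the renamed wrapper, assuming a live phi context, an existing COO tensor, and the usual include path for this header:

// Hedged sketch: same behavior as the old Coalesce<T>, under the new name.
#include "paddle/phi/kernels/sparse/coalesce_kernel.h"  // assumed path

phi::SparseCooTensor Canonicalize(const phi::CPUContext& dev_ctx,
                                  const phi::SparseCooTensor& x) {
  // Merges entries that share the same indices into a single entry.
  return phi::sparse::CoalesceCoo<float>(dev_ctx, x);
}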
@@ -22,7 +22,7 @@ namespace phi {
 namespace sparse {

 template <typename T, typename IntT>
-void CoalesceCPUKernel(const CPUContext& dev_ctx,
-                       const SparseCooTensor& x,
-                       SparseCooTensor* out) {
+void CoalesceCooCPUKernel(const CPUContext& dev_ctx,
+                          const SparseCooTensor& x,
+                          SparseCooTensor* out) {
   const DenseTensor& x_indices = x.indices();
@@ -95,21 +95,22 @@ void CoalesceCPUKernel(const CPUContext& dev_ctx,
 }

 template <typename T, typename Context>
-void CoalesceKernel(const Context& dev_ctx,
-                    const SparseCooTensor& x,
-                    SparseCooTensor* out) {
-  PD_VISIT_BASE_INTEGRAL_TYPES(x.indices().dtype(), "CoalesceCPUKernel", ([&] {
-                                 CoalesceCPUKernel<T, data_t>(dev_ctx, x, out);
-                               }));
+void CoalesceCooKernel(const Context& dev_ctx,
+                       const SparseCooTensor& x,
+                       SparseCooTensor* out) {
+  PD_VISIT_BASE_INTEGRAL_TYPES(
+      x.indices().dtype(), "CoalesceCooCPUKernel", ([&] {
+        CoalesceCooCPUKernel<T, data_t>(dev_ctx, x, out);
+      }));
 }

 }  // namespace sparse
 }  // namespace phi

-PD_REGISTER_KERNEL(coalesce,
+PD_REGISTER_KERNEL(coalesce_coo,
                    CPU,
                    ALL_LAYOUT,
-                   phi::sparse::CoalesceKernel,
+                   phi::sparse::CoalesceCooKernel,
                    float,
                    double,
                    phi::dtype::float16,
...
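The PD_VISIT_BASE_INTEGRAL_TYPES call above changes only its debug label and its callee. For readers unfamiliar with the pattern, a simplified stand-in (not the real macro) for what it does:

#include <cstdint>

// Dispatch on the runtime dtype of the indices tensor, binding the matching
// C++ type before invoking the typed kernel body.
enum class IndexDType { kInt32, kInt64 };

template <typename Functor>
void VisitBaseIntegralTypes(IndexDType dtype, Functor&& body) {
  switch (dtype) {
    case IndexDType::kInt32:
      body(int32_t{});  // the real macro aliases the bound type as `data_t`
      break;
    case IndexDType::kInt64:
      body(int64_t{});
      break;
  }
}

// Usage mirroring CoalesceCooKernel:
//   VisitBaseIntegralTypes(dtype, [&](auto tag) {
//     using data_t = decltype(tag);
//     CoalesceCooCPUKernel<T, data_t>(dev_ctx, x, out);
//   });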
@@ -31,7 +31,7 @@ void FullValue(const Context& dev_ctx, DenseTensor* tensor, T val) {
 }

 template <typename T, typename Context>
-void CooFullLikeKernel(const Context& dev_ctx,
+void FullLikeCooKernel(const Context& dev_ctx,
                        const SparseCooTensor& x,
                        const Scalar& val,
                        DataType dtype,
@@ -51,7 +51,7 @@ void CooFullLikeKernel(const Context& dev_ctx,
 }

 template <typename T, typename Context>
-void CsrFullLikeKernel(const Context& dev_ctx,
+void FullLikeCsrKernel(const Context& dev_ctx,
                        const SparseCsrTensor& x,
                        const Scalar& val,
                        DataType dtype,
@@ -78,10 +78,10 @@ void CsrFullLikeKernel(const Context& dev_ctx,
 }  // namespace phi

-PD_REGISTER_KERNEL(coo_full_like,
+PD_REGISTER_KERNEL(full_like_coo,
                    CPU,
                    ALL_LAYOUT,
-                   phi::CooFullLikeKernel,
+                   phi::FullLikeCooKernel,
                    float,
                    double,
                    uint8_t,
@@ -96,10 +96,10 @@ PD_REGISTER_KERNEL(coo_full_like,
   kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
 }

-PD_REGISTER_KERNEL(csr_full_like,
+PD_REGISTER_KERNEL(full_like_csr,
                    CPU,
                    ALL_LAYOUT,
-                   phi::CsrFullLikeKernel,
+                   phi::FullLikeCsrKernel,
                    float,
                    double,
                    uint8_t,
...
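A hedged call sketch for the renamed CPU kernel; include paths and a live context are assumed, and note these kernels sit in namespace phi, not phi::sparse, matching the registrations above:

phi::SparseCooTensor OnesLikeCoo(const phi::CPUContext& dev_ctx,
                                 const phi::SparseCooTensor& x) {
  phi::SparseCooTensor out;
  // Keeps x's sparsity pattern; fills every stored value with 1.
  phi::FullLikeCooKernel<float, phi::CPUContext>(
      dev_ctx, x, phi::Scalar(1.0f), phi::DataType::FLOAT32, &out);
  return out;
}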
@@ -25,7 +25,7 @@ namespace phi {
 namespace sparse {

 template <typename T, typename Context>
-void DivCooScalarKernel(const Context& dev_ctx,
+void DivScalarCooKernel(const Context& dev_ctx,
                         const SparseCooTensor& x,
                         float scalar,
                         SparseCooTensor* out) {
@@ -41,7 +41,7 @@ void DivCooScalarKernel(const Context& dev_ctx,
 }

 template <typename T, typename Context>
-void DivCsrScalarKernel(const Context& dev_ctx,
+void DivScalarCsrKernel(const Context& dev_ctx,
                         const SparseCsrTensor& x,
                         float scalar,
                         SparseCsrTensor* out) {
@@ -97,19 +97,19 @@ PD_REGISTER_SPARSE_UNARY_CPU_KERNEL(expm1, Expm1)
 PD_REGISTER_SPARSE_UNARY_CPU_KERNEL(relu6, Relu6)
 PD_REGISTER_SPARSE_UNARY_CPU_KERNEL(leaky_relu, LeakyRelu)

-PD_REGISTER_KERNEL(divide_coo_scalar,
+PD_REGISTER_KERNEL(divide_scalar_coo,
                    CPU,
                    ALL_LAYOUT,
-                   phi::sparse::DivCooScalarKernel,
+                   phi::sparse::DivScalarCooKernel,
                    float,
                    double) {
   kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
 }

-PD_REGISTER_KERNEL(divide_csr_scalar,
+PD_REGISTER_KERNEL(divide_scalar_csr,
                    CPU,
                    ALL_LAYOUT,
-                   phi::sparse::DivCsrScalarKernel,
+                   phi::sparse::DivScalarCsrKernel,
                    float,
                    double) {
   kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_CSR);
...
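And a matching sketch for the renamed scalar-division kernel (same assumptions; the divisor is a plain float per the signature above):

phi::SparseCooTensor HalveCoo(const phi::CPUContext& dev_ctx,
                              const phi::SparseCooTensor& x) {
  phi::SparseCooTensor out;
  // Divides every stored value by 2; the indices are untouched.
  phi::sparse::DivScalarCooKernel<float, phi::CPUContext>(
      dev_ctx, x, /*scalar=*/2.0f, &out);
  return out;
}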
@@ -22,14 +22,14 @@
 namespace phi {

 template <typename T, typename Context>
-void CooFullLikeKernel(const Context& dev_ctx,
+void FullLikeCooKernel(const Context& dev_ctx,
                        const SparseCooTensor& x,
                        const Scalar& val,
                        DataType dtype,
                        SparseCooTensor* out);

 template <typename T, typename Context>
-void CsrFullLikeKernel(const Context& dev_ctx,
+void FullLikeCsrKernel(const Context& dev_ctx,
                        const SparseCsrTensor& x,
                        const Scalar& val,
                        DataType dtype,
...
@@ -27,7 +27,7 @@ namespace phi {
 namespace sparse {

 template <typename T, typename IntT>
-void CoalesceGPUKernel(const GPUContext& dev_ctx,
-                       const SparseCooTensor& x,
-                       SparseCooTensor* out) {
+void CoalesceCooGPUKernel(const GPUContext& dev_ctx,
+                          const SparseCooTensor& x,
+                          SparseCooTensor* out) {
   const DenseTensor& x_indices = x.indices();
@@ -172,20 +172,21 @@ void CoalesceGPUKernel(const GPUContext& dev_ctx,
 }

 template <typename T, typename Context>
-void CoalesceKernel(const Context& dev_ctx,
-                    const SparseCooTensor& x,
-                    SparseCooTensor* out) {
-  PD_VISIT_BASE_INTEGRAL_TYPES(x.indices().dtype(), "CoalesceGPUKernel", ([&] {
-                                 CoalesceGPUKernel<T, data_t>(dev_ctx, x, out);
-                               }));
+void CoalesceCooKernel(const Context& dev_ctx,
+                       const SparseCooTensor& x,
+                       SparseCooTensor* out) {
+  PD_VISIT_BASE_INTEGRAL_TYPES(
+      x.indices().dtype(), "CoalesceCooGPUKernel", ([&] {
+        CoalesceCooGPUKernel<T, data_t>(dev_ctx, x, out);
+      }));
 }

 }  // namespace sparse
 }  // namespace phi

-PD_REGISTER_KERNEL(coalesce,
+PD_REGISTER_KERNEL(coalesce_coo,
                    GPU,
                    ALL_LAYOUT,
-                   phi::sparse::CoalesceKernel,
+                   phi::sparse::CoalesceCooKernel,
                    float,
                    double,
                    phi::dtype::float16,
...
@@ -37,7 +37,7 @@ struct FullFunctor {
 };

 template <typename T, typename Context>
-void CooFullLikeKernel(const Context& dev_ctx,
+void FullLikeCooKernel(const Context& dev_ctx,
                        const SparseCooTensor& x,
                        const Scalar& val,
                        DataType dtype,
@@ -60,7 +60,7 @@ void CooFullLikeKernel(const Context& dev_ctx,
 }

 template <typename T, typename Context>
-void CsrFullLikeKernel(const Context& dev_ctx,
+void FullLikeCsrKernel(const Context& dev_ctx,
                        const SparseCsrTensor& x,
                        const Scalar& val,
                        DataType dtype,
@@ -87,10 +87,10 @@ void CsrFullLikeKernel(const Context& dev_ctx,
 }  // namespace phi

-PD_REGISTER_KERNEL(coo_full_like,
+PD_REGISTER_KERNEL(full_like_coo,
                    GPU,
                    ALL_LAYOUT,
-                   phi::CooFullLikeKernel,
+                   phi::FullLikeCooKernel,
                    float,
                    double,
                    uint8_t,
@@ -105,10 +105,10 @@ PD_REGISTER_KERNEL(coo_full_like,
   kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
 }

-PD_REGISTER_KERNEL(csr_full_like,
+PD_REGISTER_KERNEL(full_like_csr,
                    GPU,
                    ALL_LAYOUT,
-                   phi::CsrFullLikeKernel,
+                   phi::FullLikeCsrKernel,
                    float,
                    double,
                    uint8_t,
...
@@ -34,7 +34,7 @@ struct DivScalarFunctor {
 };

 template <typename T, typename Context>
-void DivCooScalarKernel(const Context& dev_ctx,
+void DivScalarCooKernel(const Context& dev_ctx,
                         const SparseCooTensor& x,
                         float scalar,
                         SparseCooTensor* out) {
@@ -47,7 +47,7 @@ void DivCooScalarKernel(const Context& dev_ctx,
 }

 template <typename T, typename Context>
-void DivCsrScalarKernel(const Context& dev_ctx,
+void DivScalarCsrKernel(const Context& dev_ctx,
                         const SparseCsrTensor& x,
                         float scalar,
                         SparseCsrTensor* out) {
@@ -102,19 +102,19 @@ PD_REGISTER_SPARSE_UNARY_GPU_KERNEL(expm1, Expm1)
 PD_REGISTER_SPARSE_UNARY_GPU_KERNEL(relu6, Relu6)
 PD_REGISTER_SPARSE_UNARY_GPU_KERNEL(leaky_relu, LeakyRelu)

-PD_REGISTER_KERNEL(divide_coo_scalar,
+PD_REGISTER_KERNEL(divide_scalar_coo,
                    GPU,
                    ALL_LAYOUT,
-                   phi::sparse::DivCooScalarKernel,
+                   phi::sparse::DivScalarCooKernel,
                    float,
                    double) {
   kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
 }

-PD_REGISTER_KERNEL(divide_csr_scalar,
+PD_REGISTER_KERNEL(divide_scalar_csr,
                    GPU,
                    ALL_LAYOUT,
-                   phi::sparse::DivCsrScalarKernel,
+                   phi::sparse::DivScalarCsrKernel,
                    float,
                    double) {
   kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_CSR);
...
@@ -74,13 +74,13 @@ void ScaleCsrKernel(const Context& dev_ctx,
                     SparseCsrTensor* out);

 template <typename T, typename Context>
-void DivCooScalarKernel(const Context& dev_ctx,
+void DivScalarCooKernel(const Context& dev_ctx,
                         const SparseCooTensor& x,
                         float scalar,
                         SparseCooTensor* out);

 template <typename T, typename Context>
-void DivCsrScalarKernel(const Context& dev_ctx,
+void DivScalarCsrKernel(const Context& dev_ctx,
                         const SparseCsrTensor& x,
                         float scalar,
                         SparseCsrTensor* out);
...
@@ -212,7 +212,7 @@ void TestConv3dBase(const std::vector<IntT>& indices,
                            "Conv3d",
                            &d_rulebook,
                            &d_counter);
-    SparseCooTensor tmp_d_out = sparse::Coalesce<T>(dev_ctx_gpu, d_out);
+    SparseCooTensor tmp_d_out = sparse::CoalesceCoo<T>(dev_ctx_gpu, d_out);
     ASSERT_EQ(correct_out_dims.size(), d_out.dims().size());
     ASSERT_EQ((int64_t)correct_out_features.size() / out_channels, d_out.nnz());
...
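A plausible reading of why the test coalesces first (an inference from the test flow, not stated in this diff): the GPU kernel may emit nonzeros in a device-dependent order, and CoalesceCoo canonicalizes the indices so the elementwise comparison that follows is order-independent:

// Condensed from the test above; all names come from the surrounding test body.
SparseCooTensor tmp_d_out = sparse::CoalesceCoo<T>(dev_ctx_gpu, d_out);
// tmp_d_out now has merged, canonically ordered entries and can be compared
// entry-by-entry against the CPU reference output.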
@@ -161,7 +161,7 @@ void TestMaxPoolBase(const std::vector<IntT>& indices,
                            &d_rulebook,
                            &d_counter);
-    SparseCooTensor tmp_d_out = sparse::Coalesce<T>(dev_ctx_gpu, d_out);
+    SparseCooTensor tmp_d_out = sparse::CoalesceCoo<T>(dev_ctx_gpu, d_out);
     ASSERT_EQ(correct_out_dims.size(), d_out.dims().size());
     ASSERT_EQ((int64_t)correct_out_features.size() / out_channels, d_out.nnz());
...
@@ -144,7 +144,7 @@ class Conv3D(_Conv3D):
     Parameters:
         in_channels(int): The number of input channels in the input image.
         out_channels(int): The number of output channels produced by the convolution.
-        kernel_size(int|list|tuple, optional): The size of the convolving kernel.
+        kernel_size(int|list|tuple): The size of the convolving kernel.
         stride(int|list|tuple, optional): The stride size. If stride is a list/tuple, it must
             contain three integers, (stride_D, stride_H, stride_W). Otherwise, the
             stride_D = stride_H = stride_W = stride. The default value is 1.
@@ -277,7 +277,7 @@ class SubmConv3D(_Conv3D):
     Parameters:
         in_channels(int): The number of input channels in the input image.
         out_channels(int): The number of output channels produced by the convolution.
-        kernel_size(int|list|tuple, optional): The size of the convolving kernel.
+        kernel_size(int|list|tuple): The size of the convolving kernel.
         stride(int|list|tuple, optional): The stride size. If stride is a list/tuple, it must
             contain three integers, (stride_D, stride_H, stride_W). Otherwise, the
             stride_D = stride_H = stride_W = stride. The default value is 1.
...