Unverified · Commit a9cc5482 · authored by zhangkaihuo · committed by GitHub

[Sparse] Rename and fix doc (#46853)

Parent fe716a0b
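Summary: this commit renames Paddle's sparse kernels so the storage-format tag becomes a suffix: coalesce -> coalesce_coo, coo_full_like / csr_full_like -> full_like_coo / full_like_csr, divide_coo_scalar / divide_csr_scalar -> divide_scalar_coo / divide_scalar_csr, together with the matching C++ symbols (e.g. CoalesceKernel -> CoalesceCooKernel). The sparse conv3d backward op goes the other way, from conv3d_coo_grad to conv3d_grad, so op-level names stay layout-free while kernel names carry the layout suffix. The doc fix removes the incorrect "optional" tag from kernel_size in the Conv3D and SubmConv3D docstrings, since that parameter has no default value.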
@@ -124,8 +124,8 @@
            cast_csr_grad {sparse_csr, sparse_csr -> sparse_csr}
     data_type : out_grad

-- backward_op : conv3d_coo_grad
-  forward : conv3d_coo (Tensor x, Tensor kernel, int[] paddings, int[] dilations, int[] strides, int groups, bool subm, str key) -> Tensor(out), Tensor(rulebook), Tensor(counter)
+- backward_op : conv3d_grad
+  forward : conv3d (Tensor x, Tensor kernel, int[] paddings, int[] dilations, int[] strides, int groups, bool subm, str key) -> Tensor(out), Tensor(rulebook), Tensor(counter)
   args : (Tensor x, Tensor kernel, Tensor out, Tensor rulebook, Tensor counter, Tensor out_grad, int[] paddings, int[] dilations, int[] strides, int groups, bool subm, str key)
   output : Tensor(x_grad), Tensor(kernel_grad)
   infer_meta :
......
@@ -119,7 +119,7 @@
     func : conv3d_coo{sparse_coo, dense -> sparse_coo, dense, dense}
     layout : x
   intermediate: rulebook, counter
-  backward : conv3d_coo_grad
+  backward : conv3d_grad

 - op : divide
   args : (Tensor x, Tensor y)
@@ -139,8 +139,8 @@
     func : UnchangedInferMeta
     param : [x]
   kernel :
-    func : divide_coo_scalar{sparse_coo -> sparse_coo},
-           divide_csr_scalar{sparse_csr -> sparse_csr}
+    func : divide_scalar_coo{sparse_coo -> sparse_coo},
+           divide_scalar_csr{sparse_csr -> sparse_csr}
   backward : divide_scalar_grad

 - op : expm1
@@ -393,7 +393,7 @@
   infer_meta :
     func : UnchangedInferMeta
   kernel :
-    func: coalesce{sparse_coo -> sparse_coo}
+    func: coalesce_coo{sparse_coo -> sparse_coo}
   layout : x

 - op: full_like
@@ -403,8 +403,8 @@
     func : CreateLikeInferMeta
     param : [x, dtype]
   kernel :
-    func : coo_full_like{sparse_coo -> sparse_coo},
-           csr_full_like{sparse_csr -> sparse_csr}
+    func : full_like_coo{sparse_coo -> sparse_coo},
+           full_like_csr{sparse_csr -> sparse_csr}
   layout : x
   data_type : dtype
......
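Note on the yaml hunks above: the name inside "func : name{layout -> layout}" must match the kernel name registered with PD_REGISTER_KERNEL on the C++ side, which is why the yaml entries and the kernel registrations in the files below are renamed in lockstep.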
@@ -22,14 +22,14 @@ namespace phi {
 namespace sparse {

 template <typename T, typename Context>
-void CoalesceKernel(const Context& dev_ctx,
+void CoalesceCooKernel(const Context& dev_ctx,
                     const SparseCooTensor& x,
                     SparseCooTensor* out);

 template <typename T, typename Context>
-SparseCooTensor Coalesce(const Context& dev_ctx, const SparseCooTensor& x) {
+SparseCooTensor CoalesceCoo(const Context& dev_ctx, const SparseCooTensor& x) {
   SparseCooTensor coo;
-  CoalesceKernel<T, Context>(dev_ctx, x, &coo);
+  CoalesceCooKernel<T, Context>(dev_ctx, x, &coo);
   return coo;
 }
......
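For orientation, the renamed inline wrapper above is what call sites use (the test diffs near the end switch from sparse::Coalesce&lt;T&gt; to sparse::CoalesceCoo&lt;T&gt; accordingly). A minimal usage sketch, assuming a ready device context and an already-built SparseCooTensor; the include path and the demo function are illustrative, not part of the diff:

```cpp
#include "paddle/phi/kernels/sparse/coalesce_kernel.h"  // assumed header path

// Sketch: merge duplicate indices of a COO tensor into canonical form.
// CoalesceCoo is the renamed convenience wrapper declared above; it
// allocates the output, runs CoalesceCooKernel, and returns by value.
template <typename Context>
phi::SparseCooTensor CoalesceDemo(const Context& dev_ctx,
                                  const phi::SparseCooTensor& x) {
  return phi::sparse::CoalesceCoo<float>(dev_ctx, x);
}
```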
@@ -22,7 +22,7 @@ namespace phi {
 namespace sparse {

 template <typename T, typename IntT>
-void CoalesceCPUKernel(const CPUContext& dev_ctx,
+void CoalesceCooCPUKernel(const CPUContext& dev_ctx,
                        const SparseCooTensor& x,
                        SparseCooTensor* out) {
   const DenseTensor& x_indices = x.indices();
@@ -95,21 +95,22 @@ void CoalesceCPUKernel(const CPUContext& dev_ctx,
 }

 template <typename T, typename Context>
-void CoalesceKernel(const Context& dev_ctx,
+void CoalesceCooKernel(const Context& dev_ctx,
                     const SparseCooTensor& x,
                     SparseCooTensor* out) {
-  PD_VISIT_BASE_INTEGRAL_TYPES(x.indices().dtype(), "CoalesceCPUKernel", ([&] {
-    CoalesceCPUKernel<T, data_t>(dev_ctx, x, out);
+  PD_VISIT_BASE_INTEGRAL_TYPES(
+      x.indices().dtype(), "CoalesceCooCPUKernel", ([&] {
+        CoalesceCooCPUKernel<T, data_t>(dev_ctx, x, out);
   }));
 }

 }  // namespace sparse
 }  // namespace phi

-PD_REGISTER_KERNEL(coalesce,
+PD_REGISTER_KERNEL(coalesce_coo,
                    CPU,
                    ALL_LAYOUT,
-                   phi::sparse::CoalesceKernel,
+                   phi::sparse::CoalesceCooKernel,
                    float,
                    double,
                    phi::dtype::float16,
......
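The PD_VISIT_BASE_INTEGRAL_TYPES call in the hunk above dispatches on the runtime dtype of x.indices() (int32 vs int64) and exposes the chosen index type to the lambda as data_t. A stripped-down, framework-free sketch of that dispatch pattern; this illustrates the idea and is not the real macro:

```cpp
#include <cstdint>
#include <stdexcept>

enum class IndexDType { kInt32, kInt64 };  // stand-in for phi::DataType

// Invokes `visit` with a value whose static type is the concrete index
// type; the callback recovers it via decltype, playing the role that
// data_t plays inside the PD_VISIT_BASE_INTEGRAL_TYPES lambda.
template <typename Visitor>
void VisitIndexType(IndexDType dtype, Visitor&& visit) {
  switch (dtype) {
    case IndexDType::kInt32:
      visit(int32_t{0});
      break;
    case IndexDType::kInt64:
      visit(int64_t{0});
      break;
    default:
      throw std::runtime_error("unsupported index type");
  }
}

// Usage mirroring the kernel body (pseudo-call, names from the diff):
// VisitIndexType(dtype, [&](auto tag) {
//   using data_t = decltype(tag);
//   CoalesceCooCPUKernel<T, data_t>(dev_ctx, x, out);
// });
```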
@@ -31,7 +31,7 @@ void FullValue(const Context& dev_ctx, DenseTensor* tensor, T val) {
 }

 template <typename T, typename Context>
-void CooFullLikeKernel(const Context& dev_ctx,
+void FullLikeCooKernel(const Context& dev_ctx,
                        const SparseCooTensor& x,
                        const Scalar& val,
                        DataType dtype,
@@ -51,7 +51,7 @@ void CooFullLikeKernel(const Context& dev_ctx,
 }

 template <typename T, typename Context>
-void CsrFullLikeKernel(const Context& dev_ctx,
+void FullLikeCsrKernel(const Context& dev_ctx,
                        const SparseCsrTensor& x,
                        const Scalar& val,
                        DataType dtype,
@@ -78,10 +78,10 @@ void CsrFullLikeKernel(const Context& dev_ctx,
 }  // namespace phi

-PD_REGISTER_KERNEL(coo_full_like,
+PD_REGISTER_KERNEL(full_like_coo,
                    CPU,
                    ALL_LAYOUT,
-                   phi::CooFullLikeKernel,
+                   phi::FullLikeCooKernel,
                    float,
                    double,
                    uint8_t,
@@ -96,10 +96,10 @@ PD_REGISTER_KERNEL(coo_full_like,
   kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
 }

-PD_REGISTER_KERNEL(csr_full_like,
+PD_REGISTER_KERNEL(full_like_csr,
                    CPU,
                    ALL_LAYOUT,
-                   phi::CsrFullLikeKernel,
+                   phi::FullLikeCsrKernel,
                    float,
                    double,
                    uint8_t,
......
@@ -25,7 +25,7 @@ namespace phi {
 namespace sparse {

 template <typename T, typename Context>
-void DivCooScalarKernel(const Context& dev_ctx,
+void DivScalarCooKernel(const Context& dev_ctx,
                         const SparseCooTensor& x,
                         float scalar,
                         SparseCooTensor* out) {
@@ -41,7 +41,7 @@ void DivCooScalarKernel(const Context& dev_ctx,
 }

 template <typename T, typename Context>
-void DivCsrScalarKernel(const Context& dev_ctx,
+void DivScalarCsrKernel(const Context& dev_ctx,
                         const SparseCsrTensor& x,
                         float scalar,
                         SparseCsrTensor* out) {
@@ -97,19 +97,19 @@ PD_REGISTER_SPARSE_UNARY_CPU_KERNEL(expm1, Expm1)
 PD_REGISTER_SPARSE_UNARY_CPU_KERNEL(relu6, Relu6)
 PD_REGISTER_SPARSE_UNARY_CPU_KERNEL(leaky_relu, LeakyRelu)

-PD_REGISTER_KERNEL(divide_coo_scalar,
+PD_REGISTER_KERNEL(divide_scalar_coo,
                    CPU,
                    ALL_LAYOUT,
-                   phi::sparse::DivCooScalarKernel,
+                   phi::sparse::DivScalarCooKernel,
                    float,
                    double) {
   kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
 }

-PD_REGISTER_KERNEL(divide_csr_scalar,
+PD_REGISTER_KERNEL(divide_scalar_csr,
                    CPU,
                    ALL_LAYOUT,
-                   phi::sparse::DivCsrScalarKernel,
+                   phi::sparse::DivScalarCsrKernel,
                    float,
                    double) {
   kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_CSR);
......
@@ -22,14 +22,14 @@
 namespace phi {

 template <typename T, typename Context>
-void CooFullLikeKernel(const Context& dev_ctx,
+void FullLikeCooKernel(const Context& dev_ctx,
                        const SparseCooTensor& x,
                        const Scalar& val,
                        DataType dtype,
                        SparseCooTensor* out);

 template <typename T, typename Context>
-void CsrFullLikeKernel(const Context& dev_ctx,
+void FullLikeCsrKernel(const Context& dev_ctx,
                        const SparseCsrTensor& x,
                        const Scalar& val,
                        DataType dtype,
......
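Going by the declarations just above, the rename keeps the signatures intact; a minimal call sketch for the COO variant (the device context, a populated x, and the wrapper function are assumed for illustration):

```cpp
// Sketch only: FullLikeCooKernel produces a tensor that reuses x's
// sparsity pattern and fills the stored values with `val` cast to
// `dtype`. Signature as declared in the header above.
template <typename Context>
phi::SparseCooTensor OnesLikeCoo(const Context& dev_ctx,
                                 const phi::SparseCooTensor& x) {
  phi::SparseCooTensor out;
  phi::FullLikeCooKernel<float>(
      dev_ctx, x, /*val=*/phi::Scalar(1.0f), phi::DataType::FLOAT32, &out);
  return out;
}
```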
@@ -27,7 +27,7 @@ namespace phi {
 namespace sparse {

 template <typename T, typename IntT>
-void CoalesceGPUKernel(const GPUContext& dev_ctx,
+void CoalesceCooGPUKernel(const GPUContext& dev_ctx,
                        const SparseCooTensor& x,
                        SparseCooTensor* out) {
   const DenseTensor& x_indices = x.indices();
@@ -172,20 +172,21 @@ void CoalesceGPUKernel(const GPUContext& dev_ctx,
 }

 template <typename T, typename Context>
-void CoalesceKernel(const Context& dev_ctx,
+void CoalesceCooKernel(const Context& dev_ctx,
                     const SparseCooTensor& x,
                     SparseCooTensor* out) {
-  PD_VISIT_BASE_INTEGRAL_TYPES(x.indices().dtype(), "CoalesceGPUKernel", ([&] {
-    CoalesceGPUKernel<T, data_t>(dev_ctx, x, out);
+  PD_VISIT_BASE_INTEGRAL_TYPES(
+      x.indices().dtype(), "CoalesceCooGPUKernel", ([&] {
+        CoalesceCooGPUKernel<T, data_t>(dev_ctx, x, out);
   }));
 }

 }  // namespace sparse
 }  // namespace phi

-PD_REGISTER_KERNEL(coalesce,
+PD_REGISTER_KERNEL(coalesce_coo,
                    GPU,
                    ALL_LAYOUT,
-                   phi::sparse::CoalesceKernel,
+                   phi::sparse::CoalesceCooKernel,
                    float,
                    double,
                    phi::dtype::float16,
......
@@ -37,7 +37,7 @@ struct FullFunctor {
 };

 template <typename T, typename Context>
-void CooFullLikeKernel(const Context& dev_ctx,
+void FullLikeCooKernel(const Context& dev_ctx,
                        const SparseCooTensor& x,
                        const Scalar& val,
                        DataType dtype,
@@ -60,7 +60,7 @@ void CooFullLikeKernel(const Context& dev_ctx,
 }

 template <typename T, typename Context>
-void CsrFullLikeKernel(const Context& dev_ctx,
+void FullLikeCsrKernel(const Context& dev_ctx,
                        const SparseCsrTensor& x,
                        const Scalar& val,
                        DataType dtype,
@@ -87,10 +87,10 @@ void CsrFullLikeKernel(const Context& dev_ctx,
 }  // namespace phi

-PD_REGISTER_KERNEL(coo_full_like,
+PD_REGISTER_KERNEL(full_like_coo,
                    GPU,
                    ALL_LAYOUT,
-                   phi::CooFullLikeKernel,
+                   phi::FullLikeCooKernel,
                    float,
                    double,
                    uint8_t,
@@ -105,10 +105,10 @@ PD_REGISTER_KERNEL(coo_full_like,
   kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
 }

-PD_REGISTER_KERNEL(csr_full_like,
+PD_REGISTER_KERNEL(full_like_csr,
                    GPU,
                    ALL_LAYOUT,
-                   phi::CsrFullLikeKernel,
+                   phi::FullLikeCsrKernel,
                    float,
                    double,
                    uint8_t,
......
@@ -34,7 +34,7 @@ struct DivScalarFunctor {
 };

 template <typename T, typename Context>
-void DivCooScalarKernel(const Context& dev_ctx,
+void DivScalarCooKernel(const Context& dev_ctx,
                         const SparseCooTensor& x,
                         float scalar,
                         SparseCooTensor* out) {
@@ -47,7 +47,7 @@ void DivCooScalarKernel(const Context& dev_ctx,
 }

 template <typename T, typename Context>
-void DivCsrScalarKernel(const Context& dev_ctx,
+void DivScalarCsrKernel(const Context& dev_ctx,
                         const SparseCsrTensor& x,
                         float scalar,
                         SparseCsrTensor* out) {
@@ -102,19 +102,19 @@ PD_REGISTER_SPARSE_UNARY_GPU_KERNEL(expm1, Expm1)
 PD_REGISTER_SPARSE_UNARY_GPU_KERNEL(relu6, Relu6)
 PD_REGISTER_SPARSE_UNARY_GPU_KERNEL(leaky_relu, LeakyRelu)

-PD_REGISTER_KERNEL(divide_coo_scalar,
+PD_REGISTER_KERNEL(divide_scalar_coo,
                    GPU,
                    ALL_LAYOUT,
-                   phi::sparse::DivCooScalarKernel,
+                   phi::sparse::DivScalarCooKernel,
                    float,
                    double) {
   kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
 }

-PD_REGISTER_KERNEL(divide_csr_scalar,
+PD_REGISTER_KERNEL(divide_scalar_csr,
                    GPU,
                    ALL_LAYOUT,
-                   phi::sparse::DivCsrScalarKernel,
+                   phi::sparse::DivScalarCsrKernel,
                    float,
                    double) {
   kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_CSR);
......
@@ -74,13 +74,13 @@ void ScaleCsrKernel(const Context& dev_ctx,
                     SparseCsrTensor* out);

 template <typename T, typename Context>
-void DivCooScalarKernel(const Context& dev_ctx,
+void DivScalarCooKernel(const Context& dev_ctx,
                         const SparseCooTensor& x,
                         float scalar,
                         SparseCooTensor* out);

 template <typename T, typename Context>
-void DivCsrScalarKernel(const Context& dev_ctx,
+void DivScalarCsrKernel(const Context& dev_ctx,
                         const SparseCsrTensor& x,
                         float scalar,
                         SparseCsrTensor* out);
......
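One property worth noting about the renamed scalar-division kernels declared above: dividing by a scalar maps zero to zero, so only the stored non-zero values change and the sparsity pattern can be reused as-is. A toy, framework-free illustration of that invariant (TinyCoo and DivScalar are made-up names for the example):

```cpp
#include <cstdint>
#include <vector>

// Toy stand-in for a COO tensor: indices give the sparsity pattern,
// values hold the non-zeros.
struct TinyCoo {
  std::vector<int64_t> indices;  // untouched by scalar division
  std::vector<float> values;
};

// Conceptually what DivScalarCooKernel does: an elementwise operation
// over the values array only, since 0 / scalar == 0.
TinyCoo DivScalar(TinyCoo x, float scalar) {
  for (float& v : x.values) v /= scalar;
  return x;
}
```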
@@ -212,7 +212,7 @@ void TestConv3dBase(const std::vector<IntT>& indices,
                            "Conv3d",
                            &d_rulebook,
                            &d_counter);
-    SparseCooTensor tmp_d_out = sparse::Coalesce<T>(dev_ctx_gpu, d_out);
+    SparseCooTensor tmp_d_out = sparse::CoalesceCoo<T>(dev_ctx_gpu, d_out);
     ASSERT_EQ(correct_out_dims.size(), d_out.dims().size());
     ASSERT_EQ((int64_t)correct_out_features.size() / out_channels, d_out.nnz());
......
@@ -161,7 +161,7 @@ void TestMaxPoolBase(const std::vector<IntT>& indices,
                          &d_rulebook,
                          &d_counter);

-    SparseCooTensor tmp_d_out = sparse::Coalesce<T>(dev_ctx_gpu, d_out);
+    SparseCooTensor tmp_d_out = sparse::CoalesceCoo<T>(dev_ctx_gpu, d_out);
     ASSERT_EQ(correct_out_dims.size(), d_out.dims().size());
     ASSERT_EQ((int64_t)correct_out_features.size() / out_channels, d_out.nnz());
......
@@ -144,7 +144,7 @@ class Conv3D(_Conv3D):
     Parameters:
         in_channels(int): The number of input channels in the input image.
         out_channels(int): The number of output channels produced by the convolution.
-        kernel_size(int|list|tuple, optional): The size of the convolving kernel.
+        kernel_size(int|list|tuple): The size of the convolving kernel.
         stride(int|list|tuple, optional): The stride size. If stride is a list/tuple, it must
             contain three integers, (stride_D, stride_H, stride_W). Otherwise, the
             stride_D = stride_H = stride_W = stride. The default value is 1.
@@ -277,7 +277,7 @@ class SubmConv3D(_Conv3D):
     Parameters:
         in_channels(int): The number of input channels in the input image.
         out_channels(int): The number of output channels produced by the convolution.
-        kernel_size(int|list|tuple, optional): The size of the convolving kernel.
+        kernel_size(int|list|tuple): The size of the convolving kernel.
         stride(int|list|tuple, optional): The stride size. If stride is a list/tuple, it must
             contain three integers, (stride_D, stride_H, stride_W). Otherwise, the
             stride_D = stride_H = stride_W = stride. The default value is 1.
......