Unverified commit 87443831, authored by zhangkaihuo, committed by GitHub

Standard sparse conv name (#44353)

Parent 0dafbb03
......@@ -84,7 +84,7 @@
args : (Tensor x, Tensor kernel, int[] paddings, int[] dilations, int[] strides, int groups, bool subm)
output : Tensor(out), Tensor(rulebook)
kernel :
func : sparse_conv3d{sparse_coo, dense -> sparse_coo, dense}
func : conv3d_coo{sparse_coo, dense -> sparse_coo, dense}
layout : x
intermediate : rulebook
backward : conv3d_grad
......
......@@ -76,7 +76,7 @@
args : (Tensor x, Tensor kernel, Tensor rulebook, Tensor out_grad, int[] paddings, int[] dilations, int[] strides, int groups, bool subm)
output : Tensor(x_grad), Tensor(kernel_grad)
kernel :
func : sparse_conv3d_grad{sparse_coo, dense, dense, sparse_coo -> sparse_coo, dense}
func : conv3d_coo_grad{sparse_coo, dense, dense, sparse_coo -> sparse_coo, dense}
- backward_api : coo_to_dense_grad
forward : coo_to_dense(Tensor x) -> Tensor(out)
......
......@@ -17,13 +17,12 @@ limitations under the License. */
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/sparse_coo_tensor.h"
#include "paddle/phi/kernels/empty_kernel.h"
#include "paddle/phi/kernels/sparse/convolution_kernel.h"
namespace phi {
namespace sparse {
template <typename T, typename Context>
void Conv3dGradKernel(const Context& dev_ctx,
void Conv3dCooGradKernel(const Context& dev_ctx,
const SparseCooTensor& x,
const DenseTensor& kernel,
const DenseTensor& rulebook,
......@@ -37,7 +36,7 @@ void Conv3dGradKernel(const Context& dev_ctx,
DenseTensor* kernel_grad);
template <typename T, typename Context>
std::tuple<SparseCooTensor, DenseTensor> Conv3dGrad(
std::tuple<SparseCooTensor, DenseTensor> Conv3dCooGrad(
const Context& dev_ctx,
const SparseCooTensor& x,
const DenseTensor& kernel,
......@@ -52,7 +51,7 @@ std::tuple<SparseCooTensor, DenseTensor> Conv3dGrad(
DenseTensor kernel_grad;
// TODO(zhangkaihuo): call InferMeta func here
Conv3dGradKernel<T, Context>(dev_ctx,
Conv3dCooGradKernel<T, Context>(dev_ctx,
x,
kernel,
rulebook,
......
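For reference, a minimal sketch of how the renamed tuple-returning wrapper declared in this header could be called. The argument order for the parts elided by the hunk is assumed to follow the backward op definition above (out_grad, paddings, dilations, strides, groups, subm), the helper name `RunConv3dCooBackward` is hypothetical, and tensor/device-context setup is left to the caller (compare the unit-test usage further down in this diff):

```cpp
#include <tuple>
#include <vector>

#include "paddle/phi/kernels/sparse/conv_grad_kernel.h"

// Sketch: compute sparse conv3d gradients through the renamed wrapper.
// x, kernel, rulebook and out_grad are assumed to be prepared by the caller,
// e.g. by a prior forward Conv3dCoo call that produced the rulebook.
template <typename T, typename Context>
void RunConv3dCooBackward(const Context& dev_ctx,
                          const phi::SparseCooTensor& x,
                          const phi::DenseTensor& kernel,
                          const phi::DenseTensor& rulebook,
                          const phi::SparseCooTensor& out_grad,
                          const std::vector<int>& paddings,
                          const std::vector<int>& dilations,
                          const std::vector<int>& strides,
                          phi::SparseCooTensor* x_grad,
                          phi::DenseTensor* kernel_grad) {
  // Returns {x_grad, kernel_grad}, matching the backward op's outputs.
  std::tuple<phi::SparseCooTensor, phi::DenseTensor> grads =
      phi::sparse::Conv3dCooGrad<T>(dev_ctx,
                                    x,
                                    kernel,
                                    rulebook,
                                    out_grad,
                                    paddings,
                                    dilations,
                                    strides,
                                    /*groups=*/1,
                                    /*subm=*/false);
  *x_grad = std::get<0>(grads);
  *kernel_grad = std::get<1>(grads);
}
```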
......@@ -23,7 +23,7 @@ namespace phi {
namespace sparse {
template <typename T, typename Context>
void Conv3dKernel(const Context& dev_ctx,
void Conv3dCooKernel(const Context& dev_ctx,
const SparseCooTensor& x,
const DenseTensor& kernel,
const std::vector<int>& paddings,
......@@ -35,7 +35,7 @@ void Conv3dKernel(const Context& dev_ctx,
DenseTensor* rulebook);
template <typename T, typename Context>
SparseCooTensor Conv3d(const Context& dev_ctx,
SparseCooTensor Conv3dCoo(const Context& dev_ctx,
const SparseCooTensor& x,
const DenseTensor kernel,
const std::vector<int>& paddings,
......@@ -45,7 +45,7 @@ SparseCooTensor Conv3d(const Context& dev_ctx,
const bool subm,
DenseTensor* rulebook) {
SparseCooTensor coo;
Conv3dKernel<T, Context>(dev_ctx,
Conv3dCooKernel<T, Context>(dev_ctx,
x,
kernel,
paddings,
......
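Similarly, a minimal sketch of invoking the renamed forward wrapper above. The middle arguments elided by the hunk are assumed to mirror the forward op definition (dilations, strides, groups), the helper name and the unit padding/dilation/stride values are illustrative only, and tensor construction is left to the caller, as in the unit test further down in this diff:

```cpp
#include <vector>

#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/sparse_coo_tensor.h"
#include "paddle/phi/kernels/sparse/conv_kernel.h"

// Sketch: run the renamed sparse 3D convolution. The rulebook written here
// records the input/output index mapping and is what the backward wrapper
// (Conv3dCooGrad) later consumes.
template <typename T, typename Context>
phi::SparseCooTensor RunConv3dCooForward(const Context& dev_ctx,
                                         const phi::SparseCooTensor& x,
                                         const phi::DenseTensor& kernel,
                                         phi::DenseTensor* rulebook) {
  const std::vector<int> paddings = {1, 1, 1};
  const std::vector<int> dilations = {1, 1, 1};
  const std::vector<int> strides = {1, 1, 1};
  return phi::sparse::Conv3dCoo<T>(dev_ctx,
                                   x,
                                   kernel,
                                   paddings,
                                   dilations,
                                   strides,
                                   /*groups=*/1,
                                   /*subm=*/false,
                                   rulebook);
}
```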
......@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/phi/kernels/sparse/convolution_grad_kernel.h"
#include "paddle/phi/kernels/sparse/conv_grad_kernel.h"
#include "paddle/phi/core/visit_type.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
......@@ -31,7 +31,7 @@ namespace sparse {
// x_grad = out_grad * transpose(kernel)
// kernel_grad = transpose(x) * out_grad
template <typename T, typename IntT = int>
void Conv3dGradCPUKernel(const CPUContext& dev_ctx,
void Conv3dCooGradCPUKernel(const CPUContext& dev_ctx,
const SparseCooTensor& x,
const DenseTensor& kernel,
const DenseTensor& rulebook,
......@@ -178,7 +178,7 @@ void Conv3dGradCPUKernel(const CPUContext& dev_ctx,
}
template <typename T, typename Context>
void Conv3dGradKernel(const Context& dev_ctx,
void Conv3dCooGradKernel(const Context& dev_ctx,
const SparseCooTensor& x,
const DenseTensor& kernel,
const DenseTensor& rulebook,
......@@ -191,8 +191,8 @@ void Conv3dGradKernel(const Context& dev_ctx,
SparseCooTensor* x_grad,
DenseTensor* kernel_grad) {
PD_VISIT_INTEGRAL_TYPES(
x.non_zero_indices().dtype(), "Conv3dGradCPUKernel", ([&] {
Conv3dGradCPUKernel<T, data_t>(dev_ctx,
x.non_zero_indices().dtype(), "Conv3dCooGradCPUKernel", ([&] {
Conv3dCooGradCPUKernel<T, data_t>(dev_ctx,
x,
kernel,
rulebook,
......@@ -210,10 +210,10 @@ void Conv3dGradKernel(const Context& dev_ctx,
} // namespace sparse
} // namespace phi
PD_REGISTER_KERNEL(sparse_conv3d_grad,
PD_REGISTER_KERNEL(conv3d_coo_grad,
CPU,
ALL_LAYOUT,
phi::sparse::Conv3dGradKernel,
phi::sparse::Conv3dCooGradKernel,
float,
double) {
kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
......
......@@ -27,7 +27,7 @@ namespace sparse {
* out: (N, D, H, W, OC)
**/
template <typename T, typename IntT = int>
void Conv3dCPUKernel(const CPUContext& dev_ctx,
void Conv3dCooCPUKernel(const CPUContext& dev_ctx,
const SparseCooTensor& x,
const DenseTensor& kernel,
const std::vector<int>& paddings,
......@@ -151,7 +151,7 @@ void Conv3dCPUKernel(const CPUContext& dev_ctx,
}
template <typename T, typename Context>
void Conv3dKernel(const Context& dev_ctx,
void Conv3dCooKernel(const Context& dev_ctx,
const SparseCooTensor& x,
const DenseTensor& kernel,
const std::vector<int>& paddings,
......@@ -162,8 +162,8 @@ void Conv3dKernel(const Context& dev_ctx,
SparseCooTensor* out,
DenseTensor* rulebook) {
PD_VISIT_INTEGRAL_TYPES(
x.non_zero_indices().dtype(), "Conv3dCPUKernel", ([&] {
Conv3dCPUKernel<T, data_t>(dev_ctx,
x.non_zero_indices().dtype(), "Conv3dCooCPUKernel", ([&] {
Conv3dCooCPUKernel<T, data_t>(dev_ctx,
x,
kernel,
paddings,
......@@ -180,6 +180,6 @@ void Conv3dKernel(const Context& dev_ctx,
} // namespace phi
PD_REGISTER_KERNEL(
sparse_conv3d, CPU, ALL_LAYOUT, phi::sparse::Conv3dKernel, float, double) {
conv3d_coo, CPU, ALL_LAYOUT, phi::sparse::Conv3dCooKernel, float, double) {
kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
}
......@@ -21,7 +21,7 @@ limitations under the License. */
#include "paddle/phi/core/sparse_coo_tensor.h"
#include "paddle/phi/core/tensor_meta.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
#include "paddle/phi/kernels/sparse/convolution_kernel.h"
#include "paddle/phi/kernels/sparse/conv_kernel.h"
namespace phi {
namespace sparse {
......
......@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/phi/kernels/sparse/convolution_grad_kernel.h"
#include "paddle/phi/kernels/sparse/conv_grad_kernel.h"
#include "glog/logging.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
......@@ -39,7 +39,7 @@ namespace sparse {
// x_grad = out_grad * transpose(kernel)
// kernel_grad = transpose(x) * out_grad
template <typename T, typename IntT>
void Conv3dGradGPUKernel(const GPUContext& dev_ctx,
void Conv3dCooGradGPUKernel(const GPUContext& dev_ctx,
const SparseCooTensor& x,
const DenseTensor& kernel,
const DenseTensor& rulebook,
......@@ -220,7 +220,7 @@ void Conv3dGradGPUKernel(const GPUContext& dev_ctx,
}
template <typename T, typename Context>
void Conv3dGradKernel(const Context& dev_ctx,
void Conv3dCooGradKernel(const Context& dev_ctx,
const SparseCooTensor& x,
const DenseTensor& kernel,
const DenseTensor& rulebook,
......@@ -233,8 +233,8 @@ void Conv3dGradKernel(const Context& dev_ctx,
SparseCooTensor* x_grad,
DenseTensor* kernel_grad) {
PD_VISIT_INTEGRAL_TYPES(
x.non_zero_indices().dtype(), "Conv3dGradGPUKernel", ([&] {
Conv3dGradGPUKernel<T, data_t>(dev_ctx,
x.non_zero_indices().dtype(), "Conv3dCooGradGPUKernel", ([&] {
Conv3dCooGradGPUKernel<T, data_t>(dev_ctx,
x,
kernel,
rulebook,
......@@ -252,10 +252,10 @@ void Conv3dGradKernel(const Context& dev_ctx,
} // namespace sparse
} // namespace phi
PD_REGISTER_KERNEL(sparse_conv3d_grad,
PD_REGISTER_KERNEL(conv3d_coo_grad,
GPU,
ALL_LAYOUT,
phi::sparse::Conv3dGradKernel,
phi::sparse::Conv3dCooGradKernel,
float,
double,
phi::dtype::float16) {
......
......@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/phi/kernels/sparse/convolution_kernel.h"
#include "paddle/phi/kernels/sparse/conv_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
......@@ -27,7 +27,7 @@ namespace phi {
namespace sparse {
template <typename T, typename IntT>
void Conv3dGPUKernel(const GPUContext& dev_ctx,
void Conv3dCooGPUKernel(const GPUContext& dev_ctx,
const SparseCooTensor& x,
const DenseTensor& kernel,
const std::vector<int>& paddings,
......@@ -190,7 +190,7 @@ void Conv3dGPUKernel(const GPUContext& dev_ctx,
* out: (N, D, H, W, OC)
**/
template <typename T, typename Context>
void Conv3dKernel(const Context& dev_ctx,
void Conv3dCooKernel(const Context& dev_ctx,
const SparseCooTensor& x,
const DenseTensor& kernel,
const std::vector<int>& paddings,
......@@ -201,8 +201,8 @@ void Conv3dKernel(const Context& dev_ctx,
SparseCooTensor* out,
DenseTensor* rulebook) {
PD_VISIT_INTEGRAL_TYPES(
x.non_zero_indices().dtype(), "Conv3dGPUKernel", ([&] {
Conv3dGPUKernel<T, data_t>(dev_ctx,
x.non_zero_indices().dtype(), "Conv3dCooGPUKernel", ([&] {
Conv3dCooGPUKernel<T, data_t>(dev_ctx,
x,
kernel,
paddings,
......@@ -218,10 +218,10 @@ void Conv3dKernel(const Context& dev_ctx,
} // namespace sparse
} // namespace phi
PD_REGISTER_KERNEL(sparse_conv3d,
PD_REGISTER_KERNEL(conv3d_coo,
GPU,
ALL_LAYOUT,
phi::sparse::Conv3dKernel,
phi::sparse::Conv3dCooKernel,
float,
double,
phi::dtype::float16) {
......
......@@ -28,7 +28,7 @@ limitations under the License. */
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/sparse/utils.cu.h"
#include "paddle/phi/kernels/primitive/compute_primitives.h"
#include "paddle/phi/kernels/sparse/convolution_kernel.h"
#include "paddle/phi/kernels/sparse/conv_kernel.h"
namespace phi {
namespace sparse {
......
......@@ -23,7 +23,7 @@ limitations under the License. */
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/sparse_coo_tensor.h"
PD_DECLARE_KERNEL(sparse_conv3d, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(conv3d_coo, CPU, ALL_LAYOUT);
template <typename T>
void TestConv3dBase(const std::vector<int>& indices,
......
......@@ -23,8 +23,8 @@ limitations under the License. */
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/tensor_utils.h"
#include "paddle/phi/kernels/sparse/coalesce_kernel.h"
#include "paddle/phi/kernels/sparse/convolution_grad_kernel.h"
#include "paddle/phi/kernels/sparse/convolution_kernel.h"
#include "paddle/phi/kernels/sparse/conv_grad_kernel.h"
#include "paddle/phi/kernels/sparse/conv_kernel.h"
namespace phi {
namespace tests {
......@@ -114,7 +114,7 @@ void TestConv3dBase(const std::vector<IntT>& indices,
if (!std::is_same<T, phi::dtype::float16>::value) {
DenseTensor rulebook = phi::Empty(
dev_ctx_cpu, DenseTensorMeta(indices_dtype, {1}, DataLayout::NCHW));
SparseCooTensor out = sparse::Conv3d<T>(dev_ctx_cpu,
SparseCooTensor out = sparse::Conv3dCoo<T>(dev_ctx_cpu,
x_tensor,
kernel_tensor,
paddings,
......@@ -139,7 +139,7 @@ void TestConv3dBase(const std::vector<IntT>& indices,
if (backward) {
std::tuple<SparseCooTensor, DenseTensor> grads =
sparse::Conv3dGrad<T>(dev_ctx_cpu,
sparse::Conv3dCooGrad<T>(dev_ctx_cpu,
x_tensor,
kernel_tensor,
rulebook,
......@@ -198,7 +198,7 @@ void TestConv3dBase(const std::vector<IntT>& indices,
DenseTensor d_rulebook = phi::Empty(
dev_ctx_gpu, DenseTensorMeta(indices_dtype, {1}, DataLayout::NCHW));
SparseCooTensor d_out = sparse::Conv3d<T>(dev_ctx_gpu,
SparseCooTensor d_out = sparse::Conv3dCoo<T>(dev_ctx_gpu,
d_x_tensor,
d_kernel_tensor,
paddings,
......@@ -242,7 +242,7 @@ void TestConv3dBase(const std::vector<IntT>& indices,
if (backward) {
std::tuple<SparseCooTensor, DenseTensor> grads =
sparse::Conv3dGrad<T>(dev_ctx_gpu,
sparse::Conv3dCooGrad<T>(dev_ctx_gpu,
d_x_tensor,
d_kernel_tensor,
d_rulebook,
......