Unverified commit 87443831, authored by zhangkaihuo and committed by GitHub

Standard sparse conv name (#44353)

Parent 0dafbb03
@@ -84,7 +84,7 @@
   args : (Tensor x, Tensor kernel, int[] paddings, int[] dilations, int[] strides, int groups, bool subm)
   output : Tensor(out), Tensor(rulebook)
   kernel :
-    func : sparse_conv3d{sparse_coo, dense -> sparse_coo, dense}
+    func : conv3d_coo{sparse_coo, dense -> sparse_coo, dense}
     layout : x
   intermediate : rulebook
   backward : conv3d_grad
...
@@ -76,7 +76,7 @@
   args : (Tensor x, Tensor kernel, Tensor rulebook, Tensor out_grad, int[] paddings, int[] dilations, int[] strides, int groups, bool subm)
   output : Tensor(x_grad), Tensor(kernel_grad)
   kernel :
-    func : sparse_conv3d_grad{sparse_coo, dense, dense, sparse_coo -> sparse_coo, dense}
+    func : conv3d_coo_grad{sparse_coo, dense, dense, sparse_coo -> sparse_coo, dense}

 - backward_api : coo_to_dense_grad
   forward : coo_to_dense(Tensor x) -> Tensor(out)
...
@@ -17,27 +17,26 @@ limitations under the License. */
 #include "paddle/phi/core/dense_tensor.h"
 #include "paddle/phi/core/sparse_coo_tensor.h"
 #include "paddle/phi/kernels/empty_kernel.h"
-#include "paddle/phi/kernels/sparse/convolution_kernel.h"

 namespace phi {
 namespace sparse {

 template <typename T, typename Context>
-void Conv3dGradKernel(const Context& dev_ctx,
+void Conv3dCooGradKernel(const Context& dev_ctx,
                          const SparseCooTensor& x,
                          const DenseTensor& kernel,
                          const DenseTensor& rulebook,
                          const SparseCooTensor& out_grad,
                          const std::vector<int>& paddings,
                          const std::vector<int>& dilations,
                          const std::vector<int>& strides,
                          const int groups,
                          const bool subm,
                          SparseCooTensor* x_grad,
                          DenseTensor* kernel_grad);

 template <typename T, typename Context>
-std::tuple<SparseCooTensor, DenseTensor> Conv3dGrad(
+std::tuple<SparseCooTensor, DenseTensor> Conv3dCooGrad(
     const Context& dev_ctx,
     const SparseCooTensor& x,
     const DenseTensor& kernel,
@@ -52,18 +51,18 @@ std::tuple<SparseCooTensor, DenseTensor> Conv3dGrad(
   DenseTensor kernel_grad;

   // TODO(zhangkaihuo): call InferMeta func here
-  Conv3dGradKernel<T, Context>(dev_ctx,
+  Conv3dCooGradKernel<T, Context>(dev_ctx,
                                   x,
                                   kernel,
                                   rulebook,
                                   out_grad,
                                   paddings,
                                   dilations,
                                   strides,
                                   groups,
                                   subm,
                                   &x_grad,
                                   &kernel_grad);
   return std::make_tuple(x_grad, kernel_grad);
 }
...
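For orientation, here is a minimal caller-side sketch of the renamed gradient wrapper. It is illustrative only and not part of this commit; `dev_ctx`, `x`, `kernel`, `rulebook`, `out_grad`, and the paddings/dilations/strides vectors are assumed to be constructed already, as in the unit tests further down in this diff.

    // Illustrative sketch: gradients of the sparse conv w.r.t. x and kernel.
    // The returned tuple is (x_grad, kernel_grad), matching the declaration above.
    std::tuple<phi::SparseCooTensor, phi::DenseTensor> grads =
        phi::sparse::Conv3dCooGrad<float>(dev_ctx,
                                          x,
                                          kernel,
                                          rulebook,
                                          out_grad,
                                          paddings,
                                          dilations,
                                          strides,
                                          /*groups=*/1,
                                          /*subm=*/false);
    phi::SparseCooTensor x_grad = std::get<0>(grads);
    phi::DenseTensor kernel_grad = std::get<1>(grads);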
@@ -23,38 +23,38 @@ namespace phi {
 namespace sparse {

 template <typename T, typename Context>
-void Conv3dKernel(const Context& dev_ctx,
+void Conv3dCooKernel(const Context& dev_ctx,
                      const SparseCooTensor& x,
                      const DenseTensor& kernel,
                      const std::vector<int>& paddings,
                      const std::vector<int>& dilations,
                      const std::vector<int>& strides,
                      const int groups,
                      const bool subm,
                      SparseCooTensor* out,
                      DenseTensor* rulebook);

 template <typename T, typename Context>
-SparseCooTensor Conv3d(const Context& dev_ctx,
+SparseCooTensor Conv3dCoo(const Context& dev_ctx,
                           const SparseCooTensor& x,
                           const DenseTensor kernel,
                           const std::vector<int>& paddings,
                           const std::vector<int>& dilations,
                           const std::vector<int>& strides,
                           const int groups,
                           const bool subm,
                           DenseTensor* rulebook) {
   SparseCooTensor coo;
-  Conv3dKernel<T, Context>(dev_ctx,
+  Conv3dCooKernel<T, Context>(dev_ctx,
                               x,
                               kernel,
                               paddings,
                               dilations,
                               strides,
                               groups,
                               subm,
                               &coo,
                               rulebook);
   return coo;
 }
...
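Similarly, a minimal caller-side sketch of the renamed forward wrapper follows. It is illustrative only: `dev_ctx`, `x`, and `kernel` are assumed to exist, 32-bit indices are assumed for the rulebook tensor, and the padding/dilation/stride values are placeholders.

    // Illustrative sketch: run the sparse 3-D convolution via the renamed wrapper.
    // Only x.layout == NDHWC and groups == 1 are currently supported.
    phi::DenseTensor rulebook = phi::Empty(
        dev_ctx,
        phi::DenseTensorMeta(phi::DataType::INT32, {1}, phi::DataLayout::NCHW));
    phi::SparseCooTensor out =
        phi::sparse::Conv3dCoo<float>(dev_ctx,
                                      x,
                                      kernel,
                                      /*paddings=*/{0, 0, 0},
                                      /*dilations=*/{1, 1, 1},
                                      /*strides=*/{1, 1, 1},
                                      /*groups=*/1,
                                      /*subm=*/false,
                                      &rulebook);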
@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

-#include "paddle/phi/kernels/sparse/convolution_grad_kernel.h"
+#include "paddle/phi/kernels/sparse/conv_grad_kernel.h"

 #include "paddle/phi/core/visit_type.h"
 #include "paddle/phi/kernels/funcs/blas/blas.h"
@@ -31,18 +31,18 @@ namespace sparse {
 // x_grad = out_grad * transpose(kenrel)
 // kernel_grad = transpose(x) * out_grad
 template <typename T, typename IntT = int>
-void Conv3dGradCPUKernel(const CPUContext& dev_ctx,
+void Conv3dCooGradCPUKernel(const CPUContext& dev_ctx,
                             const SparseCooTensor& x,
                             const DenseTensor& kernel,
                             const DenseTensor& rulebook,
                             const SparseCooTensor& out_grad,
                             const std::vector<int>& paddings,
                             const std::vector<int>& dilations,
                             const std::vector<int>& strides,
                             const int groups,
                             const bool subm,
                             SparseCooTensor* x_grad,
                             DenseTensor* kernel_grad) {
   const auto& kernel_dims = kernel.dims();
   const int kernel_size = kernel_dims[0] * kernel_dims[1] * kernel_dims[2];
   const int in_channels = kernel_dims[3];
@@ -178,42 +178,42 @@ void Conv3dGradCPUKernel(const CPUContext& dev_ctx,
 }

 template <typename T, typename Context>
-void Conv3dGradKernel(const Context& dev_ctx,
+void Conv3dCooGradKernel(const Context& dev_ctx,
                          const SparseCooTensor& x,
                          const DenseTensor& kernel,
                          const DenseTensor& rulebook,
                          const SparseCooTensor& out_grad,
                          const std::vector<int>& paddings,
                          const std::vector<int>& dilations,
                          const std::vector<int>& strides,
                          const int groups,
                          const bool subm,
                          SparseCooTensor* x_grad,
                          DenseTensor* kernel_grad) {
   PD_VISIT_INTEGRAL_TYPES(
-      x.non_zero_indices().dtype(), "Conv3dGradCPUKernel", ([&] {
+      x.non_zero_indices().dtype(), "Conv3dCooGradCPUKernel", ([&] {
-        Conv3dGradCPUKernel<T, data_t>(dev_ctx,
+        Conv3dCooGradCPUKernel<T, data_t>(dev_ctx,
                                           x,
                                           kernel,
                                           rulebook,
                                           out_grad,
                                           paddings,
                                           dilations,
                                           strides,
                                           groups,
                                           subm,
                                           x_grad,
                                           kernel_grad);
       }));
 }

 }  // namespace sparse
 }  // namespace phi

-PD_REGISTER_KERNEL(sparse_conv3d_grad,
+PD_REGISTER_KERNEL(conv3d_coo_grad,
                    CPU,
                    ALL_LAYOUT,
-                   phi::sparse::Conv3dGradKernel,
+                   phi::sparse::Conv3dCooGradKernel,
                    float,
                    double) {
   kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
...
@@ -27,16 +27,16 @@ namespace sparse {
  * out: (N, D, H, W, OC)
 **/
 template <typename T, typename IntT = int>
-void Conv3dCPUKernel(const CPUContext& dev_ctx,
+void Conv3dCooCPUKernel(const CPUContext& dev_ctx,
                         const SparseCooTensor& x,
                         const DenseTensor& kernel,
                         const std::vector<int>& paddings,
                         const std::vector<int>& dilations,
                         const std::vector<int>& strides,
                         const int groups,
                         const bool subm,
                         SparseCooTensor* out,
                         DenseTensor* rulebook) {
   // update padding and dilation
   // Currently, only support x.layout is NDHWC, groups = 1
   // if x.layout != NDHWC then transpose(x), transpose(weight)
@@ -151,28 +151,28 @@ void Conv3dCPUKernel(const CPUContext& dev_ctx,
 }

 template <typename T, typename Context>
-void Conv3dKernel(const Context& dev_ctx,
+void Conv3dCooKernel(const Context& dev_ctx,
                      const SparseCooTensor& x,
                      const DenseTensor& kernel,
                      const std::vector<int>& paddings,
                      const std::vector<int>& dilations,
                      const std::vector<int>& strides,
                      const int groups,
                      const bool subm,
                      SparseCooTensor* out,
                      DenseTensor* rulebook) {
   PD_VISIT_INTEGRAL_TYPES(
-      x.non_zero_indices().dtype(), "Conv3dCPUKernel", ([&] {
+      x.non_zero_indices().dtype(), "Conv3dCooCPUKernel", ([&] {
-        Conv3dCPUKernel<T, data_t>(dev_ctx,
+        Conv3dCooCPUKernel<T, data_t>(dev_ctx,
                                       x,
                                       kernel,
                                       paddings,
                                       dilations,
                                       strides,
                                       groups,
                                       subm,
                                       out,
                                       rulebook);
       }));
 }
@@ -180,6 +180,6 @@ void Conv3dKernel(const Context& dev_ctx,
 }  // namespace phi

 PD_REGISTER_KERNEL(
-    sparse_conv3d, CPU, ALL_LAYOUT, phi::sparse::Conv3dKernel, float, double) {
+    conv3d_coo, CPU, ALL_LAYOUT, phi::sparse::Conv3dCooKernel, float, double) {
   kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
 }
@@ -21,7 +21,7 @@ limitations under the License. */
 #include "paddle/phi/core/sparse_coo_tensor.h"
 #include "paddle/phi/core/tensor_meta.h"
 #include "paddle/phi/kernels/funcs/blas/blas.h"
-#include "paddle/phi/kernels/sparse/convolution_kernel.h"
+#include "paddle/phi/kernels/sparse/conv_kernel.h"

 namespace phi {
 namespace sparse {
...
@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

-#include "paddle/phi/kernels/sparse/convolution_grad_kernel.h"
+#include "paddle/phi/kernels/sparse/conv_grad_kernel.h"

 #include "glog/logging.h"
 #include "paddle/phi/backends/gpu/gpu_context.h"
@@ -39,18 +39,18 @@ namespace sparse {
 // x_grad = out_grad * transpose(kenrel)
 // kernel_grad = transpose(x) * out_grad
 template <typename T, typename IntT>
-void Conv3dGradGPUKernel(const GPUContext& dev_ctx,
+void Conv3dCooGradGPUKernel(const GPUContext& dev_ctx,
                             const SparseCooTensor& x,
                             const DenseTensor& kernel,
                             const DenseTensor& rulebook,
                             const SparseCooTensor& out_grad,
                             const std::vector<int>& paddings,
                             const std::vector<int>& dilations,
                             const std::vector<int>& strides,
                             const int groups,
                             const bool subm,
                             SparseCooTensor* x_grad,
                             DenseTensor* kernel_grad) {
   const auto& kernel_dims = kernel.dims();
   const int kernel_size = kernel_dims[0] * kernel_dims[1] * kernel_dims[2];
   const int in_channels = kernel_dims[3];
@@ -220,42 +220,42 @@ void Conv3dGradGPUKernel(const GPUContext& dev_ctx,
 }

 template <typename T, typename Context>
-void Conv3dGradKernel(const Context& dev_ctx,
+void Conv3dCooGradKernel(const Context& dev_ctx,
                          const SparseCooTensor& x,
                          const DenseTensor& kernel,
                          const DenseTensor& rulebook,
                          const SparseCooTensor& out_grad,
                          const std::vector<int>& paddings,
                          const std::vector<int>& dilations,
                          const std::vector<int>& strides,
                          const int groups,
                          const bool subm,
                          SparseCooTensor* x_grad,
                          DenseTensor* kernel_grad) {
   PD_VISIT_INTEGRAL_TYPES(
-      x.non_zero_indices().dtype(), "Conv3dGradGPUKernel", ([&] {
+      x.non_zero_indices().dtype(), "Conv3dCooGradGPUKernel", ([&] {
-        Conv3dGradGPUKernel<T, data_t>(dev_ctx,
+        Conv3dCooGradGPUKernel<T, data_t>(dev_ctx,
                                           x,
                                           kernel,
                                           rulebook,
                                           out_grad,
                                           paddings,
                                           dilations,
                                           strides,
                                           groups,
                                           subm,
                                           x_grad,
                                           kernel_grad);
       }));
 }

 }  // namespace sparse
 }  // namespace phi

-PD_REGISTER_KERNEL(sparse_conv3d_grad,
+PD_REGISTER_KERNEL(conv3d_coo_grad,
                    GPU,
                    ALL_LAYOUT,
-                   phi::sparse::Conv3dGradKernel,
+                   phi::sparse::Conv3dCooGradKernel,
                    float,
                    double,
                    phi::dtype::float16) {
...
@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

-#include "paddle/phi/kernels/sparse/convolution_kernel.h"
+#include "paddle/phi/kernels/sparse/conv_kernel.h"

 #include "paddle/phi/backends/gpu/gpu_context.h"
 #include "paddle/phi/core/kernel_registry.h"
@@ -27,16 +27,16 @@ namespace phi {
 namespace sparse {

 template <typename T, typename IntT>
-void Conv3dGPUKernel(const GPUContext& dev_ctx,
+void Conv3dCooGPUKernel(const GPUContext& dev_ctx,
                         const SparseCooTensor& x,
                         const DenseTensor& kernel,
                         const std::vector<int>& paddings,
                         const std::vector<int>& dilations,
                         const std::vector<int>& strides,
                         const int groups,
                         const bool subm,
                         SparseCooTensor* out,
                         DenseTensor* rulebook) {
   // update padding and dilation
   // Currently, only support x.layout is NDHWC, groups = 1
   // if x.layout != NDHWC then transpose(x), transpose(weight)
@@ -190,38 +190,38 @@ void Conv3dGPUKernel(const GPUContext& dev_ctx,
  * out: (N, D, H, W, OC)
 **/
 template <typename T, typename Context>
-void Conv3dKernel(const Context& dev_ctx,
+void Conv3dCooKernel(const Context& dev_ctx,
                      const SparseCooTensor& x,
                      const DenseTensor& kernel,
                      const std::vector<int>& paddings,
                      const std::vector<int>& dilations,
                      const std::vector<int>& strides,
                      const int groups,
                      const bool subm,
                      SparseCooTensor* out,
                      DenseTensor* rulebook) {
   PD_VISIT_INTEGRAL_TYPES(
-      x.non_zero_indices().dtype(), "Conv3dGPUKernel", ([&] {
+      x.non_zero_indices().dtype(), "Conv3dCooGPUKernel", ([&] {
-        Conv3dGPUKernel<T, data_t>(dev_ctx,
+        Conv3dCooGPUKernel<T, data_t>(dev_ctx,
                                       x,
                                       kernel,
                                       paddings,
                                       dilations,
                                       strides,
                                       groups,
                                       subm,
                                       out,
                                       rulebook);
       }));
 }

 }  // namespace sparse
 }  // namespace phi

-PD_REGISTER_KERNEL(sparse_conv3d,
+PD_REGISTER_KERNEL(conv3d_coo,
                    GPU,
                    ALL_LAYOUT,
-                   phi::sparse::Conv3dKernel,
+                   phi::sparse::Conv3dCooKernel,
                    float,
                    double,
                    phi::dtype::float16) {
...
@@ -28,7 +28,7 @@ limitations under the License. */
 #include "paddle/phi/kernels/funcs/math_function.h"
 #include "paddle/phi/kernels/funcs/sparse/utils.cu.h"
 #include "paddle/phi/kernels/primitive/compute_primitives.h"
-#include "paddle/phi/kernels/sparse/convolution_kernel.h"
+#include "paddle/phi/kernels/sparse/conv_kernel.h"

 namespace phi {
 namespace sparse {
...
@@ -23,7 +23,7 @@ limitations under the License. */
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/core/sparse_coo_tensor.h"

-PD_DECLARE_KERNEL(sparse_conv3d, CPU, ALL_LAYOUT);
+PD_DECLARE_KERNEL(conv3d_coo, CPU, ALL_LAYOUT);

 template <typename T>
 void TestConv3dBase(const std::vector<int>& indices,
...
@@ -23,8 +23,8 @@ limitations under the License. */
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/core/tensor_utils.h"
 #include "paddle/phi/kernels/sparse/coalesce_kernel.h"
-#include "paddle/phi/kernels/sparse/convolution_grad_kernel.h"
-#include "paddle/phi/kernels/sparse/convolution_kernel.h"
+#include "paddle/phi/kernels/sparse/conv_grad_kernel.h"
+#include "paddle/phi/kernels/sparse/conv_kernel.h"

 namespace phi {
 namespace tests {
@@ -114,15 +114,15 @@ void TestConv3dBase(const std::vector<IntT>& indices,
   if (!std::is_same<T, phi::dtype::float16>::value) {
     DenseTensor rulebook = phi::Empty(
         dev_ctx_cpu, DenseTensorMeta(indices_dtype, {1}, DataLayout::NCHW));
-    SparseCooTensor out = sparse::Conv3d<T>(dev_ctx_cpu,
+    SparseCooTensor out = sparse::Conv3dCoo<T>(dev_ctx_cpu,
                                                x_tensor,
                                                kernel_tensor,
                                                paddings,
                                                dilations,
                                                strides,
                                                1,
                                                subm,
                                                &rulebook);

     ASSERT_EQ(correct_out_dims.size(), out.dims().size());
     for (int i = 0; i < correct_out_dims.size(); i++) {
@@ -139,16 +139,16 @@ void TestConv3dBase(const std::vector<IntT>& indices,
     if (backward) {
       std::tuple<SparseCooTensor, DenseTensor> grads =
-          sparse::Conv3dGrad<T>(dev_ctx_cpu,
+          sparse::Conv3dCooGrad<T>(dev_ctx_cpu,
                                    x_tensor,
                                    kernel_tensor,
                                    rulebook,
                                    out,
                                    paddings,
                                    dilations,
                                    strides,
                                    1,
                                    subm);
       f_verify(std::get<0>(grads).non_zero_elements().data<T>(), features_grad);
       f_verify(std::get<1>(grads).data<T>(), kernel_grad);
     }
@@ -198,15 +198,15 @@ void TestConv3dBase(const std::vector<IntT>& indices,
     DenseTensor d_rulebook = phi::Empty(
         dev_ctx_gpu, DenseTensorMeta(indices_dtype, {1}, DataLayout::NCHW));
-    SparseCooTensor d_out = sparse::Conv3d<T>(dev_ctx_gpu,
+    SparseCooTensor d_out = sparse::Conv3dCoo<T>(dev_ctx_gpu,
                                                  d_x_tensor,
                                                  d_kernel_tensor,
                                                  paddings,
                                                  dilations,
                                                  strides,
                                                  1,
                                                  subm,
                                                  &d_rulebook);

     SparseCooTensor tmp_d_out = sparse::Coalesce<T>(dev_ctx_gpu, d_out);
@@ -242,16 +242,16 @@ void TestConv3dBase(const std::vector<IntT>& indices,
     if (backward) {
       std::tuple<SparseCooTensor, DenseTensor> grads =
-          sparse::Conv3dGrad<T>(dev_ctx_gpu,
+          sparse::Conv3dCooGrad<T>(dev_ctx_gpu,
                                    d_x_tensor,
                                    d_kernel_tensor,
                                    d_rulebook,
                                    d_out,
                                    paddings,
                                    dilations,
                                    strides,
                                    1,
                                    subm);
       DenseTensor d_features_grad = std::get<0>(grads).non_zero_elements();
       DenseTensor d_kernel_grad = std::get<1>(grads);
       DenseTensor h_features_grad =
...