Unverified · Commit 9e307229 · authored by zhangkaihuo, committed by GitHub

Standard name of sparse pool (#44344)

Parent: f382eb06
@@ -316,10 +316,10 @@
   args : (Tensor x, int[] kernel_sizes, int[] paddings, int[] dilations, int[] strides)
   output : Tensor(out), Tensor(rulebook)
   kernel :
-    func : sparse_maxpool{sparse_coo -> sparse_coo, dense}
+    func : maxpool_coo{sparse_coo -> sparse_coo, dense}
   layout : x
   intermediate : rulebook
-  backward : sparse_maxpool_grad
+  backward : maxpool_grad
 - api: mv
   args : (Tensor x, Tensor vec)
......
@@ -137,6 +137,13 @@
     matmul_coo_dense_grad {sparse_coo, dense, dense -> sparse_coo, dense},
     matmul_coo_coo_grad {sparse_coo, sparse_coo, sparse_coo -> sparse_coo, sparse_coo}
+- backward_api : maxpool_grad
+  forward : maxpool(Tensor x, int[] kernel_sizes, int[] paddings, int[] dilations, int[] strides) -> Tensor(out), Tensor(rulebook)
+  args : (Tensor x, Tensor rulebook, Tensor out, Tensor out_grad, int[] kernel_sizes)
+  output : Tensor(x_grad)
+  kernel :
+    func : maxpool_coo_grad {sparse_coo, dense, sparse_coo, sparse_coo -> sparse_coo}
+
 - backward_api : multiply_grad
   forward : multiply(Tensor x, Tensor y) -> Tensor(out)
   args : (Tensor x, Tensor y, Tensor out_grad)
@@ -198,13 +205,6 @@
   kernel :
     func : softmax_csr_grad{sparse_csr, sparse_csr -> sparse_csr}
-- backward_api : sparse_maxpool_grad
-  forward : sparse_maxpool(Tensor x, int[] kernel_sizes, int[] paddings, int[] dilations, int[] strides) -> Tensor(out), Tensor(rulebook)
-  args : (Tensor x, Tensor rulebook, Tensor out, Tensor out_grad, int[] kernel_sizes)
-  output : Tensor(x_grad)
-  kernel :
-    func : sparse_maxpool_grad {sparse_coo, dense, sparse_coo, sparse_coo -> sparse_coo}
-
 - backward_api : sqrt_grad
   forward : sqrt(Tensor x) -> Tensor(out)
   args : (Tensor out, Tensor out_grad)
......
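For context, each `func` entry in these YAML files binds the op to a kernel registered under that name, and the braced layout signature lists one entry per tensor argument. As a reading aid, here is how the two new names line up with the C++ declarations that this commit renames in `pool_kernel.h` and `pool_grad_kernel.h` (the declarations are restated from those headers, annotated per signature entry; this is not a new API):

```cpp
#include <vector>

#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/sparse_coo_tensor.h"

namespace phi {
namespace sparse {

// maxpool_coo {sparse_coo -> sparse_coo, dense}
template <typename T, typename Context>
void MaxPoolCooKernel(const Context& dev_ctx,
                      const SparseCooTensor& x,  // sparse_coo
                      const std::vector<int>& kernel_sizes,
                      const std::vector<int>& paddings,
                      const std::vector<int>& dilations,
                      const std::vector<int>& strides,
                      SparseCooTensor* out,      // -> sparse_coo
                      DenseTensor* rulebook);    // -> dense (intermediate)

// maxpool_coo_grad {sparse_coo, dense, sparse_coo, sparse_coo -> sparse_coo}
template <typename T, typename Context>
void MaxPoolCooGradKernel(const Context& dev_ctx,
                          const SparseCooTensor& x,         // sparse_coo
                          const DenseTensor& rulebook,      // dense
                          const SparseCooTensor& out,       // sparse_coo
                          const SparseCooTensor& out_grad,  // sparse_coo
                          const std::vector<int>& kernel_sizes,
                          SparseCooTensor* x_grad);         // -> sparse_coo

}  // namespace sparse
}  // namespace phi
```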
@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#include "paddle/phi/kernels/sparse/sparse_pool_grad_kernel.h"
+#include "paddle/phi/kernels/sparse/pool_grad_kernel.h"
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/core/tensor_utils.h"
@@ -25,7 +25,7 @@ namespace phi {
 namespace sparse {
 template <typename T, typename IntT = int>
-void MaxPoolGradCPUKernel(const CPUContext& dev_ctx,
+void MaxPoolCooGradCPUKernel(const CPUContext& dev_ctx,
                              const SparseCooTensor& x,
                              const DenseTensor& rulebook,
                              const SparseCooTensor& out,
@@ -75,7 +75,7 @@ void MaxPoolGradCPUKernel(const CPUContext& dev_ctx,
 }
 template <typename T, typename Context>
-void MaxPoolGradKernel(const Context& dev_ctx,
+void MaxPoolCooGradKernel(const Context& dev_ctx,
                           const SparseCooTensor& x,
                           const DenseTensor& rulebook,
                           const SparseCooTensor& out,
@@ -83,8 +83,8 @@ void MaxPoolGradKernel(const Context& dev_ctx,
                           const std::vector<int>& kernel_sizes,
                           SparseCooTensor* x_grad) {
   PD_VISIT_INTEGRAL_TYPES(
-      x.non_zero_indices().dtype(), "MaxPoolGradCPUKernel", ([&] {
-        MaxPoolGradCPUKernel<T, data_t>(
+      x.non_zero_indices().dtype(), "MaxPoolCooGradCPUKernel", ([&] {
+        MaxPoolCooGradCPUKernel<T, data_t>(
             dev_ctx, x, rulebook, out, out_grad, kernel_sizes, x_grad);
       }));
 }
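`PD_VISIT_INTEGRAL_TYPES` dispatches on the runtime dtype of the COO indices tensor and exposes the matching static type to the lambda as `data_t`, which is how the `IntT` template parameter gets instantiated. A minimal standalone sketch of that dispatch pattern, using hypothetical names rather than Paddle's actual macro expansion:

```cpp
#include <cstdint>
#include <stdexcept>
#include <utility>

// Stand-in for the runtime dtype tag carried by the indices tensor.
enum class IndexType { kInt32, kInt64 };

// Switch on the runtime index type and hand the matching static type to
// the visitor, mirroring what PD_VISIT_INTEGRAL_TYPES does with data_t.
template <typename Visitor>
void VisitIntegralTypes(IndexType dtype, Visitor&& visitor) {
  switch (dtype) {
    case IndexType::kInt32:
      std::forward<Visitor>(visitor)(int32_t{0});
      break;
    case IndexType::kInt64:
      std::forward<Visitor>(visitor)(int64_t{0});
      break;
    default:
      throw std::invalid_argument("unsupported index type");
  }
}

// Usage: a generic lambda recovers the static type, just as the kernels
// above do when forwarding to MaxPoolCooCPUKernel<T, data_t>.
void Example(IndexType dtype) {
  VisitIntegralTypes(dtype, [](auto zero) {
    using data_t = decltype(zero);  // int32_t or int64_t
    (void)sizeof(data_t);           // kernel body would run with data_t here
  });
}
```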
@@ -92,10 +92,10 @@ void MaxPoolGradKernel(const Context& dev_ctx,
 }  // namespace sparse
 }  // namespace phi
-PD_REGISTER_KERNEL(sparse_maxpool_grad,
+PD_REGISTER_KERNEL(maxpool_coo_grad,
                    CPU,
                    ALL_LAYOUT,
-                   phi::sparse::MaxPoolGradKernel,
+                   phi::sparse::MaxPoolCooGradKernel,
                    float,
                    double) {
   kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
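`PD_REGISTER_KERNEL` is why the rename has to touch the YAML and the C++ together: the string passed as the first argument becomes the lookup key that the `func : maxpool_coo_grad` entry resolves at dispatch time, and the registration body adds per-argument constraints such as the SPARSE_COO layout on input 0. A conceptual sketch of the mechanism, assuming a deliberately simplified registry rather than phi's real internals:

```cpp
#include <functional>
#include <map>
#include <string>
#include <tuple>

// Simplified key: (kernel name, backend, dtype). phi's real key also
// carries layout and more.
using KernelKey = std::tuple<std::string, std::string, std::string>;

std::map<KernelKey, std::function<void()>>& Registry() {
  static std::map<KernelKey, std::function<void()>> table;
  return table;
}

// A registration macro expands to a static object like this, whose
// constructor inserts the kernel before main() runs.
struct KernelRegistrar {
  KernelRegistrar(std::string name, std::string backend, std::string dtype,
                  std::function<void()> fn) {
    Registry()[{std::move(name), std::move(backend), std::move(dtype)}] =
        std::move(fn);
  }
};

static KernelRegistrar reg_maxpool_coo_grad_f32{
    "maxpool_coo_grad", "CPU", "float32", [] {
      // would invoke phi::sparse::MaxPoolCooGradKernel<float, CPUContext>
    }};
```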
......
@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#include "paddle/phi/kernels/sparse/sparse_pool_kernel.h"
+#include "paddle/phi/kernels/sparse/pool_kernel.h"
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/core/tensor_meta.h"
@@ -30,7 +30,7 @@ namespace sparse {
  * out: (N, D, H, W, OC)
 **/
 template <typename T, typename IntT = int>
-void MaxPoolCPUKernel(const CPUContext& dev_ctx,
+void MaxPoolCooCPUKernel(const CPUContext& dev_ctx,
                          const SparseCooTensor& x,
                          const std::vector<int>& kernel_sizes,
                          const std::vector<int>& paddings,
@@ -98,7 +98,7 @@ void MaxPoolCPUKernel(const CPUContext& dev_ctx,
 }
 template <typename T, typename Context>
-void MaxPoolKernel(const Context& dev_ctx,
+void MaxPoolCooKernel(const Context& dev_ctx,
                       const SparseCooTensor& x,
                       const std::vector<int>& kernel_sizes,
                       const std::vector<int>& paddings,
@@ -107,8 +107,8 @@ void MaxPoolKernel(const Context& dev_ctx,
                       SparseCooTensor* out,
                       DenseTensor* rulebook) {
   PD_VISIT_INTEGRAL_TYPES(
-      x.non_zero_indices().dtype(), "MaxPoolCPUKernel", ([&] {
-        MaxPoolCPUKernel<T, data_t>(dev_ctx,
+      x.non_zero_indices().dtype(), "MaxPoolCooCPUKernel", ([&] {
+        MaxPoolCooCPUKernel<T, data_t>(dev_ctx,
                                        x,
                                        kernel_sizes,
                                        paddings,
@@ -122,10 +122,10 @@ void MaxPoolKernel(const Context& dev_ctx,
 }  // namespace sparse
 }  // namespace phi
-PD_REGISTER_KERNEL(sparse_maxpool,
+PD_REGISTER_KERNEL(maxpool_coo,
                    CPU,
                    ALL_LAYOUT,
-                   phi::sparse::MaxPoolKernel,
+                   phi::sparse::MaxPoolCooKernel,
                    float,
                    double) {
   kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
......
@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#include "paddle/phi/kernels/sparse/sparse_pool_grad_kernel.h"
+#include "paddle/phi/kernels/sparse/pool_grad_kernel.h"
 #include "paddle/phi/backends/gpu/gpu_context.h"
 #include "paddle/phi/backends/gpu/gpu_info.h"
@@ -52,7 +52,7 @@ __global__ void MaxPoolGradCudaKernel(const T* in_features_ptr,
 }
 template <typename T, typename IntT = int>
-void MaxPoolGradGPUKernel(const GPUContext& dev_ctx,
+void MaxPoolCooGradGPUKernel(const GPUContext& dev_ctx,
                              const SparseCooTensor& x,
                              const DenseTensor& rulebook,
                              const SparseCooTensor& out,
@@ -121,7 +121,7 @@ void MaxPoolGradGPUKernel(const GPUContext& dev_ctx,
 }
 template <typename T, typename Context>
-void MaxPoolGradKernel(const Context& dev_ctx,
+void MaxPoolCooGradKernel(const Context& dev_ctx,
                           const SparseCooTensor& x,
                           const DenseTensor& rulebook,
                           const SparseCooTensor& out,
@@ -129,8 +129,8 @@ void MaxPoolGradKernel(const Context& dev_ctx,
                           const std::vector<int>& kernel_sizes,
                           SparseCooTensor* x_grad) {
   PD_VISIT_INTEGRAL_TYPES(
-      x.non_zero_indices().dtype(), "MaxPoolGradGPUKernel", ([&] {
-        MaxPoolGradGPUKernel<T, data_t>(
+      x.non_zero_indices().dtype(), "MaxPoolCooGradGPUKernel", ([&] {
+        MaxPoolCooGradGPUKernel<T, data_t>(
            dev_ctx, x, rulebook, out, out_grad, kernel_sizes, x_grad);
       }));
 }
@@ -138,10 +138,10 @@ void MaxPoolGradKernel(const Context& dev_ctx,
 }  // namespace sparse
 }  // namespace phi
-PD_REGISTER_KERNEL(sparse_maxpool_grad,
+PD_REGISTER_KERNEL(maxpool_coo_grad,
                    GPU,
                    ALL_LAYOUT,
-                   phi::sparse::MaxPoolGradKernel,
+                   phi::sparse::MaxPoolCooGradKernel,
                    float,
                    double) {
   kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
......
@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#include "paddle/phi/kernels/sparse/sparse_pool_kernel.h"
+#include "paddle/phi/kernels/sparse/pool_kernel.h"
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/core/tensor_meta.h"
@@ -48,7 +48,7 @@ __global__ void MaxPoolCudaKernel(const T* in_features_ptr,
  * out: (N, D, H, W, OC)
 **/
 template <typename T, typename IntT = int>
-void MaxPoolGPUKernel(const GPUContext& dev_ctx,
+void MaxPoolCooGPUKernel(const GPUContext& dev_ctx,
                          const SparseCooTensor& x,
                          const std::vector<int>& kernel_sizes,
                          const std::vector<int>& paddings,
@@ -127,7 +127,7 @@ void MaxPoolGPUKernel(const GPUContext& dev_ctx,
 }
 template <typename T, typename Context>
-void MaxPoolKernel(const Context& dev_ctx,
+void MaxPoolCooKernel(const Context& dev_ctx,
                       const SparseCooTensor& x,
                       const std::vector<int>& kernel_sizes,
                       const std::vector<int>& paddings,
@@ -136,8 +136,8 @@ void MaxPoolKernel(const Context& dev_ctx,
                       SparseCooTensor* out,
                       DenseTensor* rulebook) {
   PD_VISIT_INTEGRAL_TYPES(
-      x.non_zero_indices().dtype(), "MaxPoolGPUKernel", ([&] {
-        MaxPoolGPUKernel<T, data_t>(dev_ctx,
+      x.non_zero_indices().dtype(), "MaxPoolCooGPUKernel", ([&] {
+        MaxPoolCooGPUKernel<T, data_t>(dev_ctx,
                                        x,
                                        kernel_sizes,
                                        paddings,
@@ -151,10 +151,10 @@ void MaxPoolKernel(const Context& dev_ctx,
 }  // namespace sparse
 }  // namespace phi
-PD_REGISTER_KERNEL(sparse_maxpool,
+PD_REGISTER_KERNEL(maxpool_coo,
                    GPU,
                    ALL_LAYOUT,
-                   phi::sparse::MaxPoolKernel,
+                   phi::sparse::MaxPoolCooKernel,
                    float,
                    double,
                    phi::dtype::float16) {
......
@@ -22,7 +22,7 @@ namespace phi {
 namespace sparse {
 template <typename T, typename Context>
-void MaxPoolGradKernel(const Context& dev_ctx,
+void MaxPoolCooGradKernel(const Context& dev_ctx,
                           const SparseCooTensor& x,
                           const DenseTensor& rulebook,
                           const SparseCooTensor& out,
@@ -31,14 +31,14 @@ void MaxPoolGradKernel(const Context& dev_ctx,
                           SparseCooTensor* x_grad);
 template <typename T, typename Context>
-SparseCooTensor MaxPoolGrad(const Context& dev_ctx,
+SparseCooTensor MaxPoolCooGrad(const Context& dev_ctx,
                                const SparseCooTensor& x,
                                const DenseTensor& rulebook,
                                const SparseCooTensor& out,
                                const SparseCooTensor& out_grad,
                                const std::vector<int>& kernel_sizes) {
   SparseCooTensor x_grad;
-  MaxPoolGradKernel<T, Context>(
+  MaxPoolCooGradKernel<T, Context>(
       dev_ctx, x, rulebook, out, out_grad, kernel_sizes, &x_grad);
   return x_grad;
 }
......
@@ -22,7 +22,7 @@ namespace phi {
 namespace sparse {
 template <typename T, typename Context>
-void MaxPoolKernel(const Context& dev_ctx,
+void MaxPoolCooKernel(const Context& dev_ctx,
                       const SparseCooTensor& x,
                       const std::vector<int>& kernel_sizes,
                       const std::vector<int>& paddings,
@@ -32,7 +32,7 @@ void MaxPoolKernel(const Context& dev_ctx,
                       DenseTensor* rulebook);
 template <typename T, typename Context>
-SparseCooTensor MaxPool(const Context& dev_ctx,
+SparseCooTensor MaxPoolCoo(const Context& dev_ctx,
                            const SparseCooTensor& x,
                            const std::vector<int>& kernel_sizes,
                            const std::vector<int>& paddings,
@@ -40,7 +40,7 @@ SparseCooTensor MaxPool(const Context& dev_ctx,
                            const std::vector<int>& strides,
                            DenseTensor* rulebook) {
   SparseCooTensor coo;
-  MaxPoolKernel<T, Context>(
+  MaxPoolCooKernel<T, Context>(
      dev_ctx, x, kernel_sizes, paddings, dilations, strides, &coo, rulebook);
   return coo;
 }
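Taken together, the renamed wrappers give the call pattern exercised by the test below. A usage sketch based only on the signatures above (context and tensor construction are omitted, and the kernel/padding/dilation/stride values are placeholders):

```cpp
#include <vector>

#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/kernels/sparse/pool_grad_kernel.h"
#include "paddle/phi/kernels/sparse/pool_kernel.h"

// Forward + backward through the renamed wrappers. The dense rulebook
// produced by the forward pass records the input->output index mapping
// and is consumed again by the backward pass.
phi::SparseCooTensor MaxPoolRoundTrip(const phi::CPUContext& dev_ctx,
                                      const phi::SparseCooTensor& x) {
  std::vector<int> kernel_sizes{1, 3, 3};  // placeholder hyperparameters
  std::vector<int> paddings{0, 0, 0};
  std::vector<int> dilations{1, 1, 1};
  std::vector<int> strides{1, 1, 1};

  phi::DenseTensor rulebook;
  phi::SparseCooTensor out = phi::sparse::MaxPoolCoo<float>(
      dev_ctx, x, kernel_sizes, paddings, dilations, strides, &rulebook);

  // The test below also passes `out` as out_grad; any COO tensor with
  // out's sparsity pattern would do here.
  return phi::sparse::MaxPoolCooGrad<float>(
      dev_ctx, x, rulebook, out, /*out_grad=*/out, kernel_sizes);
}
```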
......
@@ -23,8 +23,8 @@ limitations under the License. */
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/core/tensor_utils.h"
 #include "paddle/phi/kernels/sparse/coalesce_kernel.h"
-#include "paddle/phi/kernels/sparse/sparse_pool_grad_kernel.h"
-#include "paddle/phi/kernels/sparse/sparse_pool_kernel.h"
+#include "paddle/phi/kernels/sparse/pool_grad_kernel.h"
+#include "paddle/phi/kernels/sparse/pool_kernel.h"
 namespace phi {
 namespace tests {
@@ -91,7 +91,7 @@ void TestMaxPoolBase(const std::vector<IntT>& indices,
   if (!std::is_same<T, phi::dtype::float16>::value) {
     DenseTensor rulebook;
-    SparseCooTensor out = sparse::MaxPool<T>(dev_ctx_cpu,
+    SparseCooTensor out = sparse::MaxPoolCoo<T>(dev_ctx_cpu,
                                                 x_tensor,
                                                 kernel_sizes,
                                                 paddings,
@@ -113,7 +113,7 @@ void TestMaxPoolBase(const std::vector<IntT>& indices,
     f_verify(out.non_zero_elements().data<T>(), correct_out_features);
     if (backward) {
-      SparseCooTensor x_grad = sparse::MaxPoolGrad<T>(
+      SparseCooTensor x_grad = sparse::MaxPoolCooGrad<T>(
          dev_ctx_cpu, x_tensor, rulebook, out, out, kernel_sizes);
       f_verify(x_grad.non_zero_elements().data<T>(), features_grad);
     }
@@ -151,7 +151,7 @@ void TestMaxPoolBase(const std::vector<IntT>& indices,
     SparseCooTensor d_x_tensor(d_indices_tensor, d_features_tensor, x_dims);
     DenseTensor d_rulebook;
-    SparseCooTensor d_out = sparse::MaxPool<T>(dev_ctx_gpu,
+    SparseCooTensor d_out = sparse::MaxPoolCoo<T>(dev_ctx_gpu,
                                                   d_x_tensor,
                                                   kernel_sizes,
                                                   paddings,
@@ -191,7 +191,7 @@ void TestMaxPoolBase(const std::vector<IntT>& indices,
     f_verify(h_features_tensor.data<T>(), correct_out_features);
     if (backward) {
-      SparseCooTensor x_grad = sparse::MaxPoolGrad<T>(
+      SparseCooTensor x_grad = sparse::MaxPoolCooGrad<T>(
          dev_ctx_gpu, d_x_tensor, d_rulebook, d_out, d_out, kernel_sizes);
       DenseTensor h_features_grad =
          phi::EmptyLike<T>(dev_ctx_cpu, x_grad.non_zero_elements());
......