Unverified · Commit 0b82fb32 · Author: zhangkaihuo · Committed by: GitHub

[Sparse]Remove unused code (#46021)

Parent 61012a76
@@ -370,11 +370,6 @@ cc_library(
   SRCS api_custom_impl.cc
   DEPS phi_tensor_raw phi kernel_dispatch api_gen_utils backward_infermeta
        phi_data_transform)
-cc_library(
-  sparse_api_custom_impl
-  SRCS sparse_api_custom_impl.cc
-  DEPS phi_tensor_raw phi kernel_dispatch api_gen_utils phi_data_transform
-       tensor_copy)
 cc_library(
   phi_function_api
@@ -396,12 +391,11 @@ cc_library(
 cc_library(
   sparse_api
   SRCS ${sparse_api_source_file}
-  DEPS phi_tensor_raw phi kernel_dispatch api_gen_utils sparse_api_custom_impl)
+  DEPS phi_tensor_raw phi kernel_dispatch api_gen_utils)
 cc_library(
   sparse_bw_api
   SRCS ${sparse_bw_api_source_file}
-  DEPS phi_tensor_raw phi kernel_dispatch api_gen_utils sparse_api
-       sparse_api_custom_impl)
+  DEPS phi_tensor_raw phi kernel_dispatch api_gen_utils sparse_api)
 cc_library(
   phi_dygraph_api
   SRCS ${dygraph_api_source_file}
...
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/phi/api/lib/sparse_api_custom_impl.h"
#include <memory>
#include "glog/logging.h"
#include "paddle/phi/api/lib/kernel_dispatch.h"
#include "paddle/phi/core/kernel_registry.h"
namespace paddle {
namespace experimental {
namespace sparse {
Tensor to_sparse_coo_impl(const Tensor& x, const int64_t sparse_dim) {
  if (x.layout() == phi::DataLayout::SPARSE_COO) {
    return x;
  }

  // 1. Get kernel signature and kernel
  std::string kernel_name = "dense_to_coo";
  if (x.layout() == phi::DataLayout::SPARSE_CSR) {
    kernel_name = "csr_to_coo";
  }
  auto kernel_key_set = ParseKernelKeyByInputArgs(x);
  auto kernel_key = kernel_key_set.GetHighestPriorityKernelKey();
  auto kernel_result = phi::KernelFactory::Instance().SelectKernelOrThrowError(
      kernel_name, kernel_key);
  const auto& kernel = kernel_result.kernel;
  VLOG(6) << "add API kernel key: " << kernel_key;
  VLOG(6) << "to API kernel: " << kernel;

  // 2. Get Device Context
  auto* dev_ctx = GetDeviceContextByBackend(kernel_key.backend());
  auto kernel_context = phi::KernelContext(dev_ctx);

  // 3. Auto data transform
  if (x.layout() == phi::DataLayout::SPARSE_CSR) {
    auto input = std::dynamic_pointer_cast<phi::SparseCsrTensor>(x.impl());
    kernel_context.EmplaceBackInput(input.get());
  } else {
    auto input = std::dynamic_pointer_cast<phi::DenseTensor>(x.impl());
    kernel_context.EmplaceBackInput(input.get());
    kernel_context.EmplaceBackAttr(sparse_dim);
  }

  // 4. InferMeta
  auto indices_meta =
      phi::DenseTensorMeta(phi::DataType::INT64, {1}, phi::DataLayout::NCHW);
  auto elements_meta = phi::DenseTensorMeta(x.dtype(), {1}, x.layout());

  // 5. Prepare outputs
  // create empty SparseCooTensor
  phi::DenseTensor non_zero_indices(std::make_shared<phi::Allocation>(),
                                    std::move(indices_meta));
  phi::DenseTensor non_zero_elements(std::make_shared<phi::Allocation>(),
                                     std::move(elements_meta));
  auto coo = std::make_shared<phi::SparseCooTensor>(
      non_zero_indices, non_zero_elements, x.dims());
  kernel_context.EmplaceBackOutput(coo.get());
  Tensor out;
  out.set_impl(coo);

  // 6. Call kernel
  kernel(&kernel_context);

  return out;
}
Tensor to_sparse_csr_impl(const Tensor& x) {
  if (x.layout() == phi::DataLayout::SPARSE_CSR) {
    return x;
  }

  // 1. Get kernel signature and kernel
  std::string kernel_name = "dense_to_csr";
  if (x.layout() == phi::DataLayout::SPARSE_COO) {
    kernel_name = "coo_to_csr";
  }
  auto kernel_key_set = ParseKernelKeyByInputArgs(x);
  auto kernel_key = kernel_key_set.GetHighestPriorityKernelKey();
  auto kernel_result = phi::KernelFactory::Instance().SelectKernelOrThrowError(
      kernel_name, kernel_key);
  const auto& kernel = kernel_result.kernel;
  VLOG(6) << "add API kernel key: " << kernel_key;
  VLOG(6) << "to API kernel: " << kernel;

  // 2. Get Device Context
  auto* dev_ctx = GetDeviceContextByBackend(kernel_key.backend());
  auto kernel_context = phi::KernelContext(dev_ctx);

  // 3. Auto data transform
  if (x.layout() == phi::DataLayout::SPARSE_COO) {
    auto input = std::dynamic_pointer_cast<phi::SparseCooTensor>(x.impl());
    kernel_context.EmplaceBackInput(input.get());
  } else {
    auto input = std::dynamic_pointer_cast<phi::DenseTensor>(x.impl());
    kernel_context.EmplaceBackInput(input.get());
  }

  // 4. InferMeta
  auto crows_meta =
      phi::DenseTensorMeta(phi::DataType::INT64, {1}, phi::DataLayout::NCHW);
  auto cols_meta =
      phi::DenseTensorMeta(phi::DataType::INT64, {1}, phi::DataLayout::NCHW);
  auto elements_meta = phi::DenseTensorMeta(x.dtype(), {1}, x.layout());

  // 5. Prepare outputs
  // create empty SparseCsrTensor
  phi::DenseTensor non_zero_crows(std::make_shared<phi::Allocation>(),
                                  std::move(crows_meta));
  phi::DenseTensor non_zero_cols(std::make_shared<phi::Allocation>(),
                                 std::move(cols_meta));
  phi::DenseTensor non_zero_elements(std::make_shared<phi::Allocation>(),
                                     std::move(elements_meta));
  auto csr = std::make_shared<phi::SparseCsrTensor>(
      non_zero_crows, non_zero_cols, non_zero_elements, x.dims());
  kernel_context.EmplaceBackOutput(csr.get());
  Tensor out;
  out.set_impl(csr);

  // 6. Call kernel
  kernel(&kernel_context);

  return out;
}
Tensor to_dense_impl(const Tensor& x) {
  if (x.layout() != phi::DataLayout::SPARSE_CSR &&
      x.layout() != phi::DataLayout::SPARSE_COO) {
    return x;
  }

  // 1. Get kernel signature and kernel
  std::string kernel_name = "coo_to_dense";
  if (x.layout() == phi::DataLayout::SPARSE_CSR) {
    kernel_name = "csr_to_dense";
  }
  auto kernel_key_set = ParseKernelKeyByInputArgs(x);
  auto kernel_key = kernel_key_set.GetHighestPriorityKernelKey();
  auto kernel_result = phi::KernelFactory::Instance().SelectKernelOrThrowError(
      kernel_name, kernel_key);
  const auto& kernel = kernel_result.kernel;
  VLOG(6) << "add API kernel key: " << kernel_key;
  VLOG(6) << "to API kernel: " << kernel;

  // 2. Get Device Context
  auto* dev_ctx = GetDeviceContextByBackend(kernel_key.backend());
  auto kernel_context = phi::KernelContext(dev_ctx);

  // 3. Auto data transform
  if (x.layout() == phi::DataLayout::SPARSE_COO) {
    auto input = std::dynamic_pointer_cast<phi::SparseCooTensor>(x.impl());
    kernel_context.EmplaceBackInput(input.get());
  } else {
    auto input = std::dynamic_pointer_cast<phi::SparseCsrTensor>(x.impl());
    kernel_context.EmplaceBackInput(input.get());
  }

  // 4. InferMeta
  auto dense_meta = phi::DenseTensorMeta(x.dtype(), x.dims(), x.layout());

  // 5. Prepare outputs
  // create empty DenseTensor
  auto dense_out = std::make_shared<phi::DenseTensor>(
      std::make_shared<phi::Allocation>(), std::move(dense_meta));
  kernel_context.EmplaceBackOutput(dense_out.get());
  Tensor out;
  out.set_impl(dense_out);

  // 6. Call kernel
  kernel(&kernel_context);

  return out;
}
} // namespace sparse
} // namespace experimental
} // namespace paddle
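
For orientation, the three deleted helpers above all follow the same hand-written dispatch pattern that the code-generated sparse API now takes over: pick a kernel name from the input layout, resolve the kernel, bind inputs/attributes/outputs on a phi::KernelContext, and run the kernel. The sketch below condenses that pattern for the dense-to-COO case. It uses only calls that appear in the deleted file, but the function name dense_to_coo_sketch is hypothetical and the snippet is an illustrative summary, not code from this commit.

// Illustrative condensation of the removed to_sparse_coo_impl; assumes the
// same includes and namespaces as sparse_api_custom_impl.cc above.
// dense_to_coo_sketch is a made-up name for illustration only.
Tensor dense_to_coo_sketch(const Tensor& x, int64_t sparse_dim) {
  // Resolve the "dense_to_coo" kernel for the input's backend/layout/dtype.
  auto kernel_key = ParseKernelKeyByInputArgs(x).GetHighestPriorityKernelKey();
  auto kernel_result = phi::KernelFactory::Instance().SelectKernelOrThrowError(
      "dense_to_coo", kernel_key);
  const auto& kernel = kernel_result.kernel;

  // Bind the device context, the dense input, and the sparse_dim attribute.
  auto* dev_ctx = GetDeviceContextByBackend(kernel_key.backend());
  phi::KernelContext ctx(dev_ctx);
  auto input = std::dynamic_pointer_cast<phi::DenseTensor>(x.impl());
  ctx.EmplaceBackInput(input.get());
  ctx.EmplaceBackAttr(sparse_dim);

  // Prepare an empty SparseCooTensor output, run the kernel, and wrap it.
  phi::DenseTensor indices(
      std::make_shared<phi::Allocation>(),
      phi::DenseTensorMeta(phi::DataType::INT64, {1}, phi::DataLayout::NCHW));
  phi::DenseTensor values(std::make_shared<phi::Allocation>(),
                          phi::DenseTensorMeta(x.dtype(), {1}, x.layout()));
  auto coo = std::make_shared<phi::SparseCooTensor>(indices, values, x.dims());
  ctx.EmplaceBackOutput(coo.get());
  kernel(&ctx);

  Tensor out;
  out.set_impl(coo);
  return out;
}
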
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once

#include "paddle/phi/api/include/tensor.h"
#include "paddle/phi/common/backend.h"

namespace paddle {
namespace experimental {
namespace sparse {

Tensor to_dense_impl(const Tensor& x);

Tensor to_sparse_coo_impl(const Tensor& x, const int64_t sparse_dim);

Tensor to_sparse_csr_impl(const Tensor& x);
} // namespace sparse
} // namespace experimental
} // namespace paddle
@@ -43,7 +43,6 @@ def source_include(header_file_path):
 #include "paddle/phi/api/lib/api_gen_utils.h"
 #include "paddle/phi/api/lib/data_transform.h"
 #include "paddle/phi/api/lib/kernel_dispatch.h"
-#include "paddle/phi/api/lib/sparse_api_custom_impl.h"
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/infermeta/binary.h"
 #include "paddle/phi/infermeta/multiary.h"
...
@@ -229,7 +229,6 @@ def source_include(header_file_path):
 #include "paddle/phi/api/lib/api_gen_utils.h"
 #include "paddle/phi/api/lib/data_transform.h"
 #include "paddle/phi/api/lib/kernel_dispatch.h"
-#include "paddle/phi/api/lib/sparse_api_custom_impl.h"
 #include "paddle/phi/core/kernel_registry.h"
 """
...
@@ -111,7 +111,6 @@ def source_include(header_file_path):
 #include "paddle/phi/api/include/sparse_api.h"
 #include "paddle/phi/api/lib/api_gen_utils.h"
 #include "paddle/phi/api/lib/kernel_dispatch.h"
-#include "paddle/phi/api/lib/sparse_api_custom_impl.h"
 #include "paddle/phi/core/kernel_registry.h"
 """
...
@@ -52,7 +52,6 @@ TEST(sparse_coo_tensor, construct) {
   CHECK_EQ(sparse.numel(), 9);
   CHECK(sparse.dims() == dense_dims);
   CHECK(sparse.dtype() == DataType::FLOAT32);
-  CHECK(sparse.layout() == DataLayout::SPARSE_COO);
   CHECK(sparse.place() == phi::CPUPlace());
 }
...
@@ -62,7 +62,6 @@ TEST(sparse_csr_tensor, construct) {
   CHECK_EQ(sparse.numel(), 9);
   CHECK(sparse.dims() == dense_dims);
   CHECK(sparse.dtype() == DataType::FLOAT32);
-  CHECK(sparse.layout() == DataLayout::SPARSE_CSR);
   CHECK(sparse.place() == paddle::platform::CPUPlace());
   CHECK(sparse.initialized() == true);
 }
...