/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/phi/api/include/sparse_api.h"

#include <memory>
#include "glog/logging.h"
#include "paddle/phi/api/lib/api_registry.h"
#include "paddle/phi/api/lib/kernel_dispatch.h"
#include "paddle/phi/api/lib/utils/storage.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/infermeta/unary.h"

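// Declare the dense/COO/CSR conversion kernels used by the conversion APIs
// below; the GPU variants are declared only when building with CUDA or HIP.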
PD_DECLARE_KERNEL(dense_to_sparse_coo, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(sparse_csr_to_coo, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(dense_to_sparse_csr, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(sparse_coo_to_csr, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(sparse_coo_to_dense, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(sparse_csr_to_dense, CPU, ALL_LAYOUT);

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
PD_DECLARE_KERNEL(dense_to_sparse_coo, GPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(sparse_csr_to_coo, GPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(dense_to_sparse_csr, GPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(sparse_coo_to_csr, GPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(sparse_coo_to_dense, GPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(sparse_csr_to_dense, GPU, ALL_LAYOUT);
#endif

namespace paddle {
namespace experimental {
namespace sparse {

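// Convert `x` to the SPARSE_COO layout on the given backend. A dense input is
// handled by the dense_to_sparse_coo kernel (where `sparse_dim` selects how
// many leading dimensions are stored as sparse indices), a SPARSE_CSR input by
// sparse_csr_to_coo, and a tensor that is already SPARSE_COO is returned
// unchanged.
//
// Minimal usage sketch (the dense tensor `dense_x` is assumed, not defined
// here):
//   Tensor coo = to_sparse_coo(dense_x, Backend::CPU, /*sparse_dim=*/2);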
PADDLE_API Tensor to_sparse_coo(const Tensor& x,
                                Backend backend,
                                const int64_t sparse_dim) {
  if (x.layout() == phi::DataLayout::SPARSE_COO) {
    return x;
  }
  // 1. Get kernel signature and kernel
  auto kernel_key_set = ParseKernelKeyByInputArgs(x);
  kernel_key_set.backend_set = kernel_key_set.backend_set | BackendSet(backend);
  auto kernel_key = kernel_key_set.GetHighestPriorityKernelKey();
  std::string kernel_name = "dense_to_sparse_coo";
  if (x.layout() == phi::DataLayout::SPARSE_CSR) {
    kernel_name = "sparse_csr_to_coo";
  }

  auto kernel = phi::KernelFactory::Instance().SelectKernelOrThrowError(
      kernel_name, kernel_key);

  VLOG(6) << "to API kernel key: " << kernel_key;
  VLOG(6) << "to API kernel: " << kernel;

  // 2. Get Device Context
  auto* dev_ctx = GetDeviceContextByBackend(kernel_key.backend());
  auto kernel_context = phi::KernelContext(dev_ctx);

  // 3. Auto data transform
  if (x.layout() == phi::DataLayout::SPARSE_CSR) {
    auto input = std::dynamic_pointer_cast<phi::SparseCsrTensor>(x.impl());
    kernel_context.EmplaceBackInput(input.get());
  } else {
    auto input = std::dynamic_pointer_cast<phi::DenseTensor>(x.impl());
    kernel_context.EmplaceBackInput(input.get());
    kernel_context.EmplaceBackAttr(sparse_dim);
  }

  // 4. InferMeta
  auto indices_meta =
      phi::DenseTensorMeta(phi::DataType::INT64, {-1}, phi::DataLayout::NCHW);
  auto elements_meta = phi::DenseTensorMeta(x.dtype(), {-1}, x.layout());

  // 5. Prepare outputs
  // create empty SparseCooTensor
  phi::DenseTensor non_zero_indices(
      phi::make_intrusive<paddle::experimental::SharedStorage>(
          phi::TransToPtenPlace(backend)),
      std::move(indices_meta));
  phi::DenseTensor non_zero_elements(
      phi::make_intrusive<paddle::experimental::SharedStorage>(
          phi::TransToPtenPlace(backend)),
      std::move(elements_meta));
  auto coo = std::make_shared<phi::SparseCooTensor>(
      non_zero_indices, non_zero_elements, x.dims());

  kernel_context.EmplaceBackOutput(coo.get());
  Tensor out;
  out.set_impl(coo);

  // 6. Call kernel
  kernel(&kernel_context);

  return out;
}

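// Convert `x` to the SPARSE_CSR layout on the given backend: dense inputs go
// through dense_to_sparse_csr, SPARSE_COO inputs through sparse_coo_to_csr,
// and a tensor that is already SPARSE_CSR is returned unchanged.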
PADDLE_API Tensor to_sparse_csr(const Tensor& x, Backend backend) {
  if (x.layout() == phi::DataLayout::SPARSE_CSR) {
    return x;
  }
  // 1. Get kernel signature and kernel
  auto kernel_key_set = ParseKernelKeyByInputArgs(x);
  kernel_key_set.backend_set = kernel_key_set.backend_set | BackendSet(backend);
  auto kernel_key = kernel_key_set.GetHighestPriorityKernelKey();
  std::string kernel_name = "dense_to_sparse_csr";
  if (x.layout() == phi::DataLayout::SPARSE_COO) {
    kernel_name = "sparse_coo_to_csr";
  }

  auto kernel = phi::KernelFactory::Instance().SelectKernelOrThrowError(
      kernel_name, kernel_key);

  VLOG(6) << "to API kernel key: " << kernel_key;
  VLOG(6) << "to API kernel: " << kernel;

  // 2. Get Device Context
  auto* dev_ctx = GetDeviceContextByBackend(kernel_key.backend());
  auto kernel_context = phi::KernelContext(dev_ctx);

  // 3. Auto data transform
  if (x.layout() == phi::DataLayout::SPARSE_COO) {
    auto input = std::dynamic_pointer_cast<phi::SparseCooTensor>(x.impl());
    kernel_context.EmplaceBackInput(input.get());
  } else {
    auto input = std::dynamic_pointer_cast<phi::DenseTensor>(x.impl());
    kernel_context.EmplaceBackInput(input.get());
  }

  // 4. InferMeta
  auto crows_meta =
      phi::DenseTensorMeta(phi::DataType::INT64, {-1}, phi::DataLayout::NCHW);
  auto cols_meta =
      phi::DenseTensorMeta(phi::DataType::INT64, {-1}, phi::DataLayout::NCHW);
  auto elements_meta = phi::DenseTensorMeta(x.dtype(), {-1}, x.layout());

  // 5. Prepare outputs
  // create empty SparseCsrTensor
  phi::DenseTensor non_zero_crows(
      phi::make_intrusive<paddle::experimental::SharedStorage>(
          phi::TransToPtenPlace(backend)),
      std::move(crows_meta));
  phi::DenseTensor non_zero_cols(
      phi::make_intrusive<paddle::experimental::SharedStorage>(
          phi::TransToPtenPlace(backend)),
      std::move(cols_meta));
  phi::DenseTensor non_zero_elements(
      phi::make_intrusive<paddle::experimental::SharedStorage>(
          phi::TransToPtenPlace(backend)),
      std::move(elements_meta));
  auto csr = std::make_shared<phi::SparseCsrTensor>(
      non_zero_crows, non_zero_cols, non_zero_elements, x.dims());

  kernel_context.EmplaceBackOutput(csr.get());
  Tensor out;
  out.set_impl(csr);

  // 6. Call kernel
  kernel(&kernel_context);

  return out;
}

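// Convert a SPARSE_COO or SPARSE_CSR tensor back to a dense tensor on the
// given backend (sparse_coo_to_dense / sparse_csr_to_dense); any other layout
// is returned unchanged.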
PADDLE_API Tensor to_dense(const Tensor& x, Backend backend) {
  if (x.layout() != phi::DataLayout::SPARSE_CSR &&
      x.layout() != phi::DataLayout::SPARSE_COO) {
    return x;
  }
  // 1. Get kernel signature and kernel
  auto kernel_key_set = ParseKernelKeyByInputArgs(x);
  kernel_key_set.backend_set = kernel_key_set.backend_set | BackendSet(backend);
  auto kernel_key = kernel_key_set.GetHighestPriorityKernelKey();
  std::string kernel_name = "sparse_coo_to_dense";
  if (x.layout() == phi::DataLayout::SPARSE_CSR) {
    kernel_name = "sparse_csr_to_dense";
  }

  auto kernel = phi::KernelFactory::Instance().SelectKernelOrThrowError(
      kernel_name, kernel_key);

  VLOG(6) << "to API kernel key: " << kernel_key;
  VLOG(6) << "to API kernel: " << kernel;

  // 2. Get Device Context
  auto* dev_ctx = GetDeviceContextByBackend(kernel_key.backend());
  auto kernel_context = phi::KernelContext(dev_ctx);

  // 3. Auto data transform
  if (x.layout() == phi::DataLayout::SPARSE_COO) {
    auto input = std::dynamic_pointer_cast<phi::SparseCooTensor>(x.impl());
    kernel_context.EmplaceBackInput(input.get());
  } else {
    auto input = std::dynamic_pointer_cast<phi::SparseCsrTensor>(x.impl());
    kernel_context.EmplaceBackInput(input.get());
  }

  // 4. InferMeta
  auto dense_meta = phi::DenseTensorMeta(x.dtype(), x.dims(), x.layout());

  // 5. Prepare outputs
  // create empty DenseTensor
  auto dense_out = std::make_shared<phi::DenseTensor>(
      phi::make_intrusive<paddle::experimental::SharedStorage>(
          phi::TransToPtenPlace(backend)),
      std::move(dense_meta));

  kernel_context.EmplaceBackOutput(dense_out.get());
  Tensor out;
  out.set_impl(dense_out);

  // 6. Call kernel
  kernel(&kernel_context);

  return out;
}

}  // namespace sparse
}  // namespace experimental
}  // namespace paddle

PD_REGISTER_API(SparseApi);