/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/phi/api/lib/api_gen_utils.h"

namespace paddle {
namespace experimental {

/* ------------------ for input ----------------------- */
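// Unwrap the public paddle::experimental::Tensor into the concrete phi tensor
// type consumed by kernels. Only the underlying impl() pointer is cast; no
// tensor data is copied.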

std::shared_ptr<phi::DenseTensor> TensorToDenseTensor(const Tensor& tensor) {
  return std::static_pointer_cast<phi::DenseTensor>(tensor.impl());
}

paddle::optional<phi::DenseTensor> TensorToDenseTensor(
    const paddle::optional<Tensor>& tensor) {
  if (tensor) {
    return {*std::static_pointer_cast<phi::DenseTensor>(tensor->impl())};
  }
  return nullptr;
}

std::unique_ptr<std::vector<phi::DenseTensor*>> TensorToDenseTensor(
    const std::vector<Tensor>& tensors) {
  auto pt_tensors = std::make_unique<std::vector<phi::DenseTensor*>>();
  pt_tensors->reserve(tensors.size());

  for (const auto& t : tensors) {
    pt_tensors->push_back(
        std::dynamic_pointer_cast<phi::DenseTensor>(t.impl()).get());
  }

  return pt_tensors;
}

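// Collect raw const DenseTensor pointers from a (possibly optional) vector of
// input Tensors; the returned pointers are owned by the input Tensors' impls.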
std::vector<const phi::DenseTensor*> TensorToConstDenseTensorPtr(
    const std::vector<Tensor>& tensors) {
  std::vector<const phi::DenseTensor*> pt_tensors(tensors.size());

  for (size_t i = 0; i < tensors.size(); ++i) {
    pt_tensors[i] = static_cast<phi::DenseTensor*>(tensors[i].impl().get());
  }

  return pt_tensors;
}

paddle::optional<std::vector<const phi::DenseTensor*>>
TensorToConstDenseTensorPtr(
    const paddle::optional<std::vector<Tensor>>& tensors) {
  paddle::optional<std::vector<const phi::DenseTensor*>> pt_tensors;

  if (tensors) {
    pt_tensors =
        paddle::optional<std::vector<const phi::DenseTensor*>>(tensors->size());
    for (size_t i = 0; i < tensors->size(); ++i) {
      pt_tensors->at(i) =
          static_cast<phi::DenseTensor*>(tensors->at(i).impl().get());
    }
  }

  return pt_tensors;
}

std::shared_ptr<phi::SelectedRows> TensorToSelectedRows(const Tensor& tensor) {
  return std::static_pointer_cast<phi::SelectedRows>(tensor.impl());
}

paddle::optional<phi::SelectedRows> TensorToSelectedRows(
    const paddle::optional<Tensor>& tensor) {
  if (tensor) {
    return {*std::static_pointer_cast<phi::SelectedRows>(tensor->impl())};
  }
  return nullptr;
}

std::shared_ptr<phi::StringTensor> TensorToStringTensor(const Tensor& tensor) {
  return std::dynamic_pointer_cast<phi::StringTensor>(tensor.impl());
}

std::shared_ptr<phi::SparseCooTensor> TensorToSparseCooTensor(
    const Tensor& tensor) {
  return std::static_pointer_cast<phi::SparseCooTensor>(tensor.impl());
}

/* ----------------- for infer_meta --------------------- */
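// Wrap phi tensors into phi::MetaTensor objects, which expose only the
// shape/dtype/layout metadata needed by the InferMeta functions.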

phi::MetaTensor MakeMetaTensor(const phi::TensorBase& tensor) {
  return phi::MetaTensor(tensor);
}

std::vector<phi::MetaTensor> MakeMetaTensor(
    const std::vector<const phi::TensorBase*>& tensors) {
  std::vector<phi::MetaTensor> meta_tensors;
  meta_tensors.reserve(tensors.size());
  for (const auto* t : tensors) {
    meta_tensors.emplace_back(*t);
  }
  return meta_tensors;
}

phi::MetaTensor MakeMetaTensor(
    const paddle::optional<phi::DenseTensor>& tensor) {
  if (tensor) {
    return {phi::MetaTensor(*tensor)};
  }
  return phi::MetaTensor();
}

std::vector<phi::MetaTensor> MakeMetaTensor(
    const std::vector<const phi::DenseTensor*>& tensors) {
  std::vector<phi::MetaTensor> meta_tensors;
  meta_tensors.reserve(tensors.size());
  for (const auto* t : tensors) {
    meta_tensors.emplace_back(*t);
  }
  return meta_tensors;
}

std::vector<phi::MetaTensor> MakeMetaTensor(
    const std::vector<const phi::SelectedRows*>& tensors) {
  std::vector<phi::MetaTensor> meta_tensors;
  meta_tensors.reserve(tensors.size());
  for (const auto* t : tensors) {
    meta_tensors.emplace_back(*t);
  }
  return meta_tensors;
}

std::vector<phi::MetaTensor> MakeMetaTensor(
    const std::vector<phi::DenseTensor*>& tensors) {
  std::vector<phi::MetaTensor> meta_tensors;
  meta_tensors.reserve(tensors.size());
  for (auto* t : tensors) {
    meta_tensors.emplace_back(*t);
  }
  return meta_tensors;
}

phi::MetaTensor MakeMetaTensor(
    const paddle::optional<phi::SelectedRows>& tensor) {
  if (tensor) {
    return {phi::MetaTensor(*tensor)};
  }
  return phi::MetaTensor();
}

phi::MetaTensor MakeMetaTensor(
    const paddle::optional<phi::SparseCooTensor>& tensor) {
  if (tensor) {
    return {phi::MetaTensor(*tensor)};
  }
  return phi::MetaTensor();
}

phi::MetaTensor MakeMetaTensor(
    const paddle::optional<phi::SparseCsrTensor>& tensor) {
  if (tensor) {
    return {phi::MetaTensor(*tensor)};
  }
  return phi::MetaTensor();
}

std::vector<phi::MetaTensor> MakeMetaTensor(
    const paddle::optional<std::vector<const phi::DenseTensor*>>& tensors) {
  std::vector<phi::MetaTensor> meta_tensors;
  if (tensors) {
    meta_tensors.reserve(tensors->size());
    for (auto* t : tensors.get()) {
      meta_tensors.emplace_back(*t);
    }
  }
  return meta_tensors;
}

/* ------------------ for output ----------------------- */
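// Prepare the impl of each output Tensor (allocating it when missing) and
// return the raw pointer(s) that the phi kernel writes into. A generated API
// composes these helpers roughly as follows (illustrative sketch only, the
// names dense_x / api_out are placeholders):
//
//   Tensor api_out;
//   auto* kernel_out = SetKernelOutput(&api_out);
//   phi::MetaTensor meta_out(kernel_out);
//   phi::UnchangedInferMeta(MakeMetaTensor(*dense_x), &meta_out);
//   // ... then launch the kernel with kernel_out as the output ...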

phi::DenseTensor* SetKernelOutput(Tensor* out) {
  if (out) {
    if (out->impl() == nullptr) {
      out->set_impl(std::make_shared<phi::DenseTensor>());
    }
    return static_cast<phi::DenseTensor*>(out->impl().get());
  }
  return nullptr;
}

std::vector<phi::DenseTensor*> SetKernelOutput(size_t out_size,
                                               std::vector<Tensor>* out) {
  out->reserve(out_size);
  std::vector<phi::DenseTensor*> results(out_size);
  for (size_t i = 0; i < out_size; ++i) {
    auto tensor_ptr = std::make_shared<phi::DenseTensor>();
    results[i] = tensor_ptr.get();
    out->emplace_back();
    out->back().set_impl(tensor_ptr);
  }
  return results;
}

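// For inplace vector outputs the Tensor impls already exist, so only the raw
// DenseTensor pointers are collected; out_size is not used here.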
std::vector<phi::DenseTensor*> SetInplaceVectorKernelOutput(
    size_t out_size, std::vector<Tensor>* out) {
  std::vector<phi::DenseTensor*> results(out->size(), nullptr);
  for (size_t i = 0; i < out->size(); ++i) {
    results[i] = static_cast<phi::DenseTensor*>(out->at(i).impl().get());
  }
  return results;
}

std::vector<phi::DenseTensor*> SetInplaceOptionalVectorKernelOutput(
    size_t out_size, const paddle::optional<std::vector<Tensor>>& out) {
  std::vector<phi::DenseTensor*> results;
  if (out) {
    results = std::vector<phi::DenseTensor*>(out->size(), nullptr);
    for (size_t i = 0; i < out->size(); ++i) {
      results[i] = static_cast<phi::DenseTensor*>(out->at(i).impl().get());
    }
  }
  return results;
}

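// Variant for outputs passed as a vector of Tensor*; null entries are skipped
// and map to nullptr in the returned vector.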
std::vector<phi::DenseTensor*> SetKernelOutput(std::vector<Tensor*>* out) {
  std::vector<phi::DenseTensor*> results(out->size(), nullptr);
  for (size_t i = 0; i < out->size(); ++i) {
    if (out->at(i)) {
      auto tensor_ptr = std::make_shared<phi::DenseTensor>();
      results[i] = tensor_ptr.get();
      (*out)[i]->set_impl(tensor_ptr);
    }
  }
  return results;
}

phi::SelectedRows* SetSelectedRowsKernelOutput(Tensor* out) {
  if (!out->initialized()) {
    auto select_rows = std::make_shared<phi::SelectedRows>();
    out->set_impl(select_rows);
    return select_rows.get();
  }
  return static_cast<phi::SelectedRows*>(out->impl().get());
}

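// Create an empty impl of the requested sparse (or dense) type when the
// output has not been initialized; the placeholder DenseTensors and the {-1}
// dims are expected to be filled in by the kernel.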
phi::TensorBase* SetSparseKernelOutput(Tensor* out, TensorType type) {
  if (!out) {
    return nullptr;
  }
  if (!out->initialized()) {
    if (type == TensorType::SPARSE_COO) {
      auto sparse_tensor = std::make_shared<phi::SparseCooTensor>(
          phi::DenseTensor(), phi::DenseTensor(), phi::DDim{-1});
      out->set_impl(sparse_tensor);
      return sparse_tensor.get();
    } else if (type == TensorType::SPARSE_CSR) {
      auto sparse_tensor =
          std::make_shared<phi::SparseCsrTensor>(phi::DenseTensor(),
                                                 phi::DenseTensor(),
                                                 phi::DenseTensor(),
                                                 phi::DDim{-1, -1});
      out->set_impl(sparse_tensor);
      return sparse_tensor.get();
    } else {
      auto dense_tensor = std::make_shared<phi::DenseTensor>();
      out->set_impl(dense_tensor);
      return dense_tensor.get();
    }
  }
  return out->impl().get();
}

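// Lazily create a phi::StringTensor impl for string kernel outputs and return
// the raw TensorBase pointer.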
phi::TensorBase* SetStringsKernelOutput(Tensor* out, TensorType type) {
  if (!out->initialized()) {
    if (type == TensorType::STRING_TENSOR) {
      if (out->impl() == nullptr) {
        auto strings_tensor = std::make_shared<phi::StringTensor>();
        out->set_impl(strings_tensor);
      }
      return out->impl().get();
    }
  }
  return out->impl().get();
}

}  // namespace experimental
}  // namespace paddle