Unverified commit 5f5648a8 authored by Tao Luo, committed by GitHub

Revert "Python inference API support numpy (#19009)" (#19160)

test=develop
Parent 0019eb37
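For context: the reverted change (#19009) let Python callers construct PaddleTensor and PaddleBuf objects directly from numpy arrays and read results back as ndarrays. A rough usage sketch of that surface, assuming the classes are exposed through paddle.fluid.core as elsewhere in this codebase; the exact bindings appear in the removed lines of the diff below:

    import numpy as np
    from paddle.fluid import core

    arr = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
    t = core.PaddleTensor(arr, name="x")   # ndarray constructor, removed by this revert
    out = t.as_ndarray()                   # read back as an ndarray, removed by this revert
    vals = t.data.tolist("float32")        # PaddleBuf.tolist, removed by this revert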
paddle/fluid/pybind/inference_api.cc
@@ -13,7 +13,6 @@
 // limitations under the License.
 
 #include "paddle/fluid/pybind/inference_api.h"
-#include <pybind11/numpy.h>
 #include <pybind11/stl.h>
 #include <cstring>
 #include <iostream>
@@ -21,7 +20,6 @@
 #include <memory>
 #include <string>
 #include <unordered_set>
-#include <utility>
 #include <vector>
 #include "paddle/fluid/inference/api/analysis_predictor.h"
 #include "paddle/fluid/inference/api/paddle_inference_api.h"
@@ -53,81 +51,6 @@ void BindAnalysisPredictor(py::module *m);
 #ifdef PADDLE_WITH_MKLDNN
 void BindMkldnnQuantizerConfig(py::module *m);
 #endif
-
-template <typename T>
-PaddleBuf PaddleBufCreate(py::array_t<T> data) {
-  PaddleBuf buf(data.size() * sizeof(T));
-  std::copy_n(static_cast<T *>(data.mutable_data()), data.size(),
-              static_cast<T *>(buf.data()));
-  return buf;
-}
-
-template <typename T>
-void PaddleBufReset(PaddleBuf &buf, py::array_t<T> data) { // NOLINT
-  buf.Resize(data.size() * sizeof(T));
-  std::copy_n(static_cast<T *>(data.mutable_data()), data.size(),
-              static_cast<T *>(buf.data()));
-}
-
-template <typename T>
-PaddleDType PaddleTensorGetDType();
-
-template <>
-PaddleDType PaddleTensorGetDType<int32_t>() {
-  return PaddleDType::INT32;
-}
-
-template <>
-PaddleDType PaddleTensorGetDType<int64_t>() {
-  return PaddleDType::INT64;
-}
-
-template <>
-PaddleDType PaddleTensorGetDType<float>() {
-  return PaddleDType::FLOAT32;
-}
-
-template <typename T>
-PaddleTensor PaddleTensorCreate(
-    py::array_t<T> data, const std::string name = "",
-    const std::vector<std::vector<size_t>> &lod = {}, bool copy = false) {
-  PaddleTensor tensor;
-  if (copy) {
-    PaddleBuf buf(data.size() * sizeof(T));
-    std::copy_n(static_cast<T *>(data.mutable_data()), data.size(),
-                static_cast<T *>(buf.data()));
-    tensor.data = std::move(buf);
-  } else {
-    tensor.data = PaddleBuf(data.mutable_data(), data.size() * sizeof(T));
-  }
-  tensor.dtype = PaddleTensorGetDType<T>();
-  tensor.name = name;
-  tensor.lod = lod;
-  tensor.shape.resize(data.ndim());
-  std::copy_n(data.shape(), data.ndim(), tensor.shape.begin());
-  return tensor;
-}
-
-py::array PaddleTensorGetData(PaddleTensor &tensor) { // NOLINT
-  py::dtype dt;
-  switch (tensor.dtype) {
-    case PaddleDType::INT32:
-      dt = py::dtype::of<int32_t>();
-      break;
-    case PaddleDType::INT64:
-      dt = py::dtype::of<int64_t>();
-      break;
-    case PaddleDType::FLOAT32:
-      dt = py::dtype::of<float>();
-      break;
-    default:
-      LOG(FATAL) << "unsupported dtype";
-  }
-  return py::array(dt, {tensor.shape}, tensor.data.data());
-}
 } // namespace
 
 void BindInferenceApi(py::module *m) {
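Note that the removed PaddleTensorCreate<T> helper also honored a copy flag: with copy=false it wrapped the ndarray's buffer in a non-owning PaddleBuf, so the array had to outlive the tensor, while copy=true duplicated the data into a buffer owned by the tensor. A hedged illustration of how that looked from Python before the revert, under the same paddle.fluid.core assumption as above:

    import numpy as np
    from paddle.fluid import core

    arr = np.arange(4, dtype=np.int64)
    t_view = core.PaddleTensor(arr, copy=False)  # zero-copy: arr must stay alive while t_view is used
    t_copy = core.PaddleTensor(arr, copy=True)   # snapshots the data into tensor-owned memory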
@@ -166,39 +89,23 @@ void BindPaddleBuf(py::module *m) {
         std::memcpy(buf.data(), static_cast<void *>(data.data()), buf.length());
         return buf;
       }))
-      .def(py::init(&PaddleBufCreate<int32_t>))
-      .def(py::init(&PaddleBufCreate<int64_t>))
-      .def(py::init(&PaddleBufCreate<float>))
+      .def(py::init([](std::vector<int64_t> &data) {
+        auto buf = PaddleBuf(data.size() * sizeof(int64_t));
+        std::memcpy(buf.data(), static_cast<void *>(data.data()), buf.length());
+        return buf;
+      }))
       .def("resize", &PaddleBuf::Resize)
       .def("reset",
            [](PaddleBuf &self, std::vector<float> &data) {
              self.Resize(data.size() * sizeof(float));
              std::memcpy(self.data(), data.data(), self.length());
            })
-      .def("reset", &PaddleBufReset<int32_t>)
-      .def("reset", &PaddleBufReset<int64_t>)
-      .def("reset", &PaddleBufReset<float>)
-      .def("empty", &PaddleBuf::empty)
-      .def("tolist",
-           [](PaddleBuf &self, const std::string &dtype) -> py::list {
-             py::list l;
-             if (dtype == "int32") {
-               auto *data = static_cast<int32_t *>(self.data());
-               auto size = self.length() / sizeof(int32_t);
-               l = py::cast(std::vector<int32_t>(data, data + size));
-             } else if (dtype == "int64") {
-               auto *data = static_cast<int64_t *>(self.data());
-               auto size = self.length() / sizeof(int64_t);
-               l = py::cast(std::vector<int64_t>(data, data + size));
-             } else if (dtype == "float32") {
-               auto *data = static_cast<float *>(self.data());
-               auto size = self.length() / sizeof(float);
-               l = py::cast(std::vector<float>(data, data + size));
-             } else {
-               LOG(FATAL) << "unsupported dtype";
-             }
-             return l;
-           })
+      .def("reset",
+           [](PaddleBuf &self, std::vector<int64_t> &data) {
+             self.Resize(data.size() * sizeof(int64_t));
+             std::memcpy(self.data(), data.data(), self.length());
+           })
+      .def("empty", &PaddleBuf::empty)
       .def("float_data",
            [](PaddleBuf &self) -> std::vector<float> {
              auto *data = static_cast<float *>(self.data());
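After the revert, PaddleBuf is back to taking plain Python lists (converted to std::vector<float> or std::vector<int64_t>) and handing data back through typed accessors such as float_data(). A minimal sketch under the same paddle.fluid.core assumption:

    from paddle.fluid import core

    buf = core.PaddleBuf([1.0, 2.0, 3.0])  # list -> std::vector<float> constructor
    buf.reset([4.0, 5.0, 6.0])             # resize and overwrite the buffer contents
    print(buf.float_data())                # [4.0, 5.0, 6.0]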
@@ -220,19 +127,6 @@ void BindPaddleBuf(py::module *m) {
 void BindPaddleTensor(py::module *m) {
   py::class_<PaddleTensor>(*m, "PaddleTensor")
       .def(py::init<>())
-      .def(py::init(&PaddleTensorCreate<int32_t>), py::arg("data"),
-           py::arg("name") = "",
-           py::arg("lod") = std::vector<std::vector<size_t>>(),
-           py::arg("copy") = false)
-      .def(py::init(&PaddleTensorCreate<int64_t>), py::arg("data"),
-           py::arg("name") = "",
-           py::arg("lod") = std::vector<std::vector<size_t>>(),
-           py::arg("copy") = false)
-      .def(py::init(&PaddleTensorCreate<float>), py::arg("data"),
-           py::arg("name") = "",
-           py::arg("lod") = std::vector<std::vector<size_t>>(),
-           py::arg("copy") = false)
-      .def("as_ndarray", &PaddleTensorGetData)
       .def_readwrite("name", &PaddleTensor::name)
       .def_readwrite("shape", &PaddleTensor::shape)
       .def_readwrite("data", &PaddleTensor::data)
......
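With the numpy constructors gone, a PaddleTensor for the predictor is assembled field by field from the default constructor, using the read/write attributes kept in the hunk above. A sketch under the same paddle.fluid.core assumption; the dtype attribute and the PaddleDType enum are assumed to be bound the same way further down this file, past the truncated part of the diff:

    from paddle.fluid import core

    tensor = core.PaddleTensor()
    tensor.name = "x"
    tensor.shape = [2, 2]
    tensor.dtype = core.PaddleDType.FLOAT32
    tensor.data = core.PaddleBuf([1.0, 2.0, 3.0, 4.0])
    # pass [tensor] to a predictor's run() call as before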