Unverified commit be4a2077, authored by JingZhuangzhuang, committed by GitHub

add python share_data interface (#41626)

* add python share_data interface

* Update inference_api.cc

* Update inference_api.cc

* add python share_data interface
Parent de49a4b7
@@ -36,6 +36,7 @@ namespace paddle {
 using PaddleDType = paddle_infer::DataType;
 using PaddlePlace = paddle_infer::PlaceType;
+using PaddleDataLayout = paddle_infer::DataLayout;
 /// \brief Memory manager for PaddleTensor.
 ///
...
@@ -76,6 +76,7 @@ using paddle::NativeConfig;
 using paddle::NativePaddlePredictor;
 using paddle::PaddleBuf;
 using paddle::PaddleDType;
+using paddle::PaddleDataLayout;
 using paddle::PaddlePassBuilder;
 using paddle::PaddlePlace;
 using paddle::PaddlePredictor;
@@ -85,6 +86,7 @@ using paddle::ZeroCopyTensor;
 namespace {
 void BindPaddleDType(py::module *m);
+void BindPaddleDataLayout(py::module *m);
 void BindPaddleBuf(py::module *m);
 void BindPaddleTensor(py::module *m);
 void BindPaddlePlace(py::module *m);
@@ -211,6 +213,34 @@ void PaddleInferTensorCreate(
   tensor.CopyFromCpu(static_cast<const T *>(data.data()));
 }
+paddle_infer::PlaceType ToPaddleInferPlace(
+    phi::AllocationType allocation_type) {
+  if (allocation_type == phi::AllocationType::CPU) {
+    return paddle_infer::PlaceType::kCPU;
+  } else if (allocation_type == phi::AllocationType::GPU) {
+    return paddle_infer::PlaceType::kGPU;
+  } else {
+    return paddle_infer::PlaceType::kCPU;
+  }
+}
+
+void PaddleInferShareExternalData(paddle_infer::Tensor &tensor,  // NOLINT
+                                  framework::Tensor input_tensor) {
+  std::vector<int> shape;
+  for (int i = 0; i < input_tensor.dims().size(); ++i) {
+    shape.push_back(input_tensor.dims()[i]);
+  }
+  if (input_tensor.dtype() == phi::DataType::FLOAT32) {
+    tensor.ShareExternalData(
+        static_cast<float *>(input_tensor.data()), shape,
+        ToPaddleInferPlace(input_tensor.place().GetType()));
+  } else if (input_tensor.dtype() == phi::DataType::FLOAT16) {
+    tensor.ShareExternalData(
+        static_cast<paddle::platform::float16 *>(input_tensor.data()), shape,
+        ToPaddleInferPlace(input_tensor.place().GetType()));
+  }
+}
 /// \brief Experimental interface.
 /// Create the Strings tensor from data.
 /// \param tensor The tensor will be created and
@@ -327,6 +357,7 @@ void CopyPaddleInferTensor(paddle_infer::Tensor &dst,  // NOLINT
 void BindInferenceApi(py::module *m) {
   BindPaddleDType(m);
+  BindPaddleDataLayout(m);
   BindPaddleBuf(m);
   BindPaddleTensor(m);
   BindPaddlePlace(m);
@@ -372,6 +403,14 @@ void BindPaddleDType(py::module *m) {
       .value("INT32", PaddleDType::INT32);
 }
+
+void BindPaddleDataLayout(py::module *m) {
+  py::enum_<PaddleDataLayout>(*m, "PaddleDataLayout")
+      .value("UNK", PaddleDataLayout::kUNK)
+      .value("Any", PaddleDataLayout::kAny)
+      .value("NHWC", PaddleDataLayout::kNHWC)
+      .value("NCHW", PaddleDataLayout::kNCHW);
+}
+
 void BindPaddleBuf(py::module *m) {
   py::class_<PaddleBuf>(*m, "PaddleBuf")
       .def(py::init<size_t>())
@@ -817,6 +856,7 @@ void BindPaddleInferTensor(py::module *m) {
       .def("copy_from_cpu_bind",
           &PaddleInferTensorCreate<paddle_infer::float16>)
      .def("copy_from_cpu_bind", &PaddleInferStringTensorCreate)
+     .def("share_external_data_bind", &PaddleInferShareExternalData)
      .def("copy_to_cpu", &PaddleInferTensorToNumpy)
      .def("shape", &paddle_infer::Tensor::shape)
      .def("set_lod", &paddle_infer::Tensor::SetLoD)
...
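Two details of the C++ binding above are worth noting. ToPaddleInferPlace only distinguishes CPU and GPU allocations and falls back to kCPU for anything else, and PaddleInferShareExternalData dispatches only on FLOAT32 and FLOAT16, so a tensor of any other dtype is silently ignored rather than rejected. Also, input_tensor is passed by value: this copies the framework::Tensor metadata, while the underlying allocation remains shared.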
@@ -14,6 +14,7 @@
 from ..core import AnalysisConfig, PaddleDType, PaddlePlace
 from ..core import PaddleInferPredictor, PaddleInferTensor
+from .. import core
 import numpy as np
@@ -39,4 +40,16 @@ def tensor_copy_from_cpu(self, data):
     )
+
+def tensor_share_external_data(self, data):
+    '''
+    Support input type check based on tensor.share_external_data.
+    '''
+    if isinstance(data, core.LoDTensor):
+        self.share_external_data_bind(data)
+    else:
+        raise TypeError(
+            "In share_external_data, we only support LoDTensor data type.")
+
 Tensor.copy_from_cpu = tensor_copy_from_cpu
+Tensor.share_external_data = tensor_share_external_data
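With the wrapper in place, share_external_data becomes a zero-copy counterpart to copy_from_cpu: the LoDTensor's buffer is handed to the inference tensor directly instead of being copied. A minimal usage sketch, assuming hypothetical model file names and an input shape chosen only for illustration (the commit itself ships no example):

import numpy as np
import paddle.fluid.core as core
from paddle.inference import Config, create_predictor

# Hypothetical model paths, for illustration only.
config = Config("model.pdmodel", "model.pdiparams")
predictor = create_predictor(config)
input_handle = predictor.get_input_handle(predictor.get_input_names()[0])

# The wrapper only accepts a core.LoDTensor, so a numpy array must first
# be wrapped via LoDTensor.set(); the float32 buffer is then shared with
# the inference tensor rather than copied.
lod_tensor = core.LoDTensor()
lod_tensor.set(np.random.rand(1, 3, 224, 224).astype("float32"), core.CPUPlace())
input_handle.share_external_data(lod_tensor)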