Unverified commit 43ee4a33 authored by: J JingZhuangzhuang, committed by: GitHub

[cherry-pick] add python share_date interface (#41627)

* add python share_date interface

* Update inference_api.cc

* add python share_data interface
Parent commit: 47b6e5ff
......@@ -36,6 +36,7 @@ namespace paddle {
using PaddleDType = paddle_infer::DataType;
using PaddlePlace = paddle_infer::PlaceType;
using PaddleDataLayout = paddle_infer::DataLayout;
/// \brief Memory manager for PaddleTensor.
///
......
......@@ -76,6 +76,7 @@ using paddle::NativeConfig;
using paddle::NativePaddlePredictor;
using paddle::PaddleBuf;
using paddle::PaddleDType;
using paddle::PaddleDataLayout;
using paddle::PaddlePassBuilder;
using paddle::PaddlePlace;
using paddle::PaddlePredictor;
......@@ -85,6 +86,7 @@ using paddle::ZeroCopyTensor;
namespace {
void BindPaddleDType(py::module *m);
void BindPaddleDataLayout(py::module *m);
void BindPaddleBuf(py::module *m);
void BindPaddleTensor(py::module *m);
void BindPaddlePlace(py::module *m);
......@@ -211,6 +213,34 @@ void PaddleInferTensorCreate(
tensor.CopyFromCpu(static_cast<const T *>(data.data()));
}
/// \brief Map a phi::AllocationType onto the inference PlaceType enum.
///
/// Only GPU allocations map to kGPU; CPU and every other allocation type
/// (XPU, NPU, pinned, ...) fall back to kCPU, matching the original
/// if/else-if/else chain whose else branch also returned kCPU.
paddle_infer::PlaceType ToPaddleInferPlace(
    phi::AllocationType allocation_type) {
  switch (allocation_type) {
    case phi::AllocationType::GPU:
      return paddle_infer::PlaceType::kGPU;
    default:
      // CPU and all unrecognized allocation types alike.
      return paddle_infer::PlaceType::kCPU;
  }
}
/// \brief Share the underlying buffer of a framework::Tensor with an
/// inference tensor (zero-copy), binding its shape and place.
///
/// \param tensor Destination inference tensor that will view the data.
/// \param input_tensor Source tensor whose buffer, dims and place are shared.
///
/// NOTE(review): only FLOAT32 and FLOAT16 inputs are handled; any other
/// dtype falls through both branches and the call is a silent no-op,
/// leaving `tensor` untouched -- TODO: consider reporting an error for
/// unsupported dtypes instead of silently ignoring them.
void PaddleInferShareExternalData(paddle_infer::Tensor &tensor, // NOLINT
framework::Tensor input_tensor) {
// Flatten the DDim into the plain int vector ShareExternalData expects.
std::vector<int> shape;
for (int i = 0; i < input_tensor.dims().size(); ++i) {
shape.push_back(input_tensor.dims()[i]);
}
if (input_tensor.dtype() == phi::DataType::FLOAT32) {
tensor.ShareExternalData(
static_cast<float *>(input_tensor.data()), shape,
ToPaddleInferPlace(input_tensor.place().GetType()));
} else if (input_tensor.dtype() == phi::DataType::FLOAT16) {
tensor.ShareExternalData(
static_cast<paddle::platform::float16 *>(input_tensor.data()), shape,
ToPaddleInferPlace(input_tensor.place().GetType()));
}
}
/// \brief Experimental interface.
/// Create the Strings tensor from data.
/// \param tensor The tensor will be created and
......@@ -327,6 +357,7 @@ void CopyPaddleInferTensor(paddle_infer::Tensor &dst, // NOLINT
void BindInferenceApi(py::module *m) {
BindPaddleDType(m);
BindPaddleDataLayout(m);
BindPaddleBuf(m);
BindPaddleTensor(m);
BindPaddlePlace(m);
......@@ -372,6 +403,14 @@ void BindPaddleDType(py::module *m) {
.value("INT32", PaddleDType::INT32);
}
/// \brief Expose the PaddleDataLayout enum to Python as "PaddleDataLayout"
/// with members UNK, Any, NHWC and NCHW.
void BindPaddleDataLayout(py::module *m) {
  auto layout_enum = py::enum_<PaddleDataLayout>(*m, "PaddleDataLayout");
  layout_enum.value("UNK", PaddleDataLayout::kUNK);
  layout_enum.value("Any", PaddleDataLayout::kAny);
  layout_enum.value("NHWC", PaddleDataLayout::kNHWC);
  layout_enum.value("NCHW", PaddleDataLayout::kNCHW);
}
void BindPaddleBuf(py::module *m) {
py::class_<PaddleBuf>(*m, "PaddleBuf")
.def(py::init<size_t>())
......@@ -817,6 +856,7 @@ void BindPaddleInferTensor(py::module *m) {
.def("copy_from_cpu_bind",
&PaddleInferTensorCreate<paddle_infer::float16>)
.def("copy_from_cpu_bind", &PaddleInferStringTensorCreate)
.def("share_external_data_bind", &PaddleInferShareExternalData)
.def("copy_to_cpu", &PaddleInferTensorToNumpy)
.def("shape", &paddle_infer::Tensor::shape)
.def("set_lod", &paddle_infer::Tensor::SetLoD)
......
......@@ -14,6 +14,7 @@
from ..core import AnalysisConfig, PaddleDType, PaddlePlace
from ..core import PaddleInferPredictor, PaddleInferTensor
from .. import core
import numpy as np
......@@ -39,4 +40,16 @@ def tensor_copy_from_cpu(self, data):
)
def tensor_share_external_data(self, data):
    '''
    Type-checked wrapper around tensor.share_external_data.

    Accepts only core.LoDTensor inputs and forwards them to the
    native share_external_data_bind; any other type raises TypeError.
    '''
    if not isinstance(data, core.LoDTensor):
        raise TypeError(
            "In share_external_data, we only support LoDTensor data type.")
    self.share_external_data_bind(data)
# Monkey-patch the pybind-exported Tensor class so the type-checked Python
# wrappers above become its public copy_from_cpu / share_external_data API.
Tensor.copy_from_cpu = tensor_copy_from_cpu
Tensor.share_external_data = tensor_share_external_data
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册