Unverified · Commit 8ad635d5 · Authored by: J JingZhuangzhuang · Committed by: GitHub

share_data interface support paddle.Tensor type (#50240)

Parent c36c7199
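For context, a minimal sketch of how the patched interface is used from Python (dynamic graph mode; the model files and input shape here are illustrative placeholders, not part of this commit):

```python
import numpy as np
import paddle
from paddle.inference import Config, create_predictor

# Illustrative paths; substitute any saved inference model.
config = Config("./model.pdmodel", "./model.pdiparams")
predictor = create_predictor(config)

in_names = predictor.get_input_names()
in_handle = predictor.get_input_handle(in_names[0])

# With this patch, share_external_data accepts a paddle.Tensor directly
# (FLOAT32, FLOAT16, INT32 or INT64) and shares its buffer with the
# predictor rather than copying it, unlike copy_from_cpu.
in_data = paddle.to_tensor(np.ones((1, 6, 32, 32), dtype=np.float32))
in_handle.share_external_data(in_data)
predictor.run()
```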
@@ -37,6 +37,9 @@
 #include "paddle/fluid/inference/api/paddle_inference_api.h"
 #include "paddle/fluid/inference/api/paddle_pass_builder.h"
 #include "paddle/fluid/inference/utils/io_utils.h"
+#include "paddle/fluid/pybind/eager.h"
+#include "paddle/fluid/pybind/eager_utils.h"
+#include "paddle/phi/api/include/tensor.h"
 #include "paddle/phi/core/compat/convert_utils.h"
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
@@ -259,6 +262,55 @@ void PaddleInferShareExternalData(paddle_infer::Tensor &tensor,  // NOLINT
         static_cast<paddle::platform::float16 *>(input_tensor.data()),
         shape,
         ToPaddleInferPlace(input_tensor.place().GetType()));
+  } else if (input_tensor.dtype() == phi::DataType::INT32) {
+    tensor.ShareExternalData(
+        static_cast<int32_t *>(input_tensor.data()),
+        shape,
+        ToPaddleInferPlace(input_tensor.place().GetType()));
+  } else if (input_tensor.dtype() == phi::DataType::INT64) {
+    tensor.ShareExternalData(
+        static_cast<int64_t *>(input_tensor.data()),
+        shape,
+        ToPaddleInferPlace(input_tensor.place().GetType()));
+  } else {
+    PADDLE_THROW(platform::errors::Unimplemented(
+        "Unsupported data type. Now share_external_data only supports INT32, "
+        "INT64, FLOAT32 and FLOAT16."));
+  }
+}
+
+void PaddleTensorShareExternalData(
+    paddle_infer::Tensor &tensor,  // NOLINT
+    paddle::experimental::Tensor &&paddle_tensor) {
+  std::vector<int> shape;
+  for (int i = 0; i < paddle_tensor.dims().size(); ++i) {
+    shape.push_back(paddle_tensor.dims()[i]);
+  }
+  if (paddle_tensor.dtype() == paddle::experimental::DataType::FLOAT32) {
+    tensor.ShareExternalData(
+        static_cast<float *>(paddle_tensor.data<float>()),
+        shape,
+        ToPaddleInferPlace(paddle_tensor.place().GetType()));
+  } else if (paddle_tensor.dtype() == paddle::experimental::DataType::FLOAT16) {
+    tensor.ShareExternalData(
+        static_cast<paddle::platform::float16 *>(
+            paddle_tensor.data<paddle::platform::float16>()),
+        shape,
+        ToPaddleInferPlace(paddle_tensor.place().GetType()));
+  } else if (paddle_tensor.dtype() == paddle::experimental::DataType::INT32) {
+    tensor.ShareExternalData(
+        static_cast<int32_t *>(paddle_tensor.data<int32_t>()),
+        shape,
+        ToPaddleInferPlace(paddle_tensor.place().GetType()));
+  } else if (paddle_tensor.dtype() == paddle::experimental::DataType::INT64) {
+    tensor.ShareExternalData(
+        static_cast<int64_t *>(paddle_tensor.data<int64_t>()),
+        shape,
+        ToPaddleInferPlace(paddle_tensor.place().GetType()));
+  } else {
+    PADDLE_THROW(platform::errors::Unimplemented(
+        "Unsupported data type. Now share_external_data only supports INT32, "
+        "INT64, FLOAT32 and FLOAT16."));
   }
 }
@@ -1043,16 +1095,22 @@ void BindPaddleInferTensor(py::module *m) {
       .def("reshape",
            py::overload_cast<const std::size_t &>(
                &paddle_infer::Tensor::ReshapeStrings))
-      .def("copy_from_cpu_bind", &PaddleInferTensorCreate<int8_t>)
-      .def("copy_from_cpu_bind", &PaddleInferTensorCreate<uint8_t>)
-      .def("copy_from_cpu_bind", &PaddleInferTensorCreate<int32_t>)
-      .def("copy_from_cpu_bind", &PaddleInferTensorCreate<int64_t>)
-      .def("copy_from_cpu_bind", &PaddleInferTensorCreate<float>)
-      .def("copy_from_cpu_bind",
-           &PaddleInferTensorCreate<paddle_infer::float16>)
-      .def("copy_from_cpu_bind", &PaddleInferTensorCreate<bool>)
-      .def("copy_from_cpu_bind", &PaddleInferStringTensorCreate)
-      .def("share_external_data_bind", &PaddleInferShareExternalData)
+      .def("_copy_from_cpu_bind", &PaddleInferTensorCreate<int8_t>)
+      .def("_copy_from_cpu_bind", &PaddleInferTensorCreate<uint8_t>)
+      .def("_copy_from_cpu_bind", &PaddleInferTensorCreate<int32_t>)
+      .def("_copy_from_cpu_bind", &PaddleInferTensorCreate<int64_t>)
+      .def("_copy_from_cpu_bind", &PaddleInferTensorCreate<float>)
+      .def("_copy_from_cpu_bind",
+           &PaddleInferTensorCreate<paddle_infer::float16>)
+      .def("_copy_from_cpu_bind", &PaddleInferTensorCreate<bool>)
+      .def("_copy_from_cpu_bind", &PaddleInferStringTensorCreate)
+      .def("_share_external_data_bind", &PaddleInferShareExternalData)
+      .def("_share_external_data_paddle_tensor_bind",
+           [](paddle_infer::Tensor &self, const py::handle &input) {
+             PyObject *obj = input.ptr();
+             PaddleTensorShareExternalData(self,
+                                           std::move(CastPyArg2Tensor(obj, 0)));
+           })
       .def("copy_to_cpu", &PaddleInferTensorToNumpy)
       .def("shape", &paddle_infer::Tensor::shape)
       .def("set_lod", &paddle_infer::Tensor::SetLoD)
...
@@ -119,16 +119,55 @@ class TestInferenceBaseAPI(unittest.TestCase):
         predictor.run()

     def test_wrong_input(self):
-        with self.assertRaises(TypeError):
-            program, params = get_sample_model()
-            config = self.get_config(program, params)
-            predictor = create_predictor(config)
-            in_names = predictor.get_input_names()
-            in_handle = predictor.get_input_handle(in_names[0])
-            in_data = np.ones((1, 6, 64, 64)).astype(np.float32)
-            in_handle.copy_from_cpu(list(in_data))
-            predictor.run()
+        program, params = get_sample_model()
+        config = self.get_config(program, params)
+        predictor = create_predictor(config)
+        in_names = predictor.get_input_names()
+        in_handle = predictor.get_input_handle(in_names[0])
+        with self.assertRaises(TypeError):
+            in_data = np.ones((1, 6, 64, 64)).astype(np.float32)
+            in_handle.copy_from_cpu(list(in_data))
+            predictor.run()
+        with self.assertRaises(TypeError):
+            in_handle.share_external_data(
+                paddle.to_tensor(
+                    np.full((1, 6, 32, 32), 1.0, "float32"),
+                    place=paddle.CPUPlace(),
+                )
+            )
+            predictor.run()
+
+    def test_share_external_data(self):
+        program, params = get_sample_model()
+
+        def test_lod_tensor():
+            config = Config()
+            config.set_model_buffer(program, len(program), params, len(params))
+            predictor = create_predictor(config)
+            in_names = predictor.get_input_names()
+            in_handle = predictor.get_input_handle(in_names[0])
+            in_data = paddle.fluid.create_lod_tensor(
+                np.full((1, 6, 32, 32), 1.0, "float32"),
+                [[1]],
+                paddle.fluid.CPUPlace(),
+            )
+            in_handle.share_external_data(in_data)
+            predictor.run()
+
+        def test_paddle_tensor():
+            config = self.get_config(program, params)
+            predictor = create_predictor(config)
+            in_names = predictor.get_input_names()
+            in_handle = predictor.get_input_handle(in_names[0])
+            in_data = paddle.Tensor(np.ones((1, 6, 32, 32)).astype(np.float32))
+            in_handle.share_external_data(in_data)
+            predictor.run()
+
+        test_lod_tensor()
+        test_paddle_tensor()

 if __name__ == '__main__':
     unittest.main()
@@ -17,6 +17,7 @@ from typing import Set
 import numpy as np
+import paddle
 import paddle.fluid.core as core
 from paddle.fluid.core import (
     AnalysisConfig,
@@ -42,7 +43,7 @@ def tensor_copy_from_cpu(self, data):
     if isinstance(data, np.ndarray) or (
         isinstance(data, list) and len(data) > 0 and isinstance(data[0], str)
     ):
-        self.copy_from_cpu_bind(data)
+        self._copy_from_cpu_bind(data)
     else:
         raise TypeError(
             "In copy_from_cpu, we only support numpy ndarray and list[str] data type."
@@ -54,10 +55,18 @@ def tensor_share_external_data(self, data):
     Support input type check based on tensor.share_external_data.
     '''
     if isinstance(data, core.LoDTensor):
-        self.share_external_data_bind(data)
+        self._share_external_data_bind(data)
+    elif isinstance(data, paddle.Tensor):
+        self._share_external_data_paddle_tensor_bind(data)
+    elif isinstance(data, paddle.fluid.framework.Variable):
+        raise TypeError(
+            "The interface 'share_external_data' can only be used in dynamic graph mode. "
+            "Maybe you called 'paddle.enable_static()' and you are in static graph mode now. "
+            "Please use 'copy_from_cpu' instead."
+        )
     else:
         raise TypeError(
-            "In share_external_data, we only support LoDTensor data type."
+            "In share_external_data, we only support Tensor and LoDTensor."
         )
...
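The `Variable` branch added above guards the static-graph case that the updated `test_wrong_input` exercises: under `paddle.enable_static()`, `paddle.to_tensor` builds a graph `Variable` rather than an eager `paddle.Tensor`, so sharing it is rejected with a pointer to `copy_from_cpu`. A small sketch of the type distinction the dispatch relies on (assuming Paddle 2.x behavior as of this commit):

```python
import numpy as np
import paddle

# Dynamic graph mode (the default): an eager paddle.Tensor, which the
# patched tensor_share_external_data forwards to the new pybind lambda.
x = paddle.to_tensor(np.ones((1, 6, 32, 32), dtype=np.float32))
print(isinstance(x, paddle.Tensor))  # True

paddle.enable_static()

# Static graph mode: the same call now yields a framework Variable, which
# the patched check rejects with a TypeError suggesting copy_from_cpu.
y = paddle.to_tensor(np.ones((1, 6, 32, 32), dtype=np.float32))
print(isinstance(y, paddle.fluid.framework.Variable))  # True
```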