diff --git a/paddle/fluid/pybind/inference_api.cc b/paddle/fluid/pybind/inference_api.cc
index 8f74ef7e720f36fb98fa2224f433fc8843137320..eb9a0c0b338efecad6c5569e5690fbf44a34ad4a 100644
--- a/paddle/fluid/pybind/inference_api.cc
+++ b/paddle/fluid/pybind/inference_api.cc
@@ -37,6 +37,9 @@
 #include "paddle/fluid/inference/api/paddle_inference_api.h"
 #include "paddle/fluid/inference/api/paddle_pass_builder.h"
 #include "paddle/fluid/inference/utils/io_utils.h"
+#include "paddle/fluid/pybind/eager.h"
+#include "paddle/fluid/pybind/eager_utils.h"
+#include "paddle/phi/api/include/tensor.h"
 #include "paddle/phi/core/compat/convert_utils.h"
 
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
@@ -259,6 +262,55 @@ void PaddleInferShareExternalData(paddle_infer::Tensor &tensor,  // NOLINT
         static_cast<paddle::platform::float16 *>(input_tensor.data()),
         shape,
         ToPaddleInferPlace(input_tensor.place().GetType()));
+  } else if (input_tensor.dtype() == phi::DataType::INT32) {
+    tensor.ShareExternalData(
+        static_cast<int32_t *>(input_tensor.data()),
+        shape,
+        ToPaddleInferPlace(input_tensor.place().GetType()));
+  } else if (input_tensor.dtype() == phi::DataType::INT64) {
+    tensor.ShareExternalData(
+        static_cast<int64_t *>(input_tensor.data()),
+        shape,
+        ToPaddleInferPlace(input_tensor.place().GetType()));
+  } else {
+    PADDLE_THROW(platform::errors::Unimplemented(
+        "Unsupported data type. Now share_external_data only supports INT32, "
+        "INT64, FLOAT32 and FLOAT16."));
+  }
+}
+
+void PaddleTensorShareExternalData(
+    paddle_infer::Tensor &tensor,  // NOLINT
+    paddle::experimental::Tensor &&paddle_tensor) {
+  std::vector<int> shape;
+  for (int i = 0; i < paddle_tensor.dims().size(); ++i) {
+    shape.push_back(paddle_tensor.dims()[i]);
+  }
+  if (paddle_tensor.dtype() == paddle::experimental::DataType::FLOAT32) {
+    tensor.ShareExternalData(
+        static_cast<float *>(paddle_tensor.data<float>()),
+        shape,
+        ToPaddleInferPlace(paddle_tensor.place().GetType()));
+  } else if (paddle_tensor.dtype() == paddle::experimental::DataType::FLOAT16) {
+    tensor.ShareExternalData(
+        static_cast<paddle::platform::float16 *>(
+            paddle_tensor.data<paddle::platform::float16>()),
+        shape,
+        ToPaddleInferPlace(paddle_tensor.place().GetType()));
+  } else if (paddle_tensor.dtype() == paddle::experimental::DataType::INT32) {
+    tensor.ShareExternalData(
+        static_cast<int32_t *>(paddle_tensor.data<int32_t>()),
+        shape,
+        ToPaddleInferPlace(paddle_tensor.place().GetType()));
+  } else if (paddle_tensor.dtype() == paddle::experimental::DataType::INT64) {
+    tensor.ShareExternalData(
+        static_cast<int64_t *>(paddle_tensor.data<int64_t>()),
+        shape,
+        ToPaddleInferPlace(paddle_tensor.place().GetType()));
+  } else {
+    PADDLE_THROW(platform::errors::Unimplemented(
+        "Unsupported data type. Now share_external_data only supports INT32, "
+        "INT64, FLOAT32 and FLOAT16."));
   }
 }
@@ -1043,16 +1095,22 @@ void BindPaddleInferTensor(py::module *m) {
       .def("reshape",
            py::overload_cast<const std::size_t &>(
                &paddle_infer::Tensor::ReshapeStrings))
-      .def("copy_from_cpu_bind", &PaddleInferTensorCreate<int8_t>)
-      .def("copy_from_cpu_bind", &PaddleInferTensorCreate<uint8_t>)
-      .def("copy_from_cpu_bind", &PaddleInferTensorCreate<int32_t>)
-      .def("copy_from_cpu_bind", &PaddleInferTensorCreate<int64_t>)
-      .def("copy_from_cpu_bind", &PaddleInferTensorCreate<float>)
-      .def("copy_from_cpu_bind",
+      .def("_copy_from_cpu_bind", &PaddleInferTensorCreate<int8_t>)
+      .def("_copy_from_cpu_bind", &PaddleInferTensorCreate<uint8_t>)
+      .def("_copy_from_cpu_bind", &PaddleInferTensorCreate<int32_t>)
+      .def("_copy_from_cpu_bind", &PaddleInferTensorCreate<int64_t>)
+      .def("_copy_from_cpu_bind", &PaddleInferTensorCreate<float>)
+      .def("_copy_from_cpu_bind",
            &PaddleInferTensorCreate<paddle_infer::float16>)
-      .def("copy_from_cpu_bind", &PaddleInferTensorCreate<bool>)
-      .def("copy_from_cpu_bind", &PaddleInferStringTensorCreate)
-      .def("share_external_data_bind", &PaddleInferShareExternalData)
+      .def("_copy_from_cpu_bind", &PaddleInferTensorCreate<bool>)
+      .def("_copy_from_cpu_bind", &PaddleInferStringTensorCreate)
+      .def("_share_external_data_bind", &PaddleInferShareExternalData)
+      .def("_share_external_data_paddle_tensor_bind",
+           [](paddle_infer::Tensor &self, const py::handle &input) {
+             PyObject *obj = input.ptr();
+             PaddleTensorShareExternalData(self,
+                                           std::move(CastPyArg2Tensor(obj, 0)));
+           })
       .def("copy_to_cpu", &PaddleInferTensorToNumpy)
       .def("shape", &paddle_infer::Tensor::shape)
       .def("set_lod", &paddle_infer::Tensor::SetLoD)
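Note on the hunks above: they widen `PaddleInferShareExternalData` to INT32/INT64 and add `PaddleTensorShareExternalData`, which aliases the buffer of a `paddle::experimental::Tensor` instead of copying it; the new `_share_external_data_paddle_tensor_bind` entry exposes that path to Python. A minimal sketch of what this enables from the Python side (the model paths are hypothetical; any model with a float32 input works):

    import numpy as np
    import paddle
    from paddle.inference import Config, create_predictor

    config = Config("./sample.pdmodel", "./sample.pdiparams")  # hypothetical files
    predictor = create_predictor(config)
    in_handle = predictor.get_input_handle(predictor.get_input_names()[0])

    # share_external_data aliases the tensor's buffer (no host memcpy, unlike
    # copy_from_cpu), so keep `x` alive until predictor.run() has finished.
    x = paddle.to_tensor(np.ones((1, 6, 32, 32), dtype=np.float32))
    in_handle.share_external_data(x)
    predictor.run()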
diff --git a/python/paddle/fluid/tests/unittests/test_inference_api.py b/python/paddle/fluid/tests/unittests/test_inference_api.py
index 289146035b4258e6f609a4b1fa4a34306775ba38..be470180c2f85f81e229afde221324aa03227395 100644
--- a/python/paddle/fluid/tests/unittests/test_inference_api.py
+++ b/python/paddle/fluid/tests/unittests/test_inference_api.py
@@ -119,16 +119,55 @@ class TestInferenceBaseAPI(unittest.TestCase):
         predictor.run()
 
     def test_wrong_input(self):
+        program, params = get_sample_model()
+        config = self.get_config(program, params)
+        predictor = create_predictor(config)
+        in_names = predictor.get_input_names()
+        in_handle = predictor.get_input_handle(in_names[0])
+
         with self.assertRaises(TypeError):
-            program, params = get_sample_model()
+            in_data = np.ones((1, 6, 64, 64)).astype(np.float32)
+            in_handle.copy_from_cpu(list(in_data))
+            predictor.run()
+
+        with self.assertRaises(TypeError):
+            in_handle.share_external_data(
+                paddle.to_tensor(
+                    np.full((1, 6, 32, 32), 1.0, "float32"),
+                    place=paddle.CPUPlace(),
+                )
+            )
+            predictor.run()
+
+    def test_share_external_data(self):
+        program, params = get_sample_model()
+
+        def test_lod_tensor():
+            config = Config()
+            config.set_model_buffer(program, len(program), params, len(params))
+            predictor = create_predictor(config)
+            in_names = predictor.get_input_names()
+            in_handle = predictor.get_input_handle(in_names[0])
+            in_data = paddle.fluid.create_lod_tensor(
+                np.full((1, 6, 32, 32), 1.0, "float32"),
+                [[1]],
+                paddle.fluid.CPUPlace(),
+            )
+            in_handle.share_external_data(in_data)
+            predictor.run()
+
+        def test_paddle_tensor():
             config = self.get_config(program, params)
             predictor = create_predictor(config)
             in_names = predictor.get_input_names()
             in_handle = predictor.get_input_handle(in_names[0])
-            in_data = np.ones((1, 6, 64, 64)).astype(np.float32)
-            in_handle.copy_from_cpu(list(in_data))
+            in_data = paddle.Tensor(np.ones((1, 6, 32, 32)).astype(np.float32))
+            in_handle.share_external_data(in_data)
             predictor.run()
 
+        test_lod_tensor()
+        test_paddle_tensor()
+
 
 if __name__ == '__main__':
     unittest.main()
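The two assertRaises blocks in test_wrong_input pin down the type contract: copy_from_cpu rejects a list of arrays, and share_external_data rejects the result of paddle.to_tensor because this test module runs in static graph mode, where to_tensor does not produce an eager paddle.Tensor. test_paddle_tensor sidesteps that by constructing the eager tensor directly. A small sketch of the distinction (assuming paddle.enable_static() is in effect, as in the test module):

    import numpy as np
    import paddle

    paddle.enable_static()
    eager_t = paddle.Tensor(np.ones((1, 6, 32, 32)).astype(np.float32))
    print(isinstance(eager_t, paddle.Tensor))   # True: eligible for sharing

    static_v = paddle.to_tensor(np.full((1, 6, 32, 32), 1.0, "float32"))
    print(isinstance(static_v, paddle.Tensor))  # False here: rejected with TypeError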
diff --git a/python/paddle/inference/wrapper.py b/python/paddle/inference/wrapper.py
index ab532dc32266dcfa9c7d92d9bdb2d00891f888df..7899973ed2cb9561d03764124a8c0de217b13633 100644
--- a/python/paddle/inference/wrapper.py
+++ b/python/paddle/inference/wrapper.py
@@ -17,6 +17,7 @@ from typing import Set
 
 import numpy as np
 
+import paddle
 import paddle.fluid.core as core
 from paddle.fluid.core import (
     AnalysisConfig,
@@ -42,7 +43,7 @@ def tensor_copy_from_cpu(self, data):
     if isinstance(data, np.ndarray) or (
         isinstance(data, list) and len(data) > 0 and isinstance(data[0], str)
     ):
-        self.copy_from_cpu_bind(data)
+        self._copy_from_cpu_bind(data)
     else:
         raise TypeError(
             "In copy_from_cpu, we only support numpy ndarray and list[str] data type."
@@ -54,10 +55,18 @@ def tensor_share_external_data(self, data):
     '''
     Support input type check based on tensor.share_external_data.
     '''
     if isinstance(data, core.LoDTensor):
-        self.share_external_data_bind(data)
+        self._share_external_data_bind(data)
+    elif isinstance(data, paddle.Tensor):
+        self._share_external_data_paddle_tensor_bind(data)
+    elif isinstance(data, paddle.fluid.framework.Variable):
+        raise TypeError(
+            "The interface 'share_external_data' can only be used in dynamic graph mode. "
+            "Maybe you called 'paddle.enable_static()' and you are in static graph mode now. "
+            "Please use 'copy_from_cpu' instead."
+        )
     else:
         raise TypeError(
-            "In share_external_data, we only support LoDTensor data type."
+            "In share_external_data, we only support Tensor and LoDTensor."
         )
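For reference, the dispatch order that the rewritten tensor_share_external_data establishes, mirrored as a standalone sketch (dispatch_kind is a made-up illustration, not part of the patch):

    import paddle
    import paddle.fluid.core as core

    def dispatch_kind(data):
        # Same isinstance chain as above: LoDTensor first, then eager Tensor,
        # then the static-graph guard, then the generic rejection.
        if isinstance(data, core.LoDTensor):
            return "_share_external_data_bind"
        if isinstance(data, paddle.Tensor):
            return "_share_external_data_paddle_tensor_bind"
        if isinstance(data, paddle.fluid.framework.Variable):
            raise TypeError("dynamic graph mode only; use copy_from_cpu instead")
        raise TypeError("only Tensor and LoDTensor are supported")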