diff --git a/paddle/fluid/inference/api/paddle_inference_api.h b/paddle/fluid/inference/api/paddle_inference_api.h index da5d7411693c92eaa2066c7f76d56970f8939bc7..a58b510ecf16a4bb2e2be9f4c2946a550ea20d2d 100644 --- a/paddle/fluid/inference/api/paddle_inference_api.h +++ b/paddle/fluid/inference/api/paddle_inference_api.h @@ -73,7 +73,7 @@ class PD_INFER_DECL Tensor { class PD_INFER_DECL Predictor { public: - Predictor() = default; + Predictor() = delete; ~Predictor() {} // Use for clone explicit Predictor(std::unique_ptr<PaddlePredictor>&& pred) diff --git a/paddle/fluid/pybind/inference_api.cc b/paddle/fluid/pybind/inference_api.cc index 040dd313f1c538b5792538f9da04635ff805b9a8..be4d90597e1e1c647ac6750ee7cebdc2ede8a551 100644 --- a/paddle/fluid/pybind/inference_api.cc +++ b/paddle/fluid/pybind/inference_api.cc @@ -60,6 +60,9 @@ void BindAnalysisConfig(py::module *m); void BindAnalysisPredictor(py::module *m); void BindZeroCopyTensor(py::module *m); void BindPaddlePassBuilder(py::module *m); +void BindPaddleInferPredictor(py::module *m); +void BindPaddleInferTensor(py::module *m); +void BindPredictorPool(py::module *m); #ifdef PADDLE_WITH_MKLDNN void BindMkldnnQuantizerConfig(py::module *m); @@ -139,6 +142,15 @@ void ZeroCopyTensorCreate(ZeroCopyTensor &tensor, // NOLINT tensor.copy_from_cpu(static_cast<const T *>(data.data())); } +template <typename T> +void PaddleInferTensorCreate(paddle_infer::Tensor &tensor, // NOLINT + py::array_t<T> data) { + std::vector<int> shape; + std::copy_n(data.shape(), data.ndim(), std::back_inserter(shape)); + tensor.Reshape(std::move(shape)); + tensor.CopyFromCpu(static_cast<const T *>(data.data())); +} + size_t PaddleGetDTypeSize(PaddleDType dt) { size_t size{0}; switch (dt) { @@ -183,6 +195,30 @@ py::array ZeroCopyTensorToNumpy(ZeroCopyTensor &tensor) { // NOLINT return array; } +py::array PaddleInferTensorToNumpy(paddle_infer::Tensor &tensor) { // NOLINT + py::dtype dt = PaddleDTypeToNumpyDType(tensor.type()); + auto tensor_shape = tensor.shape(); + py::array::ShapeContainer 
shape(tensor_shape.begin(), tensor_shape.end()); + py::array array(dt, std::move(shape)); + + switch (tensor.type()) { + case PaddleDType::INT32: + tensor.CopyToCpu(static_cast<int32_t *>(array.mutable_data())); + break; + case PaddleDType::INT64: + tensor.CopyToCpu(static_cast<int64_t *>(array.mutable_data())); + break; + case PaddleDType::FLOAT32: + tensor.CopyToCpu(static_cast<float *>(array.mutable_data())); + break; + default: + PADDLE_THROW(platform::errors::Unimplemented( + "Unsupported data type. Now only supports INT32, INT64 and " + "FLOAT32.")); + } + return array; +} + py::bytes SerializePDTensorToBytes(PaddleTensor &tensor) { // NOLINT std::stringstream ss; paddle::inference::SerializePDTensorToStream(&ss, tensor); @@ -200,8 +236,11 @@ void BindInferenceApi(py::module *m) { BindNativePredictor(m); BindAnalysisConfig(m); BindAnalysisPredictor(m); + BindPaddleInferPredictor(m); BindZeroCopyTensor(m); + BindPaddleInferTensor(m); BindPaddlePassBuilder(m); + BindPredictorPool(m); #ifdef PADDLE_WITH_MKLDNN BindMkldnnQuantizerConfig(m); #endif @@ -209,8 +248,17 @@ void BindInferenceApi(py::module *m) { &paddle::CreatePaddlePredictor<AnalysisConfig>, py::arg("config")); m->def("create_paddle_predictor", &paddle::CreatePaddlePredictor<NativeConfig>, py::arg("config")); + m->def("create_predictor", [](const paddle_infer::Config &config) + -> std::unique_ptr<paddle_infer::Predictor> { + auto pred = + std::unique_ptr<paddle_infer::Predictor>( + new paddle_infer::Predictor(config)); + return std::move(pred); + }); m->def("paddle_dtype_size", &paddle::PaddleDtypeSize); m->def("paddle_tensor_to_bytes", &SerializePDTensorToBytes); + m->def("get_version", &paddle_infer::GetVersion); + m->def("get_num_bytes_of_data_type", &paddle_infer::GetNumBytesOfDataType); } namespace { @@ -525,6 +573,19 @@ void BindAnalysisPredictor(py::module *m) { py::arg("dir")); } +void BindPaddleInferPredictor(py::module *m) { + py::class_<paddle_infer::Predictor>(*m, "PaddleInferPredictor") + .def(py::init<const paddle_infer::Config &>()) + .def("get_input_names", &paddle_infer::Predictor::GetInputNames) + .def("get_output_names", 
&paddle_infer::Predictor::GetOutputNames) + .def("get_input_handle", &paddle_infer::Predictor::GetInputHandle) + .def("get_output_handle", &paddle_infer::Predictor::GetOutputHandle) + .def("run", &paddle_infer::Predictor::Run) + .def("clone", &paddle_infer::Predictor::Clone) + .def("clear_intermediate_tensor", + &paddle_infer::Predictor::ClearIntermediateTensor); +} + void BindZeroCopyTensor(py::module *m) { py::class_<ZeroCopyTensor>(*m, "ZeroCopyTensor") .def("reshape", &ZeroCopyTensor::Reshape) @@ -538,6 +599,26 @@ void BindZeroCopyTensor(py::module *m) { .def("type", &ZeroCopyTensor::type); } +void BindPaddleInferTensor(py::module *m) { + py::class_<paddle_infer::Tensor>(*m, "PaddleInferTensor") + .def("reshape", &paddle_infer::Tensor::Reshape) + .def("copy_from_cpu", &PaddleInferTensorCreate<int32_t>) + .def("copy_from_cpu", &PaddleInferTensorCreate<int64_t>) + .def("copy_from_cpu", &PaddleInferTensorCreate<float>) + .def("copy_to_cpu", &PaddleInferTensorToNumpy) + .def("shape", &paddle_infer::Tensor::shape) + .def("set_lod", &paddle_infer::Tensor::SetLoD) + .def("lod", &paddle_infer::Tensor::lod) + .def("type", &paddle_infer::Tensor::type); +} + +void BindPredictorPool(py::module *m) { + py::class_<paddle_infer::services::PredictorPool>(*m, "PredictorPool") + .def(py::init<const paddle_infer::Config &, size_t>()) + .def("retrive", &paddle_infer::services::PredictorPool::Retrive, + py::return_value_policy::reference); +} + void BindPaddlePassBuilder(py::module *m) { py::class_<PaddlePassBuilder>(*m, "PaddlePassBuilder") .def(py::init<const std::vector<std::string> &>()) diff --git a/python/paddle/fluid/inference/__init__.py b/python/paddle/fluid/inference/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3013c1f2aff87fb293ea984c99d8336b418ee080 --- /dev/null +++ b/python/paddle/fluid/inference/__init__.py @@ -0,0 +1,17 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .wrapper import Config, DataType, PlaceType, PrecisionType, Tensor, Predictor + +from ..core import create_predictor, get_version, get_num_bytes_of_data_type, PredictorPool diff --git a/python/paddle/fluid/inference/wrapper.py b/python/paddle/fluid/inference/wrapper.py new file mode 100644 index 0000000000000000000000000000000000000000..96885edcc5e822beb5db8332f2b58d12b9c4ff63 --- /dev/null +++ b/python/paddle/fluid/inference/wrapper.py @@ -0,0 +1,23 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from ..core import AnalysisConfig, PaddleDType, PaddlePlace +from ..core import PaddleInferPredictor, PaddleInferTensor + +DataType = PaddleDType +PlaceType = PaddlePlace +PrecisionType = AnalysisConfig.Precision +Config = AnalysisConfig +Tensor = PaddleInferTensor +Predictor = PaddleInferPredictor diff --git a/python/setup.py.in b/python/setup.py.in index 64ac2b9b9a4d210c59193e117c6000986bfb07a0..773166400347ab550f82e4fabcb0d89b90818fc2 100644 --- a/python/setup.py.in +++ b/python/setup.py.in @@ -156,6 +156,7 @@ packages=['paddle', 'paddle.framework', 'paddle.jit', 'paddle.fluid', + 'paddle.fluid.inference', 'paddle.fluid.dygraph', 'paddle.fluid.dygraph.dygraph_to_static', 'paddle.fluid.dygraph.amp',