From c1a96315a8889e129f0f07523c2f968e430139ec Mon Sep 17 00:00:00 2001
From: MRXLT
Date: Mon, 9 Mar 2020 14:35:12 +0800
Subject: [PATCH] use PredictorRes

---
 core/general-client/include/general_model.h  | 29 +------------------
 core/general-client/src/general_model.cpp    | 25 +++++++---------
 .../src/pybind_general_model.cpp             |  8 +----
 python/paddle_serving_client/__init__.py     | 11 ++++---
 4 files changed, 18 insertions(+), 55 deletions(-)

diff --git a/core/general-client/include/general_model.h b/core/general-client/include/general_model.h
index 6982bb3e..5a941c54 100644
--- a/core/general-client/include/general_model.h
+++ b/core/general-client/include/general_model.h
@@ -59,18 +59,6 @@ class PredictorRes {
   std::map<std::string, std::vector<std::vector<float>>> _float_map;
 };
 
-class PredictorResBatch {
- public:
-  PredictorResBatch() {}
-  ~PredictorResBatch() {}
-
- public:
-  const PredictorRes& at(const int index) { return _predictres_vector[index]; }
-
- public:
-  std::vector<PredictorRes> _predictres_vector;
-};
-
 class PredictorClient {
  public:
   PredictorClient() {}
@@ -96,28 +84,13 @@ class PredictorClient {
                 PredictorRes& predict_res,  // NOLINT
                 const int& pid);
 
-  std::vector<std::vector<float>> predict(
-      const std::vector<std::vector<float>>& float_feed,
-      const std::vector<std::string>& float_feed_name,
-      const std::vector<std::vector<int64_t>>& int_feed,
-      const std::vector<std::string>& int_feed_name,
-      const std::vector<std::string>& fetch_name);
-
   int batch_predict(
       const std::vector<std::vector<std::vector<float>>>& float_feed_batch,
       const std::vector<std::string>& float_feed_name,
       const std::vector<std::vector<std::vector<int64_t>>>& int_feed_batch,
       const std::vector<std::string>& int_feed_name,
       const std::vector<std::string>& fetch_name,
-      PredictorResBatch& predict_res,  // NOLINT
-      const int& pid);
-
-  std::vector<std::vector<std::vector<float>>> batch_predict(
-      const std::vector<std::vector<std::vector<float>>>& float_feed_batch,
-      const std::vector<std::string>& float_feed_name,
-      const std::vector<std::vector<std::vector<int64_t>>>& int_feed_batch,
-      const std::vector<std::string>& int_feed_name,
-      const std::vector<std::string>& fetch_name,
+      PredictorRes& predict_res_batch,  // NOLINT
       const int& pid);
 
  private:
diff --git a/core/general-client/src/general_model.cpp b/core/general-client/src/general_model.cpp
index a0f63dac..d1ad58d4 100644
--- a/core/general-client/src/general_model.cpp
+++ b/core/general-client/src/general_model.cpp
@@ -270,14 +270,15 @@ int PredictorClient::batch_predict(
     const std::vector<std::vector<std::vector<int64_t>>> &int_feed_batch,
     const std::vector<std::string> &int_feed_name,
     const std::vector<std::string> &fetch_name,
-    PredictorResBatch &predict_res_batch,
+    PredictorRes &predict_res_batch,
     const int &pid) {
   int batch_size = std::max(float_feed_batch.size(), int_feed_batch.size());
+  predict_res_batch._int64_map.clear();
+  predict_res_batch._float_map.clear();
   Timer timeline;
   int64_t preprocess_start = timeline.TimeStampUS();
 
-  predict_res_batch._predictres_vector.resize(batch_size);
   int fetch_name_num = fetch_name.size();
 
   _api.thrd_clear();
@@ -366,37 +367,33 @@ int PredictorClient::batch_predict(
   } else {
     client_infer_end = timeline.TimeStampUS();
     postprocess_start = client_infer_end;
-
+    for (auto &name : fetch_name) {
+      predict_res_batch._int64_map[name].resize(batch_size);
+      predict_res_batch._float_map[name].resize(batch_size);
+    }
     for (int bi = 0; bi < batch_size; bi++) {
-      predict_res_batch._predictres_vector[bi]._int64_map.clear();
-      predict_res_batch._predictres_vector[bi]._float_map.clear();
-
       for (auto &name : fetch_name) {
         int idx = _fetch_name_to_idx[name];
         int len = res.insts(bi).tensor_array(idx).data_size();
         if (_fetch_name_to_type[name] == 0) {
           int len = res.insts(bi).tensor_array(idx).int64_data_size();
           VLOG(2) << "fetch tensor : " << name << " type: int64 len : " << len;
-          predict_res_batch._predictres_vector[bi]._int64_map[name].resize(1);
-          predict_res_batch._predictres_vector[bi]._int64_map[name]
-              [0].resize(len);
+          predict_res_batch._int64_map[name][bi].resize(len);
           VLOG(2) << "fetch name " << name << " index " << idx
                   << " first data "
                   << res.insts(bi).tensor_array(idx).int64_data(0);
           for (int i = 0; i < len; ++i) {
-            predict_res_batch._predictres_vector[bi]._int64_map[name][0][i] =
+            predict_res_batch._int64_map[name][bi][i] =
                 res.insts(bi).tensor_array(idx).int64_data(i);
           }
         } else if (_fetch_name_to_type[name] == 1) {
           int len = res.insts(bi).tensor_array(idx).float_data_size();
           VLOG(2) << "fetch tensor : " << name
                   << " type: float32 len : " << len;
-          predict_res_batch._predictres_vector[bi]._float_map[name].resize(1);
-          predict_res_batch._predictres_vector[bi]._float_map[name]
-              [0].resize(len);
+          predict_res_batch._float_map[name][bi].resize(len);
           VLOG(2) << "fetch name " << name << " index " << idx
                   << " first data "
                   << res.insts(bi).tensor_array(idx).float_data(0);
           for (int i = 0; i < len; ++i) {
-            predict_res_batch._predictres_vector[bi]._float_map[name][0][i] =
+            predict_res_batch._float_map[name][bi][i] =
                 res.insts(bi).tensor_array(idx).float_data(i);
           }
         }
diff --git a/core/general-client/src/pybind_general_model.cpp b/core/general-client/src/pybind_general_model.cpp
index e56dfb1f..0d0ca7bd 100644
--- a/core/general-client/src/pybind_general_model.cpp
+++ b/core/general-client/src/pybind_general_model.cpp
@@ -41,12 +41,6 @@ PYBIND11_MODULE(serving_client, m) {
          },
          py::return_value_policy::reference);
 
-  py::class_<PredictorResBatch>(m, "PredictorResBatch", py::buffer_protocol())
-      .def(py::init())
-      .def("at",
-           [](PredictorResBatch &self, int index) { return self.at(index); },
-           py::return_value_policy::reference);
-
   py::class_<PredictorClient>(m, "PredictorClient", py::buffer_protocol())
       .def(py::init())
       .def("init_gflags",
@@ -97,7 +91,7 @@ PYBIND11_MODULE(serving_client, m) {
                  &int_feed_batch,
              const std::vector<std::string> &int_feed_name,
              const std::vector<std::string> &fetch_name,
-             PredictorResBatch &predict_res_batch,
+             PredictorRes &predict_res_batch,
              const int &pid) {
             return self.batch_predict(float_feed_batch,
                                       float_feed_name,
diff --git a/python/paddle_serving_client/__init__.py b/python/paddle_serving_client/__init__.py
index 532e9fc1..178b5802 100644
--- a/python/paddle_serving_client/__init__.py
+++ b/python/paddle_serving_client/__init__.py
@@ -89,7 +89,6 @@ class Client(object):
     def load_client_config(self, path):
         from .serving_client import PredictorClient
         from .serving_client import PredictorRes
-        from .serving_client import PredictorResBatch
         model_conf = m_config.GeneralModelConfig()
         f = open(path, 'r')
         model_conf = google.protobuf.text_format.Merge(
@@ -100,7 +99,6 @@ class Client(object):
         # get feed shapes, feed types
         # map feed names to index
         self.result_handle_ = PredictorRes()
-        self.result_batch_handle_ = PredictorResBatch()
         self.client_handle_ = PredictorClient()
         self.client_handle_.init(path)
         read_env_flags = ["profile_client", "profile_server"]
@@ -205,20 +203,21 @@ class Client(object):
             if key in self.fetch_names_:
                 fetch_names.append(key)
 
-        result_batch = self.result_batch_handle_
+        result_batch = self.result_handle_
         res = self.client_handle_.batch_predict(
             float_slot_batch, float_feed_names, int_slot_batch, int_feed_names,
             fetch_names, result_batch, self.pid)
 
         result_map_batch = []
         for index in range(batch_size):
-            result = result_batch.at(index)
             result_map = {}
             for i, name in enumerate(fetch_names):
                 if self.fetch_names_to_type_[name] == int_type:
-                    result_map[name] = result.get_int64_by_name(name)[0]
+                    result_map[name] = result_batch.get_int64_by_name(name)[
+                        index]
                 elif self.fetch_names_to_type_[name] == float_type:
-                    result_map[name] = result.get_float_by_name(name)[0]
+                    result_map[name] = result_batch.get_float_by_name(name)[
+                        index]
             result_map_batch.append(result_map)
 
         return result_map_batch
--
GitLab
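
For reference, a sketch of the result-layout change this commit makes, written as the
Python structures the bindings expose. The fetch name "prediction" and the numeric
values are placeholders, not taken from the patch.

    # Before: one PredictorRes per sample, each fetched value wrapped in a
    # one-row list (the resize(1) / [0] indexing removed above).
    #   result_batch.at(bi).get_float_by_name("prediction")   ->  [[0.1, 0.9]]
    #
    # After: a single PredictorRes keyed by fetch name, one row per sample
    # (the _float_map[name].resize(batch_size) / [bi] indexing added above).
    #   result_batch.get_float_by_name("prediction")       ->  [[0.1, 0.9], [0.7, 0.3]]
    #   result_batch.get_float_by_name("prediction")[bi]   ->  the row for sample bi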
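
And a minimal end-to-end sketch of calling the batch API from Python after this
change, assuming the Client.batch_predict entry point keeps its feed_batch/fetch
keyword interface. The config path, endpoint, feed name "words", and fetch name
"prediction" are all illustrative placeholders.

    from paddle_serving_client import Client

    client = Client()
    client.load_client_config("serving_client_conf.prototxt")  # placeholder path
    client.connect(["127.0.0.1:9292"])  # placeholder endpoint

    # Two samples in one batch; one shared PredictorRes now backs the call.
    feed_batch = [{"words": [1, 2, 3]}, {"words": [4, 5, 6]}]
    result_map_batch = client.batch_predict(
        feed_batch=feed_batch, fetch=["prediction"])

    # The wrapper still returns one dict per sample, assembled by the
    # patched loop that indexes get_*_by_name(name)[index].
    for result_map in result_map_batch:
        print(result_map["prediction"])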