diff --git a/core/general-client/include/general_model.h b/core/general-client/include/general_model.h index d2c63efd7e40063efc00b5fac4c536e2bb6df1e2..ca0b27b50e053052c69017d2932f036c95b64c03 100644 --- a/core/general-client/include/general_model.h +++ b/core/general-client/include/general_model.h @@ -85,8 +85,10 @@ class PredictorClient { int predict(const std::vector>& float_feed, const std::vector& float_feed_name, + const std::vector>& float_shape, const std::vector>& int_feed, const std::vector& int_feed_name, + const std::vector>& int_shape, const std::vector& fetch_name, PredictorRes& predict_res, // NOLINT const int& pid); @@ -94,8 +96,10 @@ class PredictorClient { int batch_predict( const std::vector>>& float_feed_batch, const std::vector& float_feed_name, + const std::vector>& float_shape, const std::vector>>& int_feed_batch, const std::vector& int_feed_name, + const std::vector>& int_shape, const std::vector& fetch_name, PredictorRes& predict_res_batch, // NOLINT const int& pid); diff --git a/core/general-client/src/general_model.cpp b/core/general-client/src/general_model.cpp index 1593f90b11532065fe43f3ae17427c3f77b5010d..947953f992d3076ed4a980048b6412abb0692223 100644 --- a/core/general-client/src/general_model.cpp +++ b/core/general-client/src/general_model.cpp @@ -134,8 +134,10 @@ int PredictorClient::create_predictor() { int PredictorClient::predict(const std::vector> &float_feed, const std::vector &float_feed_name, + const std::vector> &float_shape, const std::vector> &int_feed, const std::vector &int_feed_name, + const std::vector> &int_shape, const std::vector &fetch_name, PredictorRes &predict_res, const int &pid) { // NOLINT @@ -164,11 +166,17 @@ int PredictorClient::predict(const std::vector> &float_feed, } int vec_idx = 0; - for (auto &name : float_feed_name) { - int idx = _feed_name_to_idx[name]; + for (int i = 0; i < float_feed_name.size(); ++i) { + int idx = _feed_name_to_idx[float_feed_name[i]]; Tensor *tensor = tensor_vec[idx]; - for 
(int j = 0; j < _shape[idx].size(); ++j) { - tensor->add_shape(_shape[idx][j]); + if (float_shape.size() == 0) { + for (int j = 0; j < _shape[idx].size(); ++j) { + tensor->add_shape(_shape[idx][j]); + } + } else { + for (int j = 0; j < float_shape[i].size(); ++j) { + tensor->add_shape(float_shape[i][j]); + } } tensor->set_elem_type(1); for (int j = 0; j < float_feed[vec_idx].size(); ++j) { @@ -180,11 +188,17 @@ int PredictorClient::predict(const std::vector> &float_feed, VLOG(2) << "feed float feed var done."; vec_idx = 0; - for (auto &name : int_feed_name) { - int idx = _feed_name_to_idx[name]; + for (int i = 0; i < int_feed_name.size(); ++i) { + int idx = _feed_name_to_idx[int_feed_name[i]]; Tensor *tensor = tensor_vec[idx]; - for (int j = 0; j < _shape[idx].size(); ++j) { - tensor->add_shape(_shape[idx][j]); + if (int_shape.size() == 0) { + for (int j = 0; j < _shape[idx].size(); ++j) { + tensor->add_shape(_shape[idx][j]); + } + } else { + for (int j = 0; j < int_shape[i].size(); ++j) { + tensor->add_shape(int_shape[i][j]); + } } tensor->set_elem_type(0); for (int j = 0; j < int_feed[vec_idx].size(); ++j) { @@ -269,8 +283,10 @@ int PredictorClient::predict(const std::vector> &float_feed, int PredictorClient::batch_predict( const std::vector>> &float_feed_batch, const std::vector &float_feed_name, + const std::vector> &float_shape, const std::vector>> &int_feed_batch, const std::vector &int_feed_name, + const std::vector> &int_shape, const std::vector &fetch_name, PredictorRes &predict_res_batch, const int &pid) { @@ -312,11 +328,17 @@ int PredictorClient::batch_predict( VLOG(2) << "batch [" << bi << "] int_feed_name and float_feed_name " << "prepared"; int vec_idx = 0; - for (auto &name : float_feed_name) { - int idx = _feed_name_to_idx[name]; + for (int i = 0; i < float_feed_name.size(); ++i) { + int idx = _feed_name_to_idx[float_feed_name[i]]; Tensor *tensor = tensor_vec[idx]; - for (int j = 0; j < _shape[idx].size(); ++j) { - 
tensor->add_shape(_shape[idx][j]); + if (float_shape.size() == float_feed_name.size()) { + for (int j = 0; j < float_shape[i].size(); ++j) { + tensor->add_shape(float_shape[i][j]); + } + } else { + for (int j = 0; j < _shape[idx].size(); ++j) { + tensor->add_shape(_shape[idx][j]); + } } tensor->set_elem_type(1); for (int j = 0; j < float_feed[vec_idx].size(); ++j) { @@ -329,14 +351,20 @@ int PredictorClient::batch_predict( << "float feed value prepared"; vec_idx = 0; - for (auto &name : int_feed_name) { - int idx = _feed_name_to_idx[name]; + for (int i = 0; i < int_feed_name.size(); ++i) { + int idx = _feed_name_to_idx[int_feed_name[i]]; Tensor *tensor = tensor_vec[idx]; - for (int j = 0; j < _shape[idx].size(); ++j) { - tensor->add_shape(_shape[idx][j]); + if (int_shape.size() == int_feed_name.size()) { + for (int j = 0; j < int_shape[i].size(); ++j) { + tensor->add_shape(int_shape[i][j]); + } + } else { + for (int j = 0; j < _shape[idx].size(); ++j) { + tensor->add_shape(_shape[idx][j]); + } } tensor->set_elem_type(0); - VLOG(3) << "feed var name " << name << " index " << vec_idx + VLOG(3) << "feed var name " << int_feed_name[i] << " index " << vec_idx << "first data " << int_feed[vec_idx][0]; for (int j = 0; j < int_feed[vec_idx].size(); ++j) { tensor->add_int64_data(int_feed[vec_idx][j]); diff --git a/core/general-client/src/pybind_general_model.cpp b/core/general-client/src/pybind_general_model.cpp index 47bc6bd3308e5150ccaba29ccefc52ca6e177c64..fa95c742bad99231198154e1c7929b6645a7830a 100644 --- a/core/general-client/src/pybind_general_model.cpp +++ b/core/general-client/src/pybind_general_model.cpp @@ -71,15 +71,19 @@ PYBIND11_MODULE(serving_client, m) { [](PredictorClient &self, const std::vector> &float_feed, const std::vector &float_feed_name, + const std::vector> &float_shape, const std::vector> &int_feed, const std::vector &int_feed_name, + const std::vector> &int_shape, const std::vector &fetch_name, PredictorRes &predict_res, const int &pid) { 
return self.predict(float_feed, float_feed_name, + float_shape, int_feed, int_feed_name, + int_shape, fetch_name, predict_res, pid); @@ -89,16 +93,20 @@ PYBIND11_MODULE(serving_client, m) { const std::vector>> &float_feed_batch, const std::vector &float_feed_name, + const std::vector> &float_shape, const std::vector>> &int_feed_batch, const std::vector &int_feed_name, + const std::vector> &int_shape, const std::vector &fetch_name, PredictorRes &predict_res_batch, const int &pid) { return self.batch_predict(float_feed_batch, float_feed_name, + float_shape, int_feed_batch, int_feed_name, + int_shape, fetch_name, predict_res_batch, pid); diff --git a/python/paddle_serving_client/__init__.py b/python/paddle_serving_client/__init__.py index e0adc6e3cbe629d39a0293ba0e362d5115cf4d21..5c7fac87bcf292c3937e4d79e03ffa636d6b729a 100644 --- a/python/paddle_serving_client/__init__.py +++ b/python/paddle_serving_client/__init__.py @@ -18,6 +18,7 @@ import os from .proto import sdk_configure_pb2 as sdk from .proto import general_model_config_pb2 as m_config import google.protobuf.text_format +import numpy as np import time import sys @@ -205,6 +206,8 @@ class Client(object): float_slot_batch = [] int_feed_names = [] float_feed_names = [] + int_shape = [] + float_shape = [] fetch_names = [] counter = 0 batch_size = len(feed_batch) @@ -221,6 +224,8 @@ class Client(object): for i, feed_i in enumerate(feed_batch): int_slot = [] float_slot = [] + int_shape = [] + float_shape = [] for key in feed_i: if key not in self.feed_names_: raise ValueError("Wrong feed name: {}.".format(key)) @@ -228,13 +233,21 @@ class Client(object): if self.feed_types_[key] == int_type: if i == 0: int_feed_names.append(key) - int_slot.append(feed_i[key]) + if isinstance(feed_i[key], np.ndarray): + int_shape.append(feed_i[key].shape) + if isinstance(feed_i[key], np.ndarray): + int_slot.append(feed_i[key].tolist()) + else: + int_slot.append(feed_i[key]) elif self.feed_types_[key] == float_type: if i == 0: 
float_feed_names.append(key) - float_slot.append(feed_i[key]) - if len(int_slot) + len(float_slot) == 0: - raise ValueError("No feed data for predict.") + if isinstance(feed_i[key], np.ndarray): + float_shape.append(feed_i[key].shape) + if isinstance(feed_i[key], np.ndarray): + float_slot.append(feed_i[key].tolist()) + else: + float_slot.append(feed_i[key]) int_slot_batch.append(int_slot) float_slot_batch.append(float_slot)