Commit 7cf630e6 authored by dongdaxiang

make predict adaptable to shape

Parent 6e024565
@@ -85,8 +85,10 @@ class PredictorClient {
   int predict(const std::vector<std::vector<float>>& float_feed,
               const std::vector<std::string>& float_feed_name,
+              const std::vector<std::vector<int>>& float_shape,
               const std::vector<std::vector<int64_t>>& int_feed,
               const std::vector<std::string>& int_feed_name,
+              const std::vector<std::vector<int>>& int_shape,
               const std::vector<std::string>& fetch_name,
               PredictorRes& predict_res,  // NOLINT
               const int& pid);
@@ -94,8 +96,10 @@ class PredictorClient {
   int batch_predict(
       const std::vector<std::vector<std::vector<float>>>& float_feed_batch,
       const std::vector<std::string>& float_feed_name,
+      const std::vector<std::vector<int>>& float_shape,
       const std::vector<std::vector<std::vector<int64_t>>>& int_feed_batch,
       const std::vector<std::string>& int_feed_name,
+      const std::vector<std::vector<int>>& int_shape,
       const std::vector<std::string>& fetch_name,
       PredictorRes& predict_res_batch,  // NOLINT
       const int& pid);
...
@@ -134,8 +134,10 @@ int PredictorClient::create_predictor() {
 int PredictorClient::predict(const std::vector<std::vector<float>> &float_feed,
                              const std::vector<std::string> &float_feed_name,
+                             const std::vector<std::vector<int>> &float_shape,
                              const std::vector<std::vector<int64_t>> &int_feed,
                              const std::vector<std::string> &int_feed_name,
+                             const std::vector<std::vector<int>> &int_shape,
                              const std::vector<std::string> &fetch_name,
                              PredictorRes &predict_res,
                              const int &pid) {  // NOLINT
@@ -164,11 +166,17 @@ int PredictorClient::predict(const std::vector<std::vector<float>> &float_feed,
   }
   int vec_idx = 0;
-  for (auto &name : float_feed_name) {
-    int idx = _feed_name_to_idx[name];
+  for (int i = 0; i < float_feed_name.size(); ++i) {
+    int idx = _feed_name_to_idx[float_feed_name[i]];
     Tensor *tensor = tensor_vec[idx];
-    for (int j = 0; j < _shape[idx].size(); ++j) {
-      tensor->add_shape(_shape[idx][j]);
+    if (float_shape.size() == 0) {
+      for (int j = 0; j < _shape[idx].size(); ++j) {
+        tensor->add_shape(_shape[idx][j]);
+      }
+    } else {
+      for (int j = 0; j < float_shape[i].size(); ++j) {
+        tensor->add_shape(float_shape[i][j]);
+      }
     }
     tensor->set_elem_type(1);
     for (int j = 0; j < float_feed[vec_idx].size(); ++j) {
@@ -180,11 +188,17 @@ int PredictorClient::predict(const std::vector<std::vector<float>> &float_feed,
   VLOG(2) << "feed float feed var done.";
   vec_idx = 0;
-  for (auto &name : int_feed_name) {
-    int idx = _feed_name_to_idx[name];
+  for (int i = 0; i < int_feed_name.size(); ++i) {
+    int idx = _feed_name_to_idx[int_feed_name[i]];
     Tensor *tensor = tensor_vec[idx];
-    for (int j = 0; j < _shape[idx].size(); ++j) {
-      tensor->add_shape(_shape[idx][j]);
+    if (int_shape.size() == 0) {
+      for (int j = 0; j < _shape[idx].size(); ++j) {
+        tensor->add_shape(_shape[idx][j]);
+      }
+    } else {
+      for (int j = 0; j < int_shape[i].size(); ++j) {
+        tensor->add_shape(int_shape[i][j]);
+      }
     }
     tensor->set_elem_type(0);
     for (int j = 0; j < int_feed[vec_idx].size(); ++j) {
@@ -269,8 +283,10 @@ int PredictorClient::predict(const std::vector<std::vector<float>> &float_feed,
 int PredictorClient::batch_predict(
     const std::vector<std::vector<std::vector<float>>> &float_feed_batch,
     const std::vector<std::string> &float_feed_name,
+    const std::vector<std::vector<int>> &float_shape,
     const std::vector<std::vector<std::vector<int64_t>>> &int_feed_batch,
     const std::vector<std::string> &int_feed_name,
+    const std::vector<std::vector<int>> &int_shape,
     const std::vector<std::string> &fetch_name,
     PredictorRes &predict_res_batch,
     const int &pid) {
@@ -312,11 +328,17 @@ int PredictorClient::batch_predict(
     VLOG(2) << "batch [" << bi << "] int_feed_name and float_feed_name "
             << "prepared";
     int vec_idx = 0;
-    for (auto &name : float_feed_name) {
-      int idx = _feed_name_to_idx[name];
+    for (int i = 0; i < float_feed_name.size(); ++i) {
+      int idx = _feed_name_to_idx[float_feed_name[i]];
       Tensor *tensor = tensor_vec[idx];
-      for (int j = 0; j < _shape[idx].size(); ++j) {
-        tensor->add_shape(_shape[idx][j]);
+      if (float_shape.size() == float_feed_name.size()) {
+        for (int j = 0; j < float_shape[i].size(); ++j) {
+          tensor->add_shape(float_shape[i][j]);
+        }
+      } else {
+        for (int j = 0; j < _shape[idx].size(); ++j) {
+          tensor->add_shape(_shape[idx][j]);
+        }
       }
       tensor->set_elem_type(1);
       for (int j = 0; j < float_feed[vec_idx].size(); ++j) {
@@ -329,14 +351,20 @@ int PredictorClient::batch_predict(
             << "float feed value prepared";
     vec_idx = 0;
-    for (auto &name : int_feed_name) {
-      int idx = _feed_name_to_idx[name];
+    for (int i = 0; i < int_feed_name.size(); ++i) {
+      int idx = _feed_name_to_idx[int_feed_name[i]];
       Tensor *tensor = tensor_vec[idx];
-      for (int j = 0; j < _shape[idx].size(); ++j) {
-        tensor->add_shape(_shape[idx][j]);
+      if (int_shape.size() == int_feed_name.size()) {
+        for (int j = 0; j < int_shape[i].size(); ++j) {
+          tensor->add_shape(int_shape[i][j]);
+        }
+      } else {
+        for (int j = 0; j < _shape[idx].size(); ++j) {
+          tensor->add_shape(_shape[idx][j]);
+        }
       }
       tensor->set_elem_type(0);
-      VLOG(3) << "feed var name " << name << " index " << vec_idx
+      VLOG(3) << "feed var name " << int_feed_name[i] << " index " << vec_idx
              << "first data " << int_feed[vec_idx][0];
       for (int j = 0; j < int_feed[vec_idx].size(); ++j) {
         tensor->add_int64_data(int_feed[vec_idx][j]);
...
@@ -71,15 +71,19 @@ PYBIND11_MODULE(serving_client, m) {
          [](PredictorClient &self,
             const std::vector<std::vector<float>> &float_feed,
             const std::vector<std::string> &float_feed_name,
+            const std::vector<std::vector<int>> &float_shape,
             const std::vector<std::vector<int64_t>> &int_feed,
             const std::vector<std::string> &int_feed_name,
+            const std::vector<std::vector<int>> &int_shape,
             const std::vector<std::string> &fetch_name,
             PredictorRes &predict_res,
             const int &pid) {
            return self.predict(float_feed,
                                float_feed_name,
+                               float_shape,
                                int_feed,
                                int_feed_name,
+                               int_shape,
                                fetch_name,
                                predict_res,
                                pid);
@@ -89,16 +93,20 @@ PYBIND11_MODULE(serving_client, m) {
             const std::vector<std::vector<std::vector<float>>>
                 &float_feed_batch,
             const std::vector<std::string> &float_feed_name,
+            const std::vector<std::vector<int>> &float_shape,
             const std::vector<std::vector<std::vector<int64_t>>>
                 &int_feed_batch,
             const std::vector<std::string> &int_feed_name,
+            const std::vector<std::vector<int>> &int_shape,
             const std::vector<std::string> &fetch_name,
             PredictorRes &predict_res_batch,
             const int &pid) {
            return self.batch_predict(float_feed_batch,
                                      float_feed_name,
+                                     float_shape,
                                      int_feed_batch,
                                      int_feed_name,
+                                     int_shape,
                                      fetch_name,
                                      predict_res_batch,
                                      pid);
...
@@ -18,6 +18,7 @@ import os
 from .proto import sdk_configure_pb2 as sdk
 from .proto import general_model_config_pb2 as m_config
 import google.protobuf.text_format
+import numpy as np
 import time
 import sys
@@ -205,6 +206,8 @@ class Client(object):
         float_slot_batch = []
         int_feed_names = []
         float_feed_names = []
+        int_shape = []
+        float_shape = []
         fetch_names = []
         counter = 0
         batch_size = len(feed_batch)
@@ -221,6 +224,8 @@ class Client(object):
         for i, feed_i in enumerate(feed_batch):
             int_slot = []
             float_slot = []
+            int_shape = []
+            float_shape = []
             for key in feed_i:
                 if key not in self.feed_names_:
                     raise ValueError("Wrong feed name: {}.".format(key))
@@ -228,13 +233,21 @@ class Client(object):
                 if self.feed_types_[key] == int_type:
                     if i == 0:
                         int_feed_names.append(key)
-                    int_slot.append(feed_i[key])
+                    if isinstance(feed_i[key], np.ndarray):
+                        int_shape.append(feed_i[key].shape)
+                    if isinstance(feed_i[key], np.ndarray):
+                        int_slot.append(feed_i[key].tolist())
+                    else:
+                        int_slot.append(feed_i[key])
                 elif self.feed_types_[key] == float_type:
                     if i == 0:
                         float_feed_names.append(key)
-                    float_slot.append(feed_i[key])
-                    if len(int_slot) + len(float_slot) == 0:
-                        raise ValueError("No feed data for predict.")
+                    if isinstance(feed_i[key], np.ndarray):
+                        float_shape.append(feed_i[key].shape)
+                    if isinstance(feed_i[key], np.ndarray):
+                        float_slot.append(feed_i[key].tolist())
+                    else:
+                        float_slot.append(feed_i[key])
             int_slot_batch.append(int_slot)
             float_slot_batch.append(float_slot)
...
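
With this change, a numpy array feed has its shape recorded on the Python side and forwarded as float_shape/int_shape to the C++ PredictorClient, which uses it in place of the static shape from the model config; plain list feeds keep the old behavior. The sketch below illustrates the intended calling pattern. It assumes the usual paddle_serving_client entry points (Client, load_client_config, connect, predict); the feed name "x", fetch name "prob", config path, and server address are hypothetical placeholders, not part of this commit.

# Minimal usage sketch of the shape-aware feed path (assumed names, see above).
import numpy as np
from paddle_serving_client import Client

client = Client()
client.load_client_config("serving_client_conf.prototxt")  # hypothetical path
client.connect(["127.0.0.1:9292"])                         # hypothetical address

# An ndarray feed: its .shape is captured into float_shape and sent along with
# the flattened data, so the tensor shape no longer has to come from the model
# configuration.
x = np.random.rand(1, 13).astype("float32")
fetch_map = client.predict(feed={"x": x}, fetch=["prob"])

# A plain Python list still works; in that case the shape recorded in the
# model config is used, exactly as before this commit.
fetch_map = client.predict(feed={"x": x.flatten().tolist()}, fetch=["prob"])

Falling back to the config shape whenever no explicit shape is supplied keeps existing list-based callers working unchanged.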