Commit a646202f, authored by: dongdaxiang; committed by: dongdaxiang

make predict adaptable to shape

Parent 72853d34
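
The change threads optional per-tensor shape information from the Python client down to the serving request: predict and batch_predict gain float_shape/int_shape parameters, and the Python Client collects shapes from numpy inputs so they can override the default shapes read from the model configuration. A minimal usage sketch from the Python side (the config path, endpoint, and feed/fetch names are illustrative, not part of this commit):

import numpy as np
from paddle_serving_client import Client

client = Client()
client.load_client_config("serving_client_conf.prototxt")  # illustrative path
client.connect(["127.0.0.1:9292"])  # illustrative endpoint

# A numpy feed now contributes both its data (via tolist()) and its shape,
# so the server no longer has to assume the configured default shape.
x = np.random.rand(13).astype("float32")
fetch_map = client.predict(feed={"x": x}, fetch=["price"])
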
@@ -85,8 +85,10 @@ class PredictorClient {
int predict(const std::vector<std::vector<float>>& float_feed,
const std::vector<std::string>& float_feed_name,
const std::vector<std::vector<int>>& float_shape,
const std::vector<std::vector<int64_t>>& int_feed,
const std::vector<std::string>& int_feed_name,
const std::vector<std::vector<int>>& int_shape,
const std::vector<std::string>& fetch_name,
PredictorRes& predict_res, // NOLINT
const int& pid);
@@ -94,8 +96,10 @@ class PredictorClient {
int batch_predict(
const std::vector<std::vector<std::vector<float>>>& float_feed_batch,
const std::vector<std::string>& float_feed_name,
const std::vector<std::vector<int>>& float_shape,
const std::vector<std::vector<std::vector<int64_t>>>& int_feed_batch,
const std::vector<std::string>& int_feed_name,
const std::vector<std::vector<int>>& int_shape,
const std::vector<std::string>& fetch_name,
PredictorRes& predict_res_batch, // NOLINT
const int& pid);
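
Both declarations follow the same convention: float_shape is parallel to float_feed_name and int_shape to int_feed_name, one shape vector per feed tensor, and an empty list means "no override". A sketch of the expected argument layout (values are illustrative):

# One float feed named "x", declared as a 1 x 13 tensor:
float_feed_name = ["x"]
float_shape = [[1, 13]]  # exactly one shape per entry of float_feed_name
int_feed_name = []
int_shape = []  # empty: the configured default shapes are used
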
@@ -134,8 +134,10 @@ int PredictorClient::create_predictor() {
int PredictorClient::predict(const std::vector<std::vector<float>> &float_feed,
const std::vector<std::string> &float_feed_name,
const std::vector<std::vector<int>> &float_shape,
const std::vector<std::vector<int64_t>> &int_feed,
const std::vector<std::string> &int_feed_name,
const std::vector<std::vector<int>> &int_shape,
const std::vector<std::string> &fetch_name,
PredictorRes &predict_res,
const int &pid) { // NOLINT
@@ -164,11 +166,17 @@ int PredictorClient::predict(const std::vector<std::vector<float>> &float_feed,
}
int vec_idx = 0;
for (int i = 0; i < float_feed_name.size(); ++i) {
int idx = _feed_name_to_idx[float_feed_name[i]];
Tensor *tensor = tensor_vec[idx];
// An empty float_shape means no override: fall back to the default
// shapes (_shape) read from the model configuration.
if (float_shape.size() == 0) {
for (int j = 0; j < _shape[idx].size(); ++j) {
tensor->add_shape(_shape[idx][j]);
}
} else {
for (int j = 0; j < float_shape[i].size(); ++j) {
tensor->add_shape(float_shape[i][j]);
}
}
tensor->set_elem_type(1);
for (int j = 0; j < float_feed[vec_idx].size(); ++j) {
@@ -180,11 +188,17 @@ int PredictorClient::predict(const std::vector<std::vector<float>> &float_feed,
VLOG(2) << "feed float feed var done.";
vec_idx = 0;
for (int i = 0; i < int_feed_name.size(); ++i) {
int idx = _feed_name_to_idx[int_feed_name[i]];
Tensor *tensor = tensor_vec[idx];
// Same fallback rule as for the float feeds: an empty int_shape selects
// the defaults from the model configuration; otherwise use the caller's
// per-feed shapes (reading int_shape[i] is only safe in this branch).
if (int_shape.size() == 0) {
for (int j = 0; j < _shape[idx].size(); ++j) {
tensor->add_shape(_shape[idx][j]);
}
} else {
for (int j = 0; j < int_shape[i].size(); ++j) {
tensor->add_shape(int_shape[i][j]);
}
}
tensor->set_elem_type(0);
for (int j = 0; j < int_feed[vec_idx].size(); ++j) {
@@ -269,8 +283,10 @@ int PredictorClient::predict(const std::vector<std::vector<float>> &float_feed,
int PredictorClient::batch_predict(
const std::vector<std::vector<std::vector<float>>> &float_feed_batch,
const std::vector<std::string> &float_feed_name,
const std::vector<std::vector<int>> &float_shape,
const std::vector<std::vector<std::vector<int64_t>>> &int_feed_batch,
const std::vector<std::string> &int_feed_name,
const std::vector<std::vector<int>> &int_shape,
const std::vector<std::string> &fetch_name,
PredictorRes &predict_res_batch,
const int &pid) {
@@ -312,11 +328,17 @@ int PredictorClient::batch_predict(
VLOG(2) << "batch [" << bi << "] int_feed_name and float_feed_name "
<< "prepared";
int vec_idx = 0;
for (int i = 0; i < float_feed_name.size(); ++i) {
int idx = _feed_name_to_idx[float_feed_name[i]];
Tensor *tensor = tensor_vec[idx];
// batch_predict honors the override only when exactly one shape was
// supplied per float feed name; otherwise use the configured defaults.
if (float_shape.size() == float_feed_name.size()) {
for (int j = 0; j < float_shape[i].size(); ++j) {
tensor->add_shape(float_shape[i][j]);
}
} else {
for (int j = 0; j < _shape[idx].size(); ++j) {
tensor->add_shape(_shape[idx][j]);
}
}
tensor->set_elem_type(1);
for (int j = 0; j < float_feed[vec_idx].size(); ++j) {
@@ -329,14 +351,20 @@ int PredictorClient::batch_predict(
<< "float feed value prepared";
vec_idx = 0;
for (int i = 0; i < int_feed_name.size(); ++i) {
int idx = _feed_name_to_idx[int_feed_name[i]];
Tensor *tensor = tensor_vec[idx];
// Same length check as above, keyed on int_shape and int_feed_name.
if (int_shape.size() == int_feed_name.size()) {
for (int j = 0; j < int_shape[i].size(); ++j) {
tensor->add_shape(int_shape[i][j]);
}
} else {
for (int j = 0; j < _shape[idx].size(); ++j) {
tensor->add_shape(_shape[idx][j]);
}
}
tensor->set_elem_type(0);
VLOG(3) << "feed var name " << name << " index " << vec_idx
VLOG(3) << "feed var name " << float_feed_name[i] << " index " << vec_idx
<< "first data " << int_feed[vec_idx][0];
for (int j = 0; j < int_feed[vec_idx].size(); ++j) {
tensor->add_int64_data(int_feed[vec_idx][j]);
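
Note that the two methods gate the override differently: predict uses the caller's shapes whenever the list is non-empty, while batch_predict uses them only when exactly one shape was collected per feed name. The rule, modeled in Python for clarity (the function and parameter names here are mine, not from the source):

def pick_shape(user_shapes, default_shapes, i, idx, n_feeds=None):
    # n_feeds=None models predict(): any non-empty list wins.
    # n_feeds=len(feed_names) models batch_predict(): lengths must match.
    if n_feeds is None:
        use_user = len(user_shapes) > 0
    else:
        use_user = len(user_shapes) == n_feeds
    return user_shapes[i] if use_user else default_shapes[idx]
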
@@ -71,15 +71,19 @@ PYBIND11_MODULE(serving_client, m) {
[](PredictorClient &self,
const std::vector<std::vector<float>> &float_feed,
const std::vector<std::string> &float_feed_name,
const std::vector<std::vector<int>> &float_shape,
const std::vector<std::vector<int64_t>> &int_feed,
const std::vector<std::string> &int_feed_name,
const std::vector<std::vector<int>> &int_shape,
const std::vector<std::string> &fetch_name,
PredictorRes &predict_res,
const int &pid) {
return self.predict(float_feed,
float_feed_name,
float_shape,
int_feed,
int_feed_name,
int_shape,
fetch_name,
predict_res,
pid);
@@ -89,16 +93,20 @@ PYBIND11_MODULE(serving_client, m) {
const std::vector<std::vector<std::vector<float>>>
&float_feed_batch,
const std::vector<std::string> &float_feed_name,
const std::vector<std::vector<int>> &float_shape,
const std::vector<std::vector<std::vector<int64_t>>>
&int_feed_batch,
const std::vector<std::string> &int_feed_name,
const std::vector<std::vector<int>> &int_shape,
const std::vector<std::string> &fetch_name,
PredictorRes &predict_res_batch,
const int &pid) {
return self.batch_predict(float_feed_batch,
float_feed_name,
float_shape,
int_feed_batch,
int_feed_name,
int_shape,
fetch_name,
predict_res_batch,
pid);
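
The bindings pass the new lists straight through, so the low-level call mirrors the C++ signature. A hedged sketch of driving the raw binding directly (normally the Client wrapper does this; the import path, the PredictorRes constructor, and all values below are assumptions):

import os
from paddle_serving_client.serving_client import PredictorClient, PredictorRes

client = PredictorClient()  # assumed to be configured and connected already
res = PredictorRes()
status = client.predict(
    [[0.5] * 13],   # float_feed: one slot holding 13 floats
    ["x"],          # float_feed_name
    [[1, 13]],      # float_shape, parallel to float_feed_name
    [], [], [],     # int_feed, int_feed_name, int_shape: none
    ["price"],      # fetch_name
    res,            # PredictorRes filled in place
    os.getpid())    # pid, used for logging
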
@@ -18,6 +18,8 @@ import os
from .proto import sdk_configure_pb2 as sdk
from .proto import general_model_config_pb2 as m_config
import google.protobuf.text_format
import numpy as np
import time
import sys
int_type = 0
@@ -203,6 +205,8 @@ class Client(object):
        float_slot_batch = []
        int_feed_names = []
        float_feed_names = []
        int_shape = []
        float_shape = []
        fetch_names = []
        counter = 0
        batch_size = len(feed_batch)
@@ -219,6 +223,8 @@ class Client(object):
        for i, feed_i in enumerate(feed_batch):
            int_slot = []
            float_slot = []
            int_shape = []
            float_shape = []
            for key in feed_i:
                if key not in self.feed_names_:
                    raise ValueError("Wrong feed name: {}.".format(key))
@@ -226,13 +232,21 @@ class Client(object):
                if self.feed_types_[key] == int_type:
                    if i == 0:
                        int_feed_names.append(key)
                    # numpy inputs carry their own shape and are converted
                    # to plain lists; other inputs are passed through as-is.
                    if isinstance(feed_i[key], np.ndarray):
                        int_shape.append(feed_i[key].shape)
                        int_slot.append(feed_i[key].tolist())
                    else:
                        int_slot.append(feed_i[key])
                elif self.feed_types_[key] == float_type:
                    if i == 0:
                        float_feed_names.append(key)
                    if isinstance(feed_i[key], np.ndarray):
                        float_shape.append(feed_i[key].shape)
                        float_slot.append(feed_i[key].tolist())
                    else:
                        float_slot.append(feed_i[key])
            int_slot_batch.append(int_slot)
            float_slot_batch.append(float_slot)
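
One consequence of collecting shapes only for numpy inputs: mixing numpy and plain-list feeds leaves the shape list shorter than the name list, so batch_predict's length check fails and the configured defaults apply. A small self-contained illustration of the collection loop above:

import numpy as np

float_shape, float_slot = [], []
for value in [np.ones((1, 3), dtype="float32"), [4.0, 5.0, 6.0]]:
    if isinstance(value, np.ndarray):
        float_shape.append(value.shape)    # shape recorded only for ndarrays
        float_slot.append(value.tolist())  # data sent as plain Python lists
    else:
        float_slot.append(value)           # plain lists record no shape

# float_shape has 1 entry for 2 feeds, so the C++ side's check
# float_shape.size() == float_feed_name.size() fails and the default
# shapes from the model configuration are used instead.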