Commit 2d73ac0a authored by barrierye

make it run successful

Parent 43d8782c
@@ -76,7 +76,7 @@ class PredictorRes {
     _variant_tag = variant_tag;
   }
   const std::string& variant_tag() { return _variant_tag; }
-  int models_num() {return _models.size();}
+  int model_num() {return _models.size();}
   std::vector<ModelRes> _models;
...
@@ -42,8 +42,8 @@ PYBIND11_MODULE(serving_client, m) {
            py::return_value_policy::reference)
       .def("variant_tag",
            [](PredictorRes &self) { return self.variant_tag(); })
-      .def("models_num",
-           [](PredictorRes &self) {return self.models_num(); });
+      .def("model_num",
+           [](PredictorRes &self) {return self.model_num(); });
   py::class_<PredictorClient>(m, "PredictorClient", py::buffer_protocol())
       .def(py::init())
...
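These two hunks rename the C++ accessor and its pybind11 binding from models_num to model_num, matching the name the Python client already calls (model_num() in the last hunk below). A minimal sketch of the Python-side call, assuming result_batch is the PredictorRes object returned by a predict call through these bindings:

    # Sketch only: `result_batch` is assumed to be the PredictorRes
    # returned by the serving_client bindings built from this commit.
    model_num = result_batch.model_num()  # number of ModelRes entries in _models
    print("response carries %d model output(s)" % model_num)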
@@ -66,14 +66,14 @@ int GeneralResponseOp::inference() {
   }
   const GeneralBlob *input_blob;
-  for (uint32_t i = 0; i < pre_node_names.size(); ++i) {
-    VLOG(2) << "pre names[" << i << "]: "
-            << pre_node_names[i] << " ("
+  for (uint32_t pi = 0; pi < pre_node_names.size(); ++pi) {
+    VLOG(2) << "pre names[" << pi << "]: "
+            << pre_node_names[pi] << " ("
             << pre_node_names.size() << ")";
-    input_blob = get_depend_argument<GeneralBlob>(pre_node_names[i]);
-    fprintf(stderr, "input(%s) blob address %x\n", pre_node_names[i].c_str(), input_blob);
+    input_blob = get_depend_argument<GeneralBlob>(pre_node_names[pi]);
+    fprintf(stderr, "input(%s) blob address %x\n", pre_node_names[pi].c_str(), input_blob);
     if (!input_blob) {
-      LOG(ERROR) << "Failed mutable depended argument, op: " << pre_node_names[0];
+      LOG(ERROR) << "Failed mutable depended argument, op: " << pre_node_names[pi];
       return -1;
     }
@@ -81,7 +81,6 @@ int GeneralResponseOp::inference() {
     int batch_size = input_blob->GetBatchSize();
     VLOG(2) << "input batch size: " << batch_size;
-    //TODO
     ModelOutput *output = res->add_outputs();
     for (int i = 0; i < batch_size; ++i) {
       FetchInst *fetch_inst = output->add_insts();
...
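Here the predecessor-op index is renamed from i to pi so it stays distinct from the batch index i used in the inner loop, the error log now reports the failing op (pre_node_names[pi]) instead of always the first one, and one ModelOutput is appended to the response per predecessor. The response therefore carries one output per model, each with batch_size FetchInst entries; a hedged sketch of walking that two-level layout from the client side, reusing only the accessors visible in this commit:

    # Sketch of consuming the response shape GeneralResponseOp builds:
    # one output per predecessor model, indexed by `mi` on the client.
    # `fetch_names`, `int_type`, and the name-to-type map mirror the
    # client hunk below and are assumed to be in scope here.
    for mi in range(result_batch.model_num()):
        for name in fetch_names:
            if fetch_names_to_type[name] == int_type:
                values = result_batch.get_int64_by_name(mi, name)
            else:
                values = result_batch.get_float_by_name(mi, name)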
@@ -248,14 +248,14 @@ class Client(object):
         multi_result_map_batch = []
         model_num = result_batch.model_num()
-        for i in range(model_num):
+        for mi in range(model_num):
             result_map_batch = []
             result_map = {}
             for i, name in enumerate(fetch_names):
                 if self.fetch_names_to_type_[name] == int_type:
-                    result_map[name] = result_batch.get_int64_by_name(i, name)
+                    result_map[name] = result_batch.get_int64_by_name(mi, name)
                 elif self.fetch_names_to_type_[name] == float_type:
-                    result_map[name] = result_batch.get_float_by_name(i, name)
+                    result_map[name] = result_batch.get_float_by_name(mi, name)
             for i in range(batch_size):
                 single_result = {}
                 for key in result_map:
...
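The outer-loop rename from i to mi is the substantive fix here: Python for loops share one scope, so the inner for i, name in enumerate(fetch_names) rebound i to the fetch-name position, and get_int64_by_name(i, name) then passed a field index where a model index was expected. A standalone reproduction of that bug class:

    # Standalone illustration (not repo code) of the rebinding bug fixed above.
    models = ["model_a", "model_b"]
    fields = ["score", "label"]
    for i in range(len(models)):
        for i, name in enumerate(fields):  # clobbers the outer `i`
            pass
        print(models[i])  # prints "model_b" twice; with a distinct `mi`
                          # it would print "model_a" then "model_b"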