diff --git a/core/general-client/include/general_model.h b/core/general-client/include/general_model.h
index f4e4770c83e3c52f4f3cdd7de30d68bcdafb4b97..2da1e4f77e3c7c3f932e97ac75a374e8dd5852e2 100644
--- a/core/general-client/include/general_model.h
+++ b/core/general-client/include/general_model.h
@@ -76,7 +76,7 @@ class PredictorRes {
     _variant_tag = variant_tag;
   }
   const std::string& variant_tag() { return _variant_tag; }
-  int models_num() {return _models.size();}
+  int model_num() {return _models.size();}
 
   std::vector<ModelRes> _models;
 
diff --git a/core/general-client/src/pybind_general_model.cpp b/core/general-client/src/pybind_general_model.cpp
index 1a93903dbf7f205bb5e993544af2e8eabbf0ec9a..ce5a40cf94dea77f0236015634fc7ea25c4d1d81 100644
--- a/core/general-client/src/pybind_general_model.cpp
+++ b/core/general-client/src/pybind_general_model.cpp
@@ -42,8 +42,8 @@ PYBIND11_MODULE(serving_client, m) {
            py::return_value_policy::reference)
       .def("variant_tag",
            [](PredictorRes &self) { return self.variant_tag(); })
-      .def("models_num",
-           [](PredictorRes &self) {return self.models_num(); });
+      .def("model_num",
+           [](PredictorRes &self) {return self.model_num(); });
 
   py::class_<PredictorClient>(m, "PredictorClient", py::buffer_protocol())
       .def(py::init<>())
diff --git a/core/general-server/op/general_response_op.cpp b/core/general-server/op/general_response_op.cpp
index 34aa447b704275759cbd62a4868ff11477372e7f..a597fc1f4ee0ba4f8eec9ab9e6cd1dd854892c89 100644
--- a/core/general-server/op/general_response_op.cpp
+++ b/core/general-server/op/general_response_op.cpp
@@ -66,14 +66,14 @@ int GeneralResponseOp::inference() {
   }
 
   const GeneralBlob *input_blob;
-  for (uint32_t i = 0; i < pre_node_names.size(); ++i) {
-    VLOG(2) << "pre names[" << i << "]: "
-            << pre_node_names[i] << " ("
+  for (uint32_t pi = 0; pi < pre_node_names.size(); ++pi) {
+    VLOG(2) << "pre names[" << pi << "]: "
+            << pre_node_names[pi] << " ("
             << pre_node_names.size() << ")";
-    input_blob = get_depend_argument<GeneralBlob>(pre_node_names[i]);
-    fprintf(stderr, "input(%s) blob address %x\n", pre_node_names[i].c_str(), input_blob);
+    input_blob = get_depend_argument<GeneralBlob>(pre_node_names[pi]);
+    fprintf(stderr, "input(%s) blob address %x\n", pre_node_names[pi].c_str(), input_blob);
     if (!input_blob) {
-      LOG(ERROR) << "Failed mutable depended argument, op: " << pre_node_names[0];
+      LOG(ERROR) << "Failed mutable depended argument, op: " << pre_node_names[pi];
       return -1;
     }
@@ -81,7 +81,6 @@ int GeneralResponseOp::inference() {
     int batch_size = input_blob->GetBatchSize();
     VLOG(2) << "input batch size: " << batch_size;
 
-    //TODO
     ModelOutput *output = res->add_outputs();
     for (int i = 0; i < batch_size; ++i) {
       FetchInst *fetch_inst = output->add_insts();
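Note on the client-side hunks above: the rename from models_num() to model_num() changes the Python-visible binding, so callers must be updated in step. A minimal usage sketch follows (not part of the patch; it assumes `result_batch` is a serving_client.PredictorRes returned by an earlier predict call, and "score" is a hypothetical float fetch name):

    # Sketch only: `result_batch` is assumed to come from a prior predict call,
    # and "score" is a placeholder for one of the model's fetch names.
    model_num = result_batch.model_num()  # renamed from models_num() by this patch
    for mi in range(model_num):
        # Per-model accessors take the model index first, then the fetch name.
        score = result_batch.get_float_by_name(mi, "score")
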
diff --git a/python/paddle_serving_client/__init__.py b/python/paddle_serving_client/__init__.py
index 229ddbba5caaaba3121520f8ae02fa311f0e09a4..611883246437fbc48df8da501ff0185d1505250b 100644
--- a/python/paddle_serving_client/__init__.py
+++ b/python/paddle_serving_client/__init__.py
@@ -248,14 +248,14 @@ class Client(object):
         multi_result_map_batch = []
         model_num = result_batch.model_num()
-        for i in range(model_num):
+        for mi in range(model_num):
             result_map_batch = []
             result_map = {}
             for i, name in enumerate(fetch_names):
                 if self.fetch_names_to_type_[name] == int_type:
-                    result_map[name] = result_batch.get_int64_by_name(i, name)
+                    result_map[name] = result_batch.get_int64_by_name(mi, name)
                 elif self.fetch_names_to_type_[name] == float_type:
-                    result_map[name] = result_batch.get_float_by_name(i, name)
+                    result_map[name] = result_batch.get_float_by_name(mi, name)
             for i in range(batch_size):
                 single_result = {}
                 for key in result_map:
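The __init__.py hunk fixes a loop-variable shadowing bug: the outer model index `i` was overwritten by the inner `for i, name in enumerate(fetch_names)` loop, so the per-model accessors received a fetch-name position instead of a model index. The general_response_op.cpp hunk fixes the same pattern in C++ by renaming the outer index to `pi`. A self-contained Python illustration of the pattern (all names are hypothetical stand-ins, not the Serving API):

    # Hypothetical stand-ins for the real per-model accessors.
    fetch_names = ["label", "score"]
    models = [["m0-label", "m0-score"], ["m1-label", "m1-score"]]

    def get_by_name(model_idx, name):
        return models[model_idx][fetch_names.index(name)]

    # Buggy pattern: the inner loop reuses `i`, so `i` indexes fetch_names
    # by the time get_by_name() is called.
    buggy = []
    for i in range(len(models)):
        for i, name in enumerate(fetch_names):
            buggy.append(get_by_name(i, name))
    # buggy == ['m0-label', 'm1-score', 'm0-label', 'm1-score']  -- rows mixed

    # Fixed pattern, mirroring the patch's rename to `mi`.
    fixed = []
    for mi in range(len(models)):
        for i, name in enumerate(fetch_names):
            fixed.append(get_by_name(mi, name))
    # fixed == ['m0-label', 'm0-score', 'm1-label', 'm1-score']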