diff --git a/core/general-client/include/general_model.h b/core/general-client/include/general_model.h
index b5d27df5edbaf9278ecb8614e282d104347206f8..a81a0005473f3eb4039dd77aa430957e52eda687 100644
--- a/core/general-client/include/general_model.h
+++ b/core/general-client/include/general_model.h
@@ -227,7 +227,8 @@ class PredictorClient {
       const std::vector<std::vector<int>>& int_shape,
       const std::vector<std::string>& fetch_name,
       PredictorRes& predict_res_batch,  // NOLINT
-      const int& pid);
+      const int& pid,
+      const uint64_t log_id);
 
   int numpy_predict(
       const std::vector<std::vector<py::array_t<float>>>& float_feed_batch,
@@ -238,7 +239,8 @@ class PredictorClient {
       const std::vector<std::vector<int>>& int_shape,
       const std::vector<std::string>& fetch_name,
       PredictorRes& predict_res_batch,  // NOLINT
-      const int& pid);
+      const int& pid,
+      const uint64_t log_id);
 
  private:
   PredictorApi _api;
diff --git a/core/general-client/src/general_model.cpp b/core/general-client/src/general_model.cpp
index 9f709c71045577f7b043777a7ad1528a0e2ccc28..5c2f95de8af6c0197a488c70bcb67f2893a122c6 100644
--- a/core/general-client/src/general_model.cpp
+++ b/core/general-client/src/general_model.cpp
@@ -144,7 +144,8 @@ int PredictorClient::batch_predict(
     const std::vector<std::vector<int>> &int_shape,
     const std::vector<std::string> &fetch_name,
     PredictorRes &predict_res_batch,
-    const int &pid) {
+    const int &pid,
+    const uint64_t log_id) {
   int batch_size = std::max(float_feed_batch.size(), int_feed_batch.size());
 
   predict_res_batch.clear();
@@ -162,6 +163,8 @@ int PredictorClient::batch_predict(
   VLOG(2) << "int feed name size: " << int_feed_name.size();
   VLOG(2) << "max body size : " << brpc::fLU64::FLAGS_max_body_size;
   Request req;
+  req.set_log_id(log_id);
+  VLOG(2) << "(logid=" << req.log_id() << ")";
   for (auto &name : fetch_name) {
     req.add_fetch_var_names(name);
   }
@@ -356,7 +359,8 @@ int PredictorClient::numpy_predict(
     const std::vector<std::vector<int>> &int_shape,
     const std::vector<std::string> &fetch_name,
     PredictorRes &predict_res_batch,
-    const int &pid) {
+    const int &pid,
+    const uint64_t log_id) {
   int batch_size = std::max(float_feed_batch.size(), int_feed_batch.size());
   VLOG(2) << "batch size: " << batch_size;
   predict_res_batch.clear();
@@ -374,6 +378,8 @@ int PredictorClient::numpy_predict(
   VLOG(2) << "int feed name size: " << int_feed_name.size();
   VLOG(2) << "max body size : " << brpc::fLU64::FLAGS_max_body_size;
   Request req;
+  req.set_log_id(log_id);
+  VLOG(2) << "(logid=" << req.log_id() << ")";
   for (auto &name : fetch_name) {
     req.add_fetch_var_names(name);
   }
diff --git a/core/general-client/src/pybind_general_model.cpp b/core/general-client/src/pybind_general_model.cpp
index 3e065e4de1ff3c01ff6bc05cb39a2607620915b4..1e79a8d2489a9ebc2024402bada32a4be2000146 100644
--- a/core/general-client/src/pybind_general_model.cpp
+++ b/core/general-client/src/pybind_general_model.cpp
@@ -107,7 +107,8 @@ PYBIND11_MODULE(serving_client, m) {
              const std::vector<std::vector<int>> &int_shape,
              const std::vector<std::string> &fetch_name,
              PredictorRes &predict_res_batch,
-             const int &pid) {
+             const int &pid,
+             const uint64_t log_id) {
             return self.batch_predict(float_feed_batch,
                                       float_feed_name,
                                       float_shape,
@@ -116,7 +117,8 @@ PYBIND11_MODULE(serving_client, m) {
                                       int_shape,
                                       fetch_name,
                                       predict_res_batch,
-                                      pid);
+                                      pid,
+                                      log_id);
           },
           py::call_guard<py::gil_scoped_release>())
       .def("numpy_predict",
@@ -131,7 +133,8 @@ PYBIND11_MODULE(serving_client, m) {
              const std::vector<std::vector<int>> &int_shape,
              const std::vector<std::string> &fetch_name,
              PredictorRes &predict_res_batch,
-             const int &pid) {
+             const int &pid,
+             const uint64_t log_id) {
             return self.numpy_predict(float_feed_batch,
                                       float_feed_name,
                                       float_shape,
@@ -140,7 +143,8 @@ PYBIND11_MODULE(serving_client, m) {
                                       int_shape,
                                       fetch_name,
                                       predict_res_batch,
-                                      pid);
+                                      pid,
+                                      log_id);
           },
           py::call_guard<py::gil_scoped_release>());
 }
diff --git a/core/general-server/proto/general_model_service.proto b/core/general-server/proto/general_model_service.proto
index 8581ecb2a2e10deced910a20ce26c2beaca956fa..e7dd5fccf54be43db8e65a9ed1112ceaece93700 100644
--- a/core/general-server/proto/general_model_service.proto
+++ b/core/general-server/proto/general_model_service.proto
@@ -37,6 +37,7 @@ message Request {
   repeated FeedInst insts = 1;
   repeated string fetch_var_names = 2;
   optional bool profile_server = 3 [ default = false ];
+  required uint64 log_id = 4 [ default = 0 ];
 };
 
 message Response {
diff --git a/core/pdcodegen/src/pdcodegen.cpp b/core/pdcodegen/src/pdcodegen.cpp
index af4081a985ece584f82120799fc9a384f83830be..8c067401ae11fcc2f869cc7c45520d7c39be6434 100644
--- a/core/pdcodegen/src/pdcodegen.cpp
+++ b/core/pdcodegen/src/pdcodegen.cpp
@@ -280,6 +280,7 @@ class PdsCodeGenerator : public CodeGenerator {
       "    baidu::rpc::ClosureGuard done_guard(done);\n"
       "    baidu::rpc::Controller* cntl = \n"
       "        static_cast<baidu::rpc::Controller*>(cntl_base);\n"
+      "    cntl->set_log_id(request->log_id());\n"
       "    ::baidu::paddle_serving::predictor::InferService* svr = \n"
       "        "
       "::baidu::paddle_serving::predictor::InferServiceManager::instance("
@@ -317,6 +318,7 @@ class PdsCodeGenerator : public CodeGenerator {
       "    baidu::rpc::ClosureGuard done_guard(done);\n"
       "    baidu::rpc::Controller* cntl = \n"
       "        static_cast<baidu::rpc::Controller*>(cntl_base);\n"
+      "    cntl->set_log_id(request->log_id());\n"
       "    ::baidu::paddle_serving::predictor::InferService* svr = \n"
       "        "
       "::baidu::paddle_serving::predictor::InferServiceManager::instance("
diff --git a/core/sdk-cpp/proto/general_model_service.proto b/core/sdk-cpp/proto/general_model_service.proto
index 51c0335a9db896e1260e83915de81e51451a904b..9988b298bdd22210fbe3127b9e4b57c89077f3ff 100644
--- a/core/sdk-cpp/proto/general_model_service.proto
+++ b/core/sdk-cpp/proto/general_model_service.proto
@@ -37,6 +37,7 @@ message Request {
   repeated FeedInst insts = 1;
   repeated string fetch_var_names = 2;
   optional bool profile_server = 3 [ default = false ];
+  required uint64 log_id = 4 [ default = 0 ];
 };
 
 message Response {
diff --git a/python/paddle_serving_client/__init__.py b/python/paddle_serving_client/__init__.py
index cf669c54f3492fc739bedcfacc49537a5ecc545f..e4f4cceeb0a06f8ac6d20c09b19e51045c309476 100644
--- a/python/paddle_serving_client/__init__.py
+++ b/python/paddle_serving_client/__init__.py
@@ -233,7 +233,7 @@ class Client(object):
         #                 key))
         pass
 
-    def predict(self, feed=None, fetch=None, need_variant_tag=False):
+    def predict(self, feed=None, fetch=None, need_variant_tag=False, log_id=0):
         self.profile_.record('py_prepro_0')
 
         if feed is None or fetch is None:
@@ -319,12 +319,12 @@ class Client(object):
             res = self.client_handle_.numpy_predict(
                 float_slot_batch, float_feed_names, float_shape, int_slot_batch,
                 int_feed_names, int_shape, fetch_names, result_batch_handle,
-                self.pid)
+                self.pid, log_id)
         elif self.has_numpy_input == False:
             res = self.client_handle_.batch_predict(
                 float_slot_batch, float_feed_names, float_shape, int_slot_batch,
                 int_feed_names, int_shape, fetch_names, result_batch_handle,
-                self.pid)
+                self.pid, log_id)
         else:
             raise ValueError(
                 "Please make sure the inputs are all in list type or all in numpy.array type"
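Note: with this patch applied, a caller-supplied request id travels end to end: the Python `predict` kwarg is forwarded through pybind into the C++ client, copied into the `Request` proto (`req.set_log_id`), and finally placed on the server-side controller (`cntl->set_log_id`), so client and server logs can be joined on one id. A minimal usage sketch follows; the config path, endpoint, and feed/fetch names are illustrative (borrowed from the fit_a_line example), and `log_id` is the only argument this patch adds:

    from paddle_serving_client import Client

    client = Client()
    client.load_client_config("serving_client_conf.prototxt")  # illustrative path
    client.connect(["127.0.0.1:9393"])  # illustrative endpoint

    # log_id defaults to 0, so existing callers are unaffected; a non-zero
    # id is propagated into the RPC and tagged on the server-side logs.
    fetch_map = client.predict(
        feed={"x": [0.5] * 13}, fetch=["price"], log_id=10086)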