diff --git a/python/paddle_serving_client/__init__.py b/python/paddle_serving_client/__init__.py
index c3da5cd9e5f031287efdc4f7b5c05abf550dc761..2ba0e6e647d58b59dbaacb3153c9fd6eab722bf8 100644
--- a/python/paddle_serving_client/__init__.py
+++ b/python/paddle_serving_client/__init__.py
@@ -501,6 +501,8 @@ class MultiLangClient(object):
 
     def _unpack_resp(self, resp, fetch, is_python, need_variant_tag):
         tag = resp.tag
+        if resp.brpc_predict_error:
+            return None if not need_variant_tag else [None, tag]
         multi_result_map = {}
         for model_result in resp.outputs:
             inst = model_result.insts[0]
diff --git a/python/paddle_serving_server/__init__.py b/python/paddle_serving_server/__init__.py
index b1df969afac18e3e77b36fc4d3e7fc7f08f5e28f..81c890875d1b2a229f570bda2706d31bce6114fc 100644
--- a/python/paddle_serving_server/__init__.py
+++ b/python/paddle_serving_server/__init__.py
@@ -518,9 +518,14 @@ class MultiLangServerService(
         return feed_batch, fetch_names, is_python
 
     def _pack_resp_package(self, results, fetch_names, is_python, tag):
+        resp = multi_lang_general_model_service_pb2.Response()
+        resp.tag = tag
+        if results is None:
+            resp.brpc_predict_error = True
+            return resp
+        resp.brpc_predict_error = False
         if not self.is_multi_model_:
             results = {'general_infer_0': results}
-        resp = multi_lang_general_model_service_pb2.Response()
         for model_name, model_result in results.items():
             model_output = multi_lang_general_model_service_pb2.ModelOutput()
             inst = multi_lang_general_model_service_pb2.FetchInst()
@@ -546,7 +551,6 @@ class MultiLangServerService(
             model_output.insts.append(inst)
             model_output.engine_name = model_name
             resp.outputs.append(model_output)
-        resp.tag = tag
         return resp
 
     def inference(self, request, context):
diff --git a/python/paddle_serving_server_gpu/__init__.py b/python/paddle_serving_server_gpu/__init__.py
index 2f264c551d19fe2981c35bfc2277242ef431a8c2..9da5f79658d5fd2e44bc14e972d560d4c35daf62 100644
--- a/python/paddle_serving_server_gpu/__init__.py
+++ b/python/paddle_serving_server_gpu/__init__.py
@@ -567,9 +567,14 @@ class MultiLangServerService(
         return feed_batch, fetch_names, is_python
 
     def _pack_resp_package(self, results, fetch_names, is_python, tag):
+        resp = multi_lang_general_model_service_pb2.Response()
+        resp.tag = tag
+        if results is None:
+            resp.brpc_predict_error = True
+            return resp
+        resp.brpc_predict_error = False
         if not self.is_multi_model_:
             results = {'general_infer_0': results}
-        resp = multi_lang_general_model_service_pb2.Response()
         for model_name, model_result in results.items():
             model_output = multi_lang_general_model_service_pb2.ModelOutput()
             inst = multi_lang_general_model_service_pb2.FetchInst()
@@ -595,7 +600,6 @@ class MultiLangServerService(
             model_output.insts.append(inst)
             model_output.engine_name = model_name
             resp.outputs.append(model_output)
-        resp.tag = tag
         return resp
 
     def inference(self, request, context):