Commit 0b09eed6 authored by barrierye

fix bug

Parent ffd8d3d7
@@ -38,6 +38,7 @@ message Request {
 message Response {
   repeated ModelOutput outputs = 1;
   optional string tag = 2;
+  optional bool brpc_predict_error = 3;
 };
 
 message ModelOutput {
...
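The new optional field is what lets the server report a failed brpc prediction without faking a tag. A minimal sketch of how the field behaves on the generated message; the import path is an assumption (only the module name multi_lang_general_model_service_pb2 appears in this commit), and HasField assumes proto2 syntax, which the optional keyword suggests:

    # Sketch only: assumes the generated module is importable under the name
    # the server code in this commit uses.
    import multi_lang_general_model_service_pb2 as pb2

    resp = pb2.Response()
    print(resp.brpc_predict_error)               # unset optional bool -> False
    print(resp.HasField("brpc_predict_error"))   # False until explicitly set

    resp.brpc_predict_error = True               # what the server sets on a failed predict
    print(resp.HasField("brpc_predict_error"))   # True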
@@ -500,9 +500,9 @@ class MultiLangClient(object):
         return req
 
     def _unpack_resp(self, resp, fetch, is_python, need_variant_tag):
-        tag = resp.tag
         if resp.brpc_predict_error:
-            return None if not need_variant_tag else [None, tag]
+            return None
+        tag = resp.tag
         multi_result_map = {}
         for model_result in resp.outputs:
             inst = model_result.insts[0]
...
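Since the server no longer fills in tag on the error path (see the server hunks below), the client now returns plain None instead of a [None, tag] pair, so callers need only one check. A usage sketch; the endpoint, feed/fetch names, and the exact success shape under need_variant_tag are illustrative, not taken from this commit:

    import numpy as np
    from paddle_serving_client import MultiLangClient

    client = MultiLangClient()
    client.connect(["127.0.0.1:9393"])           # placeholder endpoint

    ret = client.predict(feed={"x": np.random.rand(13).astype("float32")},
                         fetch=["price"],        # placeholder fetch name
                         need_variant_tag=True)
    if ret is None:
        # The server answered with brpc_predict_error = True: the brpc
        # prediction behind the gRPC gateway failed.
        print("prediction failed")
    else:
        result_map, tag = ret                    # assumed success shape with need_variant_tag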
@@ -517,12 +517,13 @@ class MultiLangServerService(
             feed_batch.append(feed_dict)
         return feed_batch, fetch_names, is_python
 
-    def _pack_resp_package(self, results, fetch_names, is_python, tag):
+    def _pack_resp_package(self, ret, fetch_names, is_python):
         resp = multi_lang_general_model_service_pb2.Response()
-        resp.tag = tag
-        if results is None:
+        if ret is None:
             resp.brpc_predict_error = True
-            return
+            return resp
+        results, tag = ret
+        resp.tag = tag
         resp.brpc_predict_error = False
         if not self.is_multi_model_:
             results = {'general_infer_0': results}
...
@@ -555,9 +556,9 @@ class MultiLangServerService(
 
     def inference(self, request, context):
         feed_dict, fetch_names, is_python = self._unpack_request(request)
-        data, tag = self.bclient_.predict(
+        ret = self.bclient_.predict(
             feed=feed_dict, fetch=fetch_names, need_variant_tag=True)
-        return self._pack_resp_package(data, fetch_names, is_python, tag)
+        return self._pack_resp_package(ret, fetch_names, is_python)
 
 
 class MultiLangServer(object):
...
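The server-side bug is visible in inference(): self.bclient_.predict(..., need_variant_tag=True) hands back a (result, tag) pair on success but, as the new None check implies, just None when the brpc call fails. The old "data, tag = ..." unpacking therefore raised a TypeError before any error response could be built, and even if it had gotten that far, the bare return in the old _pack_resp_package would have handed None back to gRPC instead of a Response. A small sketch of the two control flows; fake_brpc_predict is a hypothetical stand-in for the brpc client:

    # Stand-in for self.bclient_.predict(..., need_variant_tag=True):
    # (result_map, tag) on success, None on failure.
    def fake_brpc_predict(ok):
        return ({"price": [14.69]}, "variant_0") if ok else None

    # Old flow: the unpacking itself blows up on failure.
    try:
        data, tag = fake_brpc_predict(ok=False)
    except TypeError as err:
        print("old code:", err)       # cannot unpack non-iterable NoneType object

    # New flow: the raw return value travels into _pack_resp_package,
    # where None is translated into brpc_predict_error = True.
    ret = fake_brpc_predict(ok=False)
    if ret is None:
        print("new code: build a Response with brpc_predict_error = True")
    else:
        results, tag = ret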
@@ -566,12 +566,13 @@ class MultiLangServerService(
             feed_batch.append(feed_dict)
         return feed_batch, fetch_names, is_python
 
-    def _pack_resp_package(self, results, fetch_names, is_python, tag):
+    def _pack_resp_package(self, ret, fetch_names, is_python):
         resp = multi_lang_general_model_service_pb2.Response()
-        resp.tag = tag
-        if results is None:
+        if ret is None:
             resp.brpc_predict_error = True
-            return
+            return resp
+        results, tag = ret
+        resp.tag = tag
         resp.brpc_predict_error = False
         if not self.is_multi_model_:
             results = {'general_infer_0': results}
...
@@ -604,9 +605,9 @@ class MultiLangServerService(
 
     def inference(self, request, context):
         feed_dict, fetch_names, is_python = self._unpack_request(request)
-        data, tag = self.bclient_.predict(
+        ret = self.bclient_.predict(
            feed=feed_dict, fetch=fetch_names, need_variant_tag=True)
-        return self._pack_resp_package(data, fetch_names, is_python, tag)
+        return self._pack_resp_package(ret, fetch_names, is_python)
 
 
 class MultiLangServer(object):
...
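The same _pack_resp_package and inference edit is applied here to a second copy of MultiLangServerService at different line offsets, and the "return resp" part matters on its own: a gRPC servicer method must hand back a Response message, so the old bare return would have left the error flag undelivered. A sketch of the fixed packing behavior in isolation; pack_resp_sketch is hypothetical and only mirrors the error-handling shape of _pack_resp_package, and the import path is an assumption:

    import multi_lang_general_model_service_pb2 as pb2   # import path is an assumption

    def pack_resp_sketch(ret):
        resp = pb2.Response()
        if ret is None:
            resp.brpc_predict_error = True
            return resp               # old code fell off with a bare return here
        results, tag = ret            # result packing omitted in this sketch
        resp.tag = tag
        resp.brpc_predict_error = False
        return resp

    error_resp = pack_resp_sketch(None)
    assert error_resp.brpc_predict_error and not error_resp.HasField("tag")

    ok_resp = pack_resp_sketch(({"price": [14.69]}, "variant_0"))
    assert ok_resp.tag == "variant_0" and not ok_resp.brpc_predict_error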