Commit dd8c2944 authored by gongweibao

fix coverage test=develop

Parent 72352699
@@ -32,5 +32,5 @@ op_seq_maker.add_op(response_op)
 server = MultiLangServer()
 server.set_op_sequence(op_seq_maker.get_op_sequence())
 server.load_model_config(sys.argv[1])
-server.prepare_server(workdir="test19293", port=19393, device="gpu")
+server.prepare_server(workdir="test19293", port=19293, device="gpu")
 server.run_server()
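The port fix above matters because the gRPC client must dial the same port the server was prepared with. A minimal client-side sketch, assuming a local MultiLangClient that pulls the model config over gRPC on connect; the feed/fetch names are placeholders, not taken from this commit:

# Hypothetical client for the test script above; "x"/"price" are placeholder
# feed/fetch names, and the endpoint must use the corrected port 19293.
import numpy as np
from paddle_serving_client import MultiLangClient

client = MultiLangClient()
client.connect(["127.0.0.1:19293"])   # connect() expects a list of endpoints
feed = {"x": np.random.rand(13).astype("float32")}
result = client.predict(feed=feed, fetch=["price"])
print(result)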
@@ -398,7 +398,8 @@ class MultiLangClient(object):
     def _load_client_config(self):
         req = pb2.EmptyRequest()
         self._config = self.stub_.get_config(req)
-        self._parse_model_config(config.proto_txt)
+        self._parse_model_config(self._config.proto_txt)
+        #print("config:", self._config)

     def connect(self, endpoint, use_remote_config=True):
         self.channel_ = grpc.insecure_channel(endpoint[0]) #TODO
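For context, `_parse_model_config` consumes the text-format model config carried in `proto_txt`; the fix above passes the response object's field instead of an undefined local `config`. A rough sketch of that parsing step, where the generated proto module and its `feed_var`/`fetch_var` field names are assumptions, not shown in this diff:

# Illustrative only: how a text-format model config string is typically parsed.
from google.protobuf import text_format
# general_model_config_pb2 is assumed to be the generated module for the
# model-config proto; the real module and field names may differ.
from paddle_serving_client.proto import general_model_config_pb2 as m_config

def parse_proto_txt(proto_txt):
    model_conf = m_config.GeneralModelConfig()
    text_format.Merge(proto_txt, model_conf)       # text format -> message
    feed_names = [var.alias_name for var in model_conf.feed_var]
    fetch_names = [var.alias_name for var in model_conf.fetch_var]
    return feed_names, fetch_names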
@@ -29,11 +29,11 @@ import fcntl
 import numpy as np
 import grpc
-from .proto import multi_lang_general_model_service_pb2
+from .proto import multi_lang_general_model_service_pb2 as pb2
 import sys
 sys.path.append(
     os.path.join(os.path.abspath(os.path.dirname(__file__)), 'proto'))
-from .proto import multi_lang_general_model_service_pb2_grpc
+from .proto import multi_lang_general_model_service_pb2_grpc as grpc_pb2
 from multiprocessing import Pool, Process
 from concurrent import futures
@@ -485,7 +485,7 @@ class Server(object):
 class MultiLangServerService(
-        multi_lang_general_model_service_pb2_grpc.MultiLangGeneralModelService):
+        grpc_pb2.MultiLangGeneralModelService):
     def __init__(self, model_config_path, endpoints):
         from paddle_serving_client import Client
@@ -560,12 +560,12 @@ class MultiLangServerService(
         return feed_batch, fetch_names, is_python

     def _pack_resp_package(self, result, fetch_names, is_python, tag):
-        resp = multi_lang_general_model_service_pb2.Response()
+        resp = pb2.Response()
         # Only one model is supported temporarily
-        model_output = multi_lang_general_model_service_pb2.ModelOutput()
-        inst = multi_lang_general_model_service_pb2.FetchInst()
+        model_output = pb2.ModelOutput()
+        inst = pb2.FetchInst()
         for idx, name in enumerate(fetch_names):
-            tensor = multi_lang_general_model_service_pb2.Tensor()
+            tensor = pb2.Tensor()
             v_type = self.fetch_types_[name]
             if is_python:
                 tensor.data = result[name].tobytes()
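The remainder of `_pack_resp_package` (not shown in this hunk) nests the packed tensors into the response. A hedged sketch of that pattern; the proto field names (`shape`, `tensor_array`, `insts`, `outputs`, `tag`) are assumptions about the multi_lang_general_model_service proto, not verified against this commit:

# Illustrative sketch of packing numpy results into the response protos.
def pack_resp(result, fetch_names, tag):
    resp = pb2.Response()
    model_output = pb2.ModelOutput()
    inst = pb2.FetchInst()
    for name in fetch_names:
        tensor = pb2.Tensor()
        tensor.data = result[name].tobytes()           # raw numpy bytes
        tensor.shape.extend(list(result[name].shape))  # assumed field name
        inst.tensor_array.append(tensor)               # assumed field name
    model_output.insts.append(inst)                    # assumed field name
    resp.outputs.append(model_output)                  # assumed field name
    resp.tag = tag                                     # assumed field name
    return resp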
@@ -603,7 +603,7 @@ class MultiLangServerService(
             print("invalid value:{} of {}".format(max_batch_size, key))
         response = pb2.ServingConfig()
-        response.proto_txt = self.proto_txt
+        response.proto_txt = self._proto_txt
         response.max_batch_size = self._max_batch_size
         return response
@@ -651,7 +651,7 @@ class MultiLangServer(object):
         p_bserver.start()
         server = grpc.server(
             futures.ThreadPoolExecutor(max_workers=self.worker_num_))
-        multi_lang_general_model_service_pb2_grpc.add_MultiLangGeneralModelServiceServicer_to_server(
+        grpc_pb2.add_MultiLangGeneralModelServiceServicer_to_server(
             MultiLangServerService(self.model_config_path_,
                                    ["0.0.0.0:{}".format(self.port_list_[0])]),
             server)
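For reference, this servicer registration sits inside the standard grpcio bring-up sequence. A minimal sketch of that surrounding flow; the worker count, ports, and model_config_path below are placeholders, and grpc_pb2 / MultiLangServerService are the names introduced in this commit:

# Minimal grpcio server bring-up matching the registration above.
from concurrent import futures
import grpc

server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
grpc_pb2.add_MultiLangGeneralModelServiceServicer_to_server(
    MultiLangServerService(model_config_path, ["0.0.0.0:9292"]), server)
server.add_insecure_port("[::]:9393")  # port exposed to the MultiLangClient
server.start()
server.wait_for_termination()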