Commit dd8c2944 authored by gongweibao

fix coverage test=develop

Parent 72352699
@@ -32,5 +32,5 @@ op_seq_maker.add_op(response_op)
 server = MultiLangServer()
 server.set_op_sequence(op_seq_maker.get_op_sequence())
 server.load_model_config(sys.argv[1])
-server.prepare_server(workdir="test19293", port=19393, device="gpu")
+server.prepare_server(workdir="test19293", port=19293, device="gpu")
 server.run_server()
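
The port fix in the hunk above matters because the client must dial the same port that prepare_server() binds. A minimal sketch of the matching client side (assumption: MultiLangClient is importable from paddle_serving_client and uses the connect() signature shown later in this diff):

```python
# Sketch only; the endpoint port must match prepare_server(port=19293) above.
from paddle_serving_client import MultiLangClient  # assumed export location

client = MultiLangClient()
client.connect(["127.0.0.1:19293"])  # connect() takes a list; element 0 is dialed
```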
@@ -398,7 +398,8 @@ class MultiLangClient(object):
     def _load_client_config(self):
         req= pb2.EmptyRequest()
         self._config = self.stub_.get_config(req)
-        self._parse_model_config(config.proto_txt)
+        self._parse_model_config(self._config.proto_txt)
+        #print("config:", self._config)

     def connect(self, endpoint, use_remote_config=True):
         self.channel_ = grpc.insecure_channel(endpoint[0]) #TODO
...
@@ -29,11 +29,11 @@ import fcntl
 import numpy as np
 import grpc
-from .proto import multi_lang_general_model_service_pb2
+from .proto import multi_lang_general_model_service_pb2 as pb2
 import sys
 sys.path.append(
     os.path.join(os.path.abspath(os.path.dirname(__file__)), 'proto'))
-from .proto import multi_lang_general_model_service_pb2_grpc
+from .proto import multi_lang_general_model_service_pb2_grpc as grpc_pb2
 from multiprocessing import Pool, Process
 from concurrent import futures
@@ -485,7 +485,7 @@ class Server(object):
 class MultiLangServerService(
-        multi_lang_general_model_service_pb2_grpc.MultiLangGeneralModelService):
+        grpc_pb2.MultiLangGeneralModelService):
     def __init__(self, model_config_path, endpoints):
         from paddle_serving_client import Client
@@ -560,12 +560,12 @@ class MultiLangServerService(
         return feed_batch, fetch_names, is_python

     def _pack_resp_package(self, result, fetch_names, is_python, tag):
-        resp = multi_lang_general_model_service_pb2.Response()
+        resp = pb2.Response()
         # Only one model is supported temporarily
-        model_output = multi_lang_general_model_service_pb2.ModelOutput()
-        inst = multi_lang_general_model_service_pb2.FetchInst()
+        model_output = pb2.ModelOutput()
+        inst = pb2.FetchInst()
         for idx, name in enumerate(fetch_names):
-            tensor = multi_lang_general_model_service_pb2.Tensor()
+            tensor = pb2.Tensor()
             v_type = self.fetch_types_[name]
             if is_python:
                 tensor.data = result[name].tobytes()
@@ -603,7 +603,7 @@ class MultiLangServerService(
             print("invalid value:{} of {}".format(max_batch_size, key))
         response = pb2.ServingConfig()
-        response.proto_txt = self.proto_txt
+        response.proto_txt = self._proto_txt
         response.max_batch_size = self._max_batch_size
         return response
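
The get_config handler above replies with a ServingConfig whose proto_txt field carries the serialized model config and whose max_batch_size advertises the server-side limit; the client hunk earlier parses proto_txt in _load_client_config(). A standalone sketch of that round trip, assuming the generated modules are importable under the aliases used in this diff and that the stub follows the usual gRPC codegen naming:

```python
# Assumptions: generated modules are on the path; <Service>Stub naming applies.
import grpc
import multi_lang_general_model_service_pb2 as pb2
import multi_lang_general_model_service_pb2_grpc as grpc_pb2

channel = grpc.insecure_channel("127.0.0.1:19293")
stub = grpc_pb2.MultiLangGeneralModelServiceStub(channel)

cfg = stub.get_config(pb2.EmptyRequest())  # ServingConfig message
print(cfg.max_batch_size)                  # server-advertised batch limit
# cfg.proto_txt is the model config text the client feeds to
# _parse_model_config(), as in the MultiLangClient hunk above.
```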
@@ -651,7 +651,7 @@ class MultiLangServer(object):
         p_bserver.start()
         server = grpc.server(
             futures.ThreadPoolExecutor(max_workers=self.worker_num_))
-        multi_lang_general_model_service_pb2_grpc.add_MultiLangGeneralModelServiceServicer_to_server(
+        grpc_pb2.add_MultiLangGeneralModelServiceServicer_to_server(
             MultiLangServerService(self.model_config_path_,
                                    ["0.0.0.0:{}".format(self.port_list_[0])]),
             server)
...
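
The last hunk only shows the servicer registration; binding a port, starting the server, and blocking are outside the diff context. For reference, a sketch of the standard grpc bring-up this plugs into (the function name, bind address, and worker count below are placeholders, not values from this commit):

```python
# Standard grpc server bring-up around the registration shown above.
# The servicer argument would be a MultiLangServerService instance.
from concurrent import futures
import grpc
import multi_lang_general_model_service_pb2_grpc as grpc_pb2  # assumed on path

def serve(servicer, bind_address="0.0.0.0:9494", workers=4):
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=workers))
    grpc_pb2.add_MultiLangGeneralModelServiceServicer_to_server(servicer, server)
    server.add_insecure_port(bind_address)  # port the MultiLang clients dial
    server.start()
    server.wait_for_termination()
```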