提交 72352699 编写于 作者: G gongweibao

fix coverage test=develop

上级 fb451afd
......@@ -49,8 +49,8 @@ message EmptyRequest{
};
message ServingConfig{
    // Maximum batch size the server accepts; <=0 means unknown/unset.
    // NOTE(review): 'required' is proto2-only syntax -- drop the label
    // if this .proto file declares syntax = "proto3".
    required int32 max_batch_size = 1;
    // Serialized model config (prototxt) handed to remote clients.
    required string proto_txt = 2;
};
service MultiLangGeneralModelService {
......
文件模式从 100644 更改为 100755
......@@ -17,8 +17,8 @@ from paddle_serving_client import MultiLangClient
import sys
client = MultiLangClient()
client.load_client_config(sys.argv[1])
client.connect(["127.0.0.1:9393"])
#client.load_client_config(sys.argv[1])
client.connect(["127.0.0.1:19293"])
import paddle
test_reader = paddle.batch(
......
......@@ -15,9 +15,9 @@
import os
import sys
from paddle_serving_server import OpMaker
from paddle_serving_server import OpSeqMaker
from paddle_serving_server import MultiLangServer
from paddle_serving_server_gpu import OpMaker
from paddle_serving_server_gpu import OpSeqMaker
from paddle_serving_server_gpu import MultiLangServer
op_maker = OpMaker()
read_op = op_maker.create('general_reader')
......@@ -32,5 +32,5 @@ op_seq_maker.add_op(response_op)
server = MultiLangServer()
server.set_op_sequence(op_seq_maker.get_op_sequence())
server.load_model_config(sys.argv[1])
server.prepare_server(workdir="work_dir1", port=9393, device="cpu")
server.prepare_server(workdir="test19293", port=19393, device="gpu")
server.run_server()
......@@ -395,9 +395,9 @@ class MultiLangClient(object):
self._parse_model_config(proto_txt)
def _load_client_config(self):
    """Fetch the serving config from the remote server over gRPC and
    parse it into this client's feed/fetch metadata.

    Side effects: caches the raw response in ``self._config``.
    """
    req = pb2.EmptyRequest()
    self._config = self.stub_.get_config(req)
    # Fix: the original passed ``config.proto_txt`` where ``config`` is an
    # undefined name (NameError at runtime); the RPC response was stored
    # in self._config on the previous line.
    self._parse_model_config(self._config.proto_txt)
def connect(self, endpoint, use_remote_config=True):
......@@ -406,7 +406,7 @@ class MultiLangClient(object):
self.channel_)
if use_remote_config:
self._load_client_config(stub)
self._load_client_config()
def _flatten_list(self, nested_list):
for item in nested_list:
......
......@@ -459,7 +459,7 @@ class MultiLangServerService(
def _parse_model_config(self, proto_txt):
model_conf = m_config.GeneralModelConfig()
model_conf = google.protobuf.text_format.Merge(proto_txt), model_conf)
model_conf = google.protobuf.text_format.Merge(proto_txt, model_conf)
self.feed_names_ = [var.alias_name for var in model_conf.feed_var]
self.feed_types_ = {}
self.feed_shapes_ = {}
......@@ -547,22 +547,7 @@ class MultiLangServerService(
return self._pack_resp_package(data, fetch_names, is_python, tag)
def get_config(self, request, context):
    """gRPC handler: report this server's max batch size and model config.

    The PADDLE_SERVING_MAX_BATCH_SIZE environment variable, when set and
    parseable as an int, overrides the stored ``self._max_batch_size``.
    Returns a ``pb2.ServingConfig`` response.
    """
    key = "PADDLE_SERVING_MAX_BATCH_SIZE"
    max_batch_size = os.getenv(key)
    if max_batch_size:
        try:
            # Environment override wins when it parses as an integer.
            self._max_batch_size = int(max_batch_size)
        except ValueError:
            # Best-effort: keep the existing value, just warn.
            print("invalid value:{} of {}".format(max_batch_size, key))
    response = pb2.ServingConfig()
    # Fix: __init__ stores the config text as self._proto_txt (see the
    # constructor of this class); self.proto_txt would raise
    # AttributeError at runtime.
    response.proto_txt = self._proto_txt
    response.max_batch_size = self._max_batch_size
    return response
class MultiLangServer(object):
def __init__(self, worker_num=2):
self.bserver_ = Server()
......
......@@ -488,18 +488,23 @@ class MultiLangServerService(
multi_lang_general_model_service_pb2_grpc.MultiLangGeneralModelService):
def __init__(self, model_config_path, endpoints):
    """Load the serving config under ``model_config_path``, build the
    backing ``Client`` and connect it to ``endpoints``.

    The raw prototxt is cached on the instance so get_config() can hand
    it to remote clients without re-reading the file.
    """
    from paddle_serving_client import Client
    path = "{}/serving_server_conf.prototxt".format(model_config_path)
    with open(path, 'r') as f:
        proto_txt = str(f.read())
    self._parse_model_config(proto_txt)
    self.bclient_ = Client()
    self.bclient_.load_client_config(path)
    self.bclient_.connect(endpoints)
    self._max_batch_size = -1  # <=0: unknown until configured/overridden
    self._proto_txt = proto_txt
def _parse_model_config(self, proto_txt):
model_conf = m_config.GeneralModelConfig()
f = open("{}/serving_server_conf.prototxt".format(model_config_path),
'r')
model_conf = google.protobuf.text_format.Merge(
str(f.read()), model_conf)
model_conf = google.protobuf.text_format.Merge(proto_txt, model_conf)
self.feed_names_ = [var.alias_name for var in model_conf.feed_var]
self.feed_types_ = {}
self.feed_shapes_ = {}
......@@ -586,6 +591,23 @@ class MultiLangServerService(
feed=feed_dict, fetch=fetch_names, need_variant_tag=True)
return self._pack_resp_package(data, fetch_names, is_python, tag)
def get_config(self, request, context):
    """gRPC handler: report this server's max batch size and model config.

    The PADDLE_SERVING_MAX_BATCH_SIZE environment variable, when set and
    parseable as an int, overrides the stored ``self._max_batch_size``.
    Returns a ``pb2.ServingConfig`` response.
    """
    print("get request from client:", request)
    key = "PADDLE_SERVING_MAX_BATCH_SIZE"
    max_batch_size = os.getenv(key)
    if max_batch_size:
        try:
            # Environment override wins when it parses as an integer.
            self._max_batch_size = int(max_batch_size)
        except ValueError:
            # Best-effort: keep the existing value, just warn.
            print("invalid value:{} of {}".format(max_batch_size, key))
    response = pb2.ServingConfig()
    # Fix: __init__ stores the config text as self._proto_txt;
    # self.proto_txt does not exist and would raise AttributeError.
    response.proto_txt = self._proto_txt
    response.max_batch_size = self._max_batch_size
    return response
class MultiLangServer(object):
def __init__(self, worker_num=2):
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册