提交 715153f5 编写于 作者: M MRXLT

Support changing the message body size limit

上级 1c7f5159
...@@ -111,7 +111,7 @@ class Client(object): ...@@ -111,7 +111,7 @@ class Client(object):
self.result_handle_ = PredictorRes() self.result_handle_ = PredictorRes()
self.client_handle_ = PredictorClient() self.client_handle_ = PredictorClient()
self.client_handle_.init(path) self.client_handle_.init(path)
read_env_flags = ["profile_client", "profile_server"] read_env_flags = ["profile_client", "profile_server", "max_body_size"]
self.client_handle_.init_gflags([sys.argv[ self.client_handle_.init_gflags([sys.argv[
0]] + ["--tryfromenv=" + ",".join(read_env_flags)]) 0]] + ["--tryfromenv=" + ",".join(read_env_flags)])
self.feed_names_ = [var.alias_name for var in model_conf.feed_var] self.feed_names_ = [var.alias_name for var in model_conf.feed_var]
......
...@@ -89,6 +89,7 @@ class Server(object): ...@@ -89,6 +89,7 @@ class Server(object):
self.num_threads = 4 self.num_threads = 4
self.port = 8080 self.port = 8080
self.reload_interval_s = 10 self.reload_interval_s = 10
self.max_body_size = 64 * 1024 * 1024
self.module_path = os.path.dirname(paddle_serving_server.__file__) self.module_path = os.path.dirname(paddle_serving_server.__file__)
self.cur_path = os.getcwd() self.cur_path = os.getcwd()
self.use_local_bin = False self.use_local_bin = False
...@@ -100,6 +101,14 @@ class Server(object): ...@@ -100,6 +101,14 @@ class Server(object):
def set_num_threads(self, threads): def set_num_threads(self, threads):
self.num_threads = threads self.num_threads = threads
def set_max_body_size(self, body_size):
    """Raise the serving message body size limit to *body_size* bytes.

    Requests smaller than the current limit are rejected with a notice
    on stdout and the existing limit is kept, so the limit can only grow.
    """
    if body_size < self.max_body_size:
        # Keep the larger existing limit; shrinking is not supported.
        print(
            "max_body_size is less than default value, will use default value in service."
        )
    else:
        self.max_body_size = body_size
def set_port(self, port): def set_port(self, port):
self.port = port self.port = port
...@@ -292,7 +301,8 @@ class Server(object): ...@@ -292,7 +301,8 @@ class Server(object):
"-resource_file {} " \ "-resource_file {} " \
"-workflow_path {} " \ "-workflow_path {} " \
"-workflow_file {} " \ "-workflow_file {} " \
"-bthread_concurrency {} ".format( "-bthread_concurrency {} " \
"-max_body_size {} ".format(
self.bin_path, self.bin_path,
self.workdir, self.workdir,
self.infer_service_fn, self.infer_service_fn,
...@@ -304,7 +314,8 @@ class Server(object): ...@@ -304,7 +314,8 @@ class Server(object):
self.resource_fn, self.resource_fn,
self.workdir, self.workdir,
self.workflow_fn, self.workflow_fn,
self.num_threads) self.num_threads,
self.max_body_size)
print("Going to Run Command") print("Going to Run Command")
print(command) print(command)
os.system(command) os.system(command)
...@@ -41,6 +41,11 @@ def parse_args(): # pylint: disable=doc-string-missing ...@@ -41,6 +41,11 @@ def parse_args(): # pylint: disable=doc-string-missing
"--device", type=str, default="cpu", help="Type of device") "--device", type=str, default="cpu", help="Type of device")
parser.add_argument( parser.add_argument(
"--mem_optim", type=bool, default=False, help="Memory optimize") "--mem_optim", type=bool, default=False, help="Memory optimize")
parser.add_argument(
"--max_body_size",
type=int,
default=64 * 1024 * 1024,
help="Limit sizes of messages")
return parser.parse_args() return parser.parse_args()
...@@ -52,6 +57,7 @@ def start_standard_model(): # pylint: disable=doc-string-missing ...@@ -52,6 +57,7 @@ def start_standard_model(): # pylint: disable=doc-string-missing
workdir = args.workdir workdir = args.workdir
device = args.device device = args.device
mem_optim = args.mem_optim mem_optim = args.mem_optim
max_body_size = args.max_body_size
if model == "": if model == "":
print("You must specify your serving model") print("You must specify your serving model")
...@@ -72,6 +78,7 @@ def start_standard_model(): # pylint: disable=doc-string-missing ...@@ -72,6 +78,7 @@ def start_standard_model(): # pylint: disable=doc-string-missing
server.set_op_sequence(op_seq_maker.get_op_sequence()) server.set_op_sequence(op_seq_maker.get_op_sequence())
server.set_num_threads(thread_num) server.set_num_threads(thread_num)
server.set_memory_optimize(mem_optim) server.set_memory_optimize(mem_optim)
server.set_max_body_size(max_body_size)
server.load_model_config(model) server.load_model_config(model)
server.prepare_server(workdir=workdir, port=port, device=device) server.prepare_server(workdir=workdir, port=port, device=device)
......
...@@ -46,6 +46,11 @@ def serve_args(): ...@@ -46,6 +46,11 @@ def serve_args():
"--name", type=str, default="None", help="Default service name") "--name", type=str, default="None", help="Default service name")
parser.add_argument( parser.add_argument(
"--mem_optim", type=bool, default=False, help="Memory optimize") "--mem_optim", type=bool, default=False, help="Memory optimize")
parser.add_argument(
"--max_body_size",
type=int,
default=64 * 1024 * 1024,
help="Limit sizes of messages")
return parser.parse_args() return parser.parse_args()
...@@ -114,6 +119,7 @@ class Server(object): ...@@ -114,6 +119,7 @@ class Server(object):
self.num_threads = 4 self.num_threads = 4
self.port = 8080 self.port = 8080
self.reload_interval_s = 10 self.reload_interval_s = 10
self.max_body_size = 64 * 1024 * 1024
self.module_path = os.path.dirname(paddle_serving_server.__file__) self.module_path = os.path.dirname(paddle_serving_server.__file__)
self.cur_path = os.getcwd() self.cur_path = os.getcwd()
self.check_cuda() self.check_cuda()
...@@ -126,6 +132,14 @@ class Server(object): ...@@ -126,6 +132,14 @@ class Server(object):
def set_num_threads(self, threads): def set_num_threads(self, threads):
self.num_threads = threads self.num_threads = threads
def set_max_body_size(self, body_size):
    """Set the serving message body size limit, in bytes.

    Only values >= the current ``self.max_body_size`` (initialized to
    64 MiB elsewhere in this class) are applied; smaller values are
    ignored with a notice printed to stdout, so the limit can only grow.
    """
    if body_size >= self.max_body_size:
        self.max_body_size = body_size
    else:
        # NOTE(review): message says "default value" but this actually
        # compares against the *current* value, which may already have
        # been raised by an earlier call.
        print(
            "max_body_size is less than default value, will use default value in service."
        )
def set_port(self, port): def set_port(self, port):
self.port = port self.port = port
...@@ -324,7 +338,8 @@ class Server(object): ...@@ -324,7 +338,8 @@ class Server(object):
"-workflow_path {} " \ "-workflow_path {} " \
"-workflow_file {} " \ "-workflow_file {} " \
"-bthread_concurrency {} " \ "-bthread_concurrency {} " \
"-gpuid {} ".format( "-gpuid {} " \
"-max_body_size {} ".format(
self.bin_path, self.bin_path,
self.workdir, self.workdir,
self.infer_service_fn, self.infer_service_fn,
...@@ -337,7 +352,8 @@ class Server(object): ...@@ -337,7 +352,8 @@ class Server(object):
self.workdir, self.workdir,
self.workflow_fn, self.workflow_fn,
self.num_threads, self.num_threads,
self.gpuid,) self.gpuid,
self.max_body_size)
print("Going to Run Comand") print("Going to Run Comand")
print(command) print(command)
......
...@@ -35,6 +35,7 @@ def start_gpu_card_model(index, gpuid, args): # pylint: disable=doc-string-miss ...@@ -35,6 +35,7 @@ def start_gpu_card_model(index, gpuid, args): # pylint: disable=doc-string-miss
thread_num = args.thread thread_num = args.thread
model = args.model model = args.model
mem_optim = args.mem_optim mem_optim = args.mem_optim
max_body_size = args.max_body_size
workdir = "{}_{}".format(args.workdir, gpuid) workdir = "{}_{}".format(args.workdir, gpuid)
if model == "": if model == "":
...@@ -56,6 +57,7 @@ def start_gpu_card_model(index, gpuid, args): # pylint: disable=doc-string-miss ...@@ -56,6 +57,7 @@ def start_gpu_card_model(index, gpuid, args): # pylint: disable=doc-string-miss
server.set_op_sequence(op_seq_maker.get_op_sequence()) server.set_op_sequence(op_seq_maker.get_op_sequence())
server.set_num_threads(thread_num) server.set_num_threads(thread_num)
server.set_memory_optimize(mem_optim) server.set_memory_optimize(mem_optim)
server.set_max_body_size(max_body_size)
server.load_model_config(model) server.load_model_config(model)
server.prepare_server(workdir=workdir, port=port, device=device) server.prepare_server(workdir=workdir, port=port, device=device)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册