提交 992ca953 编写于 作者: M MRXLT

fix bug and add use_local_bin option

上级 ccf35cf4
...@@ -81,11 +81,13 @@ class Server(object): ...@@ -81,11 +81,13 @@ class Server(object):
self.general_model_config_fn = "general_model.prototxt" self.general_model_config_fn = "general_model.prototxt"
self.workdir = "" self.workdir = ""
self.max_concurrency = 0 self.max_concurrency = 0
self.num_threads = 0 self.num_threads = 4
self.port = 8080 self.port = 8080
self.reload_interval_s = 10 self.reload_interval_s = 10
self.module_path = os.path.dirname(paddle_serving_server.__file__) self.module_path = os.path.dirname(paddle_serving_server.__file__)
self.cur_path = os.getcwd() self.cur_path = os.getcwd()
self.vlog_level = 0
self.use_local_bin = False
def set_max_concurrency(self, concurrency): def set_max_concurrency(self, concurrency):
self.max_concurrency = concurrency self.max_concurrency = concurrency
...@@ -108,6 +110,10 @@ class Server(object): ...@@ -108,6 +110,10 @@ class Server(object):
def set_memory_optimize(self, flag=False): def set_memory_optimize(self, flag=False):
self.memory_optimization = flag self.memory_optimization = flag
def set_local_bin(self, path):
self.use_local_bin = True
self.bin_path = path
def _prepare_engine(self, model_config_path, device): def _prepare_engine(self, model_config_path, device):
if self.model_toolkit_conf == None: if self.model_toolkit_conf == None:
self.model_toolkit_conf = server_sdk.ModelToolkitConf() self.model_toolkit_conf = server_sdk.ModelToolkitConf()
...@@ -242,6 +248,7 @@ class Server(object): ...@@ -242,6 +248,7 @@ class Server(object):
def run_server(self): def run_server(self):
# just run server with system command # just run server with system command
# currently we do not load cube # currently we do not load cube
if not self.use_local_bin:
self.download_bin() self.download_bin()
command = "{} " \ command = "{} " \
"-enable_model_toolkit " \ "-enable_model_toolkit " \
......
...@@ -81,11 +81,13 @@ class Server(object): ...@@ -81,11 +81,13 @@ class Server(object):
self.general_model_config_fn = "general_model.prototxt" self.general_model_config_fn = "general_model.prototxt"
self.workdir = "" self.workdir = ""
self.max_concurrency = 0 self.max_concurrency = 0
self.num_threads = 0 self.num_threads = 4
self.port = 8080 self.port = 8080
self.reload_interval_s = 10 self.reload_interval_s = 10
self.module_path = os.path.dirname(paddle_serving_server.__file__) self.module_path = os.path.dirname(paddle_serving_server.__file__)
self.cur_path = os.getcwd() self.cur_path = os.getcwd()
self.vlog_level = 0
self.use_local_bin = False
def set_max_concurrency(self, concurrency): def set_max_concurrency(self, concurrency):
self.max_concurrency = concurrency self.max_concurrency = concurrency
...@@ -108,6 +110,10 @@ class Server(object): ...@@ -108,6 +110,10 @@ class Server(object):
def set_memory_optimize(self, flag=False): def set_memory_optimize(self, flag=False):
self.memory_optimization = flag self.memory_optimization = flag
def set_local_bin(self, path):
self.use_local_bin = True
self.bin_path = path
def set_gpuid(self, gpuid=0): def set_gpuid(self, gpuid=0):
self.gpuid = gpuid self.gpuid = gpuid
...@@ -226,6 +232,7 @@ class Server(object): ...@@ -226,6 +232,7 @@ class Server(object):
def run_server(self): def run_server(self):
# just run server with system command # just run server with system command
# currently we do not load cube # currently we do not load cube
if not self.use_local_bin:
self.download_bin() self.download_bin()
command = "{} " \ command = "{} " \
"-enable_model_toolkit " \ "-enable_model_toolkit " \
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册