diff --git a/python/paddle_serving_server/__init__.py b/python/paddle_serving_server/__init__.py
index 3a5c07011ace961fdfb61ebf3217ab1aab375e82..2d28e92d85faab4b96cf6cc934eb6563d31ab155 100644
--- a/python/paddle_serving_server/__init__.py
+++ b/python/paddle_serving_server/__init__.py
@@ -231,6 +231,7 @@ class Server(object):
             self.infer_service_conf.services.extend([infer_service])
 
     def _prepare_resource(self, workdir):
+        self.workdir = workdir
         if self.resource_conf == None:
             with open("{}/{}".format(workdir, self.general_model_config_fn),
                       "w") as fout:
@@ -328,10 +329,10 @@ class Server(object):
         os.chdir(self.module_path)
         need_download = False
         device_version = self.get_device_version()
-        floder_name = device_version + serving_server_version
-        tar_name = floder_name + ".tar.gz"
+        folder_name = device_version + serving_server_version
+        tar_name = folder_name + ".tar.gz"
         bin_url = "https://paddle-serving.bj.bcebos.com/bin/" + tar_name
-        self.server_path = os.path.join(self.module_path, floder_name)
+        self.server_path = os.path.join(self.module_path, folder_name)
 
         #acquire lock
         version_file = open("{}/version.py".format(self.module_path), "r")
@@ -357,7 +358,7 @@ class Server(object):
                         os.remove(exe_path)
                     raise SystemExit(
                         'Decompressing failed, please check your permission of {} or disk space left.'.
-                        foemat(self.module_path))
+                        format(self.module_path))
                 finally:
                     os.remove(tar_name)
         #release lock
@@ -375,10 +376,10 @@ class Server(object):
         if not self.port_is_available(port):
             raise SystemExit("Prot {} is already used".format(port))
 
+        self.set_port(port)
         self._prepare_resource(workdir)
         self._prepare_engine(self.model_config_paths, device)
         self._prepare_infer_service(port)
-        self.port = port
         self.workdir = workdir
 
         infer_service_fn = "{}/{}".format(workdir, self.infer_service_fn)
diff --git a/python/paddle_serving_server/web_service.py b/python/paddle_serving_server/web_service.py
index b3fcc1b880fcbffa1da884e4b68350c1870997c1..8f859c2cebc836d50edb7904f31b4bd4f7e8d98f 100755
--- a/python/paddle_serving_server/web_service.py
+++ b/python/paddle_serving_server/web_service.py
@@ -85,9 +85,9 @@ class WebService(object):
             fetch_map = self.client.predict(feed=feed, fetch=fetch)
             for key in fetch_map:
                 fetch_map[key] = fetch_map[key].tolist()
-            fetch_map = self.postprocess(
+            result = self.postprocess(
                 feed=request.json["feed"], fetch=fetch, fetch_map=fetch_map)
-            result = {"result": fetch_map}
+            result = {"result": result}
         except ValueError:
             result = {"result": "Request Value Error"}
         return result
@@ -122,7 +122,7 @@ class WebService(object):
             processes=1)
 
     def get_app_instance(self):
-        return self.app_instance
+        return app_instance
 
     def preprocess(self, feed=[], fetch=[]):
         return feed, fetch
diff --git a/python/paddle_serving_server_gpu/web_service.py b/python/paddle_serving_server_gpu/web_service.py
index 76721de8a005dfb23fbe2427671446889aa72af1..4c887f0e3048031b97eb0deda0662adeaddeb559 100644
--- a/python/paddle_serving_server_gpu/web_service.py
+++ b/python/paddle_serving_server_gpu/web_service.py
@@ -50,12 +50,12 @@ class WebService(object):
         general_infer_op = op_maker.create('general_infer')
         general_response_op = op_maker.create('general_response')
 
-        op_seq_maker = serving.OpSeqMaker()
+        op_seq_maker = OpSeqMaker()
         op_seq_maker.add_op(read_op)
         op_seq_maker.add_op(general_infer_op)
         op_seq_maker.add_op(general_response_op)
 
-        server = serving.Server()
+        server = Server()
         server.set_op_sequence(op_seq_maker.get_op_sequence())
         server.set_num_threads(thread_num)
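
Note on the `@@ -85,9 +85,9 @@` hunk: the request path through `WebService` is
`preprocess` -> `client.predict` -> `postprocess`, and whatever `postprocess`
returns is now wrapped as `{"result": ...}` in the HTTP response. Below is a
minimal sketch of a user-side subclass that plugs into those hooks. The
`preprocess`/`postprocess` signatures are taken from the diff itself; the
`name=` constructor argument and the `load_model_config`/`prepare_server`/
`run_server` entry points are assumptions about the surrounding API, and the
service name and model path are hypothetical.

    # Sketch only: hook signatures come from the diff; the entry-point
    # calls (load_model_config/prepare_server/run_server) are assumed.
    from paddle_serving_server.web_service import WebService

    class UciService(WebService):
        def preprocess(self, feed=[], fetch=[]):
            # Runs before client.predict(); may rewrite the feed/fetch lists.
            return feed, fetch

        def postprocess(self, feed=[], fetch=[], fetch_map=None):
            # Runs after predict(); the return value becomes the "result"
            # field of the JSON response (the renamed `result` variable).
            return fetch_map

    service = UciService(name="uci")                # hypothetical service name
    service.load_model_config("uci_housing_model")  # hypothetical model path
    service.prepare_server(workdir="workdir", port=9393, device="cpu")
    service.run_server()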