diff --git a/python/paddle_serving_client/client.py b/python/paddle_serving_client/client.py
index eeecd9d6ebc7d489d4ce038383ecb78d8fa55464..7d723f3824c317802885e0a9a78acf45c8ec16a2 100755
--- a/python/paddle_serving_client/client.py
+++ b/python/paddle_serving_client/client.py
@@ -326,8 +326,8 @@ class Client(object):
         string_feed_names = []
         string_lod_slot_batch = []
         string_shape = []
-        fetch_names = []
 
+        counter = 0
 
         # batch_size must be 1, cause batch is already in Tensor.
         batch_size = len(feed_batch)
@@ -339,7 +339,6 @@
         if len(fetch_names) == 0:
             raise ValueError(
                 "Fetch names should not be empty or out of saved fetch list.")
-            return {}
 
         for i, feed_i in enumerate(feed_batch):
             int_slot = []
diff --git a/python/paddle_serving_server/web_service.py b/python/paddle_serving_server/web_service.py
index b2ef5979c2ffd821936655ff2ed5182020b34eb1..346644ffb89329b419602e976b449dafc7ee3101 100755
--- a/python/paddle_serving_server/web_service.py
+++ b/python/paddle_serving_server/web_service.py
@@ -92,7 +92,7 @@ class WebService(object):
             f = open(file_path_list[0], 'r')
             model_conf = google.protobuf.text_format.Merge(
                 str(f.read()), model_conf)
-            self.feed_vars = {var.name: var for var in model_conf.feed_var}
+            self.feed_vars = {var.alias_name: var for var in model_conf.feed_var}
 
         if len(file_path_list) > 1:
             model_conf = m_config.GeneralModelConfig()
@@ -100,7 +100,7 @@
             model_conf = google.protobuf.text_format.Merge(
                 str(f.read()), model_conf)
 
-            self.fetch_vars = {var.name: var for var in model_conf.fetch_var}
+            self.fetch_vars = {var.alias_name: var for var in model_conf.fetch_var}
 
         if client_config_path == None:
             self.client_config_path = file_path_list