diff --git a/python/examples/bert/benchmark_batch.py b/python/examples/bert/benchmark_batch.py
index 9b8e301a62eb0eee161cd701555543d329c6ae83..7cedb6aa451e0e4a128f0fedbfde1a896977f601 100644
--- a/python/examples/bert/benchmark_batch.py
+++ b/python/examples/bert/benchmark_batch.py
@@ -53,7 +53,7 @@ def single_func(idx, resource):
             feed_batch.append(reader.process(dataset[bi]))
         b_end = time.time()
         if profile_flags:
-            print("PROFILE\tpid:{}\tbert+pre_0:{} bert_pre_1:{}".format(
+            print("PROFILE\tpid:{}\tbert_pre_0:{} bert_pre_1:{}".format(
                 os.getpid(),
                 int(round(b_start * 1000000)),
                 int(round(b_end * 1000000))))
@@ -69,9 +69,7 @@ def single_func(idx, resource):
 
 if __name__ == '__main__':
     multi_thread_runner = MultiThreadRunner()
-    endpoint_list = [
-        "127.0.0.1:9292", "127.0.0.1:9293", "127.0.0.1:9294", "127.0.0.1:9295"
-    ]
+    endpoint_list = ["127.0.0.1:9292"]
     result = multi_thread_runner.run(single_func, args.thread,
                                      {"endpoint": endpoint_list})
     avg_cost = 0
diff --git a/python/examples/imagenet/image_http_client.py b/python/examples/imagenet/image_http_client.py
index 2a2e9ea20d7e428cfe42393e2fee60035c33283d..cda0f33ac82d0bd228a22a8f438cbe1aa013eadf 100644
--- a/python/examples/imagenet/image_http_client.py
+++ b/python/examples/imagenet/image_http_client.py
@@ -30,7 +30,10 @@ def predict(image_path, server):
     req = json.dumps({"image": image, "fetch": ["score"]})
     r = requests.post(
         server, data=req, headers={"Content-Type": "application/json"})
-    print(r.json()["score"][0])
+    try:
+        print(r.json()["score"][0])
+    except ValueError:
+        print(r.text)
     return r
 
 
diff --git a/python/paddle_serving_client/io/__init__.py b/python/paddle_serving_client/io/__init__.py
index f1a3dcf612e34d83387163d9fea491a7dca2c579..d723795f214e22957bff49f0ddf8fd42086b8a7e 100644
--- a/python/paddle_serving_client/io/__init__.py
+++ b/python/paddle_serving_client/io/__init__.py
@@ -32,7 +32,7 @@ def save_model(server_model_folder,
     executor = Executor(place=CPUPlace())
 
     feed_var_names = [feed_var_dict[x].name for x in feed_var_dict]
-    target_vars = fetch_var_dict.values()
+    target_vars = list(fetch_var_dict.values())
 
     save_inference_model(
         server_model_folder,