diff --git a/python/examples/fit_a_line/benchmark.py b/python/examples/fit_a_line/benchmark.py
index 0ddda2a095eb8542887ea592a79b16665f2daa15..5cc094dc17f2b4519e076b34d4368f0f33b8f3eb 100644
--- a/python/examples/fit_a_line/benchmark.py
+++ b/python/examples/fit_a_line/benchmark.py
@@ -35,7 +35,7 @@ def single_func(idx, resource):
             batch_size=1)
         start = time.time()
         for data in train_reader():
-            fetch_map = client.predict(feed={"x": data[0][0]}, fetch=["price"])
+            fetch_map = client.predict(feed={"x": data[0][0]}, fetch=["price"],batch=True)
         end = time.time()
         return [[end - start]]
     elif args.request == "http":
diff --git a/python/paddle_serving_client/client.py b/python/paddle_serving_client/client.py
index 88dcf28cc7d5f96417a70a331610718b697644bb..8f1218b84d892069cb21a6c56406b66bd8f6c26a 100755
--- a/python/paddle_serving_client/client.py
+++ b/python/paddle_serving_client/client.py
@@ -155,7 +155,7 @@ class Client(object):
         file_path_list = []
         for single_model_config in model_config_path_list:
             if os.path.isdir(single_model_config):
-                file_path_list.append("{}/serving_server_conf.prototxt".format(
+                file_path_list.append("{}/serving_client_conf.prototxt".format(
                     single_model_config))
             elif os.path.isfile(single_model_config):
                 file_path_list.append(single_model_config)
@@ -574,7 +574,7 @@ class MultiLangClient(object):
         file_path_list = []
         for single_model_config in model_config_path_list:
             if os.path.isdir(single_model_config):
-                file_path_list.append("{}/serving_server_conf.prototxt".format(
+                file_path_list.append("{}/serving_client_conf.prototxt".format(
                     single_model_config))
             elif os.path.isfile(single_model_config):
                 file_path_list.append(single_model_config)
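
For context, a minimal usage sketch of a client call after these changes: when `load_client_config` is given a directory it now resolves `serving_client_conf.prototxt` (rather than the server config), and `predict` is called with `batch=True` as in the updated benchmark. The endpoint, model directory name, and input values below are assumptions for illustration; the feed key `"x"` and fetch name `"price"` come from the fit_a_line benchmark above.

```python
from paddle_serving_client import Client

client = Client()
# Passing a directory now picks up <dir>/serving_client_conf.prototxt.
client.load_client_config("uci_housing_client")      # hypothetical directory name
client.connect(["127.0.0.1:9393"])                   # assumed serving endpoint

# One uci_housing sample has 13 features; values here are placeholders.
sample = [0.1] * 13
fetch_map = client.predict(feed={"x": sample}, fetch=["price"], batch=True)
print(fetch_map)
```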