diff --git a/python/examples/bert/benchmark.py b/python/examples/bert/benchmark.py
index baffb85d451ddf50f5a12e4429d850933c3cce5c..e14c02fe1231c1ab04bcf1fda67046ea6b3806bb 100644
--- a/python/examples/bert/benchmark.py
+++ b/python/examples/bert/benchmark.py
@@ -41,9 +41,7 @@ def single_func(idx, resource):
         fetch = ["pooled_output"]
         client = Client()
         client.load_client_config(args.model)
-        client.add_variant(
-            "var1", [resource["endpoint"][idx % len(resource["endpoint"])]], 50)
-        client.connect()
+        client.connect([resource["endpoint"][idx % len(resource["endpoint"])]])
 
         start = time.time()
         for i in range(1000):
diff --git a/python/examples/bert/benchmark_batch.py b/python/examples/bert/benchmark_batch.py
index a762842d49053799a32285e859794028bcf4c02a..e0f677146a47c0366a1bbafe9eff049e2671a617 100644
--- a/python/examples/bert/benchmark_batch.py
+++ b/python/examples/bert/benchmark_batch.py
@@ -40,9 +40,7 @@ def single_func(idx, resource):
         fetch = ["pooled_output"]
         client = Client()
         client.load_client_config(args.model)
-        client.add_variant(
-            "var1", [resource["endpoint"][idx % len(resource["endpoint"])]], 50)
-        client.connect()
+        client.connect([resource["endpoint"][idx % len(resource["endpoint"])]])
         feed_batch = []
         for bi in range(args.batch_size):
             feed_batch.append(reader.process(dataset[bi]))
diff --git a/python/examples/bert/bert_client.py b/python/examples/bert/bert_client.py
index 53c3ed932a57a1440ee281c3bad871f9141db0d3..91323bc1d309206d54451b322bc312b6f07c382a 100644
--- a/python/examples/bert/bert_client.py
+++ b/python/examples/bert/bert_client.py
@@ -33,8 +33,7 @@ fetch = ["pooled_output"]
 endpoint_list = ["127.0.0.1:9494"]
 client = Client()
 client.load_client_config(args.model)
-client.add_variant("var1", endpoint_list, 50)
-client.connect()
+client.connect(endpoint_list)
 
 for line in fin:
     feed_dict = reader.process(line)
diff --git a/python/examples/criteo_ctr/benchmark.py b/python/examples/criteo_ctr/benchmark.py
index a1ba1193a02e0277400326fa130a341f148f634b..8be7387d6ef32d656f676d55c21e25052e403f16 100644
--- a/python/examples/criteo_ctr/benchmark.py
+++ b/python/examples/criteo_ctr/benchmark.py
@@ -43,9 +43,7 @@ def single_func(idx, resource):
         fetch = ["prob"]
         client = Client()
         client.load_client_config(args.model)
-        client.add_variant(
-            "var1", [resource["endpoint"][idx % len(resource["endpoint"])]], 50)
-        client.connect()
+        client.connect([resource["endpoint"][idx % len(resource["endpoint"])]])
 
         start = time.time()
         for i in range(1000):
diff --git a/python/examples/criteo_ctr/benchmark_batch.py b/python/examples/criteo_ctr/benchmark_batch.py
index ea706f5b94f9a30d3ea8d5998f1dc4e1a1886efb..47b63a6ade0c21bdc82a5c67d65b39ffc614e06c 100644
--- a/python/examples/criteo_ctr/benchmark_batch.py
+++ b/python/examples/criteo_ctr/benchmark_batch.py
@@ -43,9 +43,7 @@ def single_func(idx, resource):
         fetch = ["prob"]
         client = Client()
         client.load_client_config(args.model)
-        client.add_variant(
-            "var1", [resource["endpoint"][idx % len(resource["endpoint"])]], 50)
-        client.connect()
+        client.connect([resource["endpoint"][idx % len(resource["endpoint"])]])
 
         start = time.time()
         for i in range(1000):
diff --git a/python/examples/criteo_ctr/test_client.py b/python/examples/criteo_ctr/test_client.py
index 03e3557543de22af8cdfdef5f33af723a913609f..9b3681c4117d123abd490668f44e43ab9f1e855f 100644
--- a/python/examples/criteo_ctr/test_client.py
+++ b/python/examples/criteo_ctr/test_client.py
@@ -23,8 +23,7 @@ from paddle_serving_client.metric import auc
 
 client = Client()
 client.load_client_config(sys.argv[1])
-client.add_variant("var1", ["127.0.0.1:9292"], 50)
-client.connect()
+client.connect(["127.0.0.1:9292"])
 
 batch = 1
 buf_size = 100
diff --git a/python/examples/criteo_ctr_with_cube/benchmark.py b/python/examples/criteo_ctr_with_cube/benchmark.py
index 51e35289a2968f2fb4d6d2e0fdf3edbe8c2d9060..e5bde9f996fccc41027fa6d255ca227cba212e22 100755
--- a/python/examples/criteo_ctr_with_cube/benchmark.py
+++ b/python/examples/criteo_ctr_with_cube/benchmark.py
@@ -30,8 +30,7 @@ args = benchmark_args()
 def single_func(idx, resource):
     client = Client()
     client.load_client_config('ctr_client_conf/serving_client_conf.prototxt')
-    client.add_variant("var1", ['127.0.0.1:9292'], 50)
-    client.connect()
+    client.connect(['127.0.0.1:9292'])
     batch = 1
     buf_size = 100
     dataset = criteo.CriteoDataset()
diff --git a/python/examples/criteo_ctr_with_cube/benchmark_batch.py b/python/examples/criteo_ctr_with_cube/benchmark_batch.py
index 4be1e0c5e313d4624a932ed46109919df34e1c83..b4b15892375e830486afa320151fac619aab2ba7 100755
--- a/python/examples/criteo_ctr_with_cube/benchmark_batch.py
+++ b/python/examples/criteo_ctr_with_cube/benchmark_batch.py
@@ -31,8 +31,7 @@ def single_func(idx, resource):
     client = Client()
     print([resource["endpoint"][idx % len(resource["endpoint"])]])
     client.load_client_config('ctr_client_conf/serving_client_conf.prototxt')
-    client.add_variant("var1", ['127.0.0.1:9292'], 50)
-    client.connect()
+    client.connect(['127.0.0.1:9292'])
     batch = 1
     buf_size = 100
     dataset = criteo.CriteoDataset()
diff --git a/python/examples/criteo_ctr_with_cube/test_client.py b/python/examples/criteo_ctr_with_cube/test_client.py
index cdecaa58e8dceccc758c7ac2b0e10abe23b4a41e..bb667f885eaae7e52764fc427efa524cb345eecc 100755
--- a/python/examples/criteo_ctr_with_cube/test_client.py
+++ b/python/examples/criteo_ctr_with_cube/test_client.py
@@ -22,8 +22,7 @@ from paddle_serving_client.metric import auc
 
 client = Client()
 client.load_client_config(sys.argv[1])
-client.add_variant("var1", ['127.0.0.1:9292'], 50)
-client.connect()
+client.connect(['127.0.0.1:9292'])
 
 batch = 1
 buf_size = 100
diff --git a/python/examples/fit_a_line/benchmark.py b/python/examples/fit_a_line/benchmark.py
index e5a5398ac021b3baee077cd9abb2a23a993751c2..0ddda2a095eb8542887ea592a79b16665f2daa15 100644
--- a/python/examples/fit_a_line/benchmark.py
+++ b/python/examples/fit_a_line/benchmark.py
@@ -28,8 +28,7 @@ def single_func(idx, resource):
     if args.request == "rpc":
         client = Client()
         client.load_client_config(args.model)
-        client.add_variant("var1", [args.endpoint], 50)
-        client.connect()
+        client.connect([args.endpoint])
         train_reader = paddle.batch(
             paddle.reader.shuffle(
                 paddle.dataset.uci_housing.train(), buf_size=500),
diff --git a/python/examples/fit_a_line/test_client.py b/python/examples/fit_a_line/test_client.py
index 3bdb9e8282a3401a87e230a9f62731db1a45e139..442ed230bc3d75c9ec3b5eac160b3a53ac31cd83 100644
--- a/python/examples/fit_a_line/test_client.py
+++ b/python/examples/fit_a_line/test_client.py
@@ -18,8 +18,7 @@ import sys
 
 client = Client()
 client.load_client_config(sys.argv[1])
-client.add_variant("var1", ["127.0.0.1:9393"], 50)
-client.connect()
+client.connect(["127.0.0.1:9393"])
 
 import paddle
 test_reader = paddle.batch(
diff --git a/python/examples/imagenet/benchmark.py b/python/examples/imagenet/benchmark.py
index 28ea5600212ac67826e99a14e6b7a382a1434497..ece222f74c52614100a119e49c3754e22959b7c8 100644
--- a/python/examples/imagenet/benchmark.py
+++ b/python/examples/imagenet/benchmark.py
@@ -36,9 +36,7 @@ def single_func(idx, resource):
         fetch = ["score"]
         client = Client()
         client.load_client_config(args.model)
-        client.add_variant(
-            "var1", [resource["endpoint"][idx % len(resource["endpoint"])]], 50)
-        client.connect()
+        client.connect([resource["endpoint"][idx % len(resource["endpoint"])]])
 
         start = time.time()
         for i in range(1000):
diff --git a/python/examples/imagenet/benchmark_batch.py b/python/examples/imagenet/benchmark_batch.py
index eeee38d6950b691d25cc0ac6308b9156fc8302d9..7477100971d1a098bb075c94f75490c16b53b862 100644
--- a/python/examples/imagenet/benchmark_batch.py
+++ b/python/examples/imagenet/benchmark_batch.py
@@ -41,9 +41,7 @@ def single_func(idx, resource):
         fetch = ["score"]
         client = Client()
         client.load_client_config(args.model)
-        client.add_variant(
-            "var1", [resource["endpoint"][idx % len(resource["endpoint"])]], 50)
-        client.connect()
+        client.connect([resource["endpoint"][idx % len(resource["endpoint"])]])
         start = time.time()
         for i in range(1000):
             if args.batch_size >= 1:
diff --git a/python/examples/imagenet/image_rpc_client.py b/python/examples/imagenet/image_rpc_client.py
index c840e1b125c0e6dbb98925d178b4c63f1d15af30..2367f509cece4d37d61d4a2ff2c2bfb831112e5a 100644
--- a/python/examples/imagenet/image_rpc_client.py
+++ b/python/examples/imagenet/image_rpc_client.py
@@ -19,8 +19,7 @@ import time
 
 client = Client()
 client.load_client_config(sys.argv[1])
-client.add_variant("var1", ["127.0.0.1:9295"], 50)
-client.connect()
+client.connect(["127.0.0.1:9295"])
 
 reader = ImageReader()
 start = time.time()
diff --git a/python/examples/imdb/benchmark_batch.py b/python/examples/imdb/benchmark_batch.py
index 17919bdef10af5a3da7cefc8eb74e7c6e3105797..302d63352ca20bf7e455ad1a66ead22f63dbe846 100644
--- a/python/examples/imdb/benchmark_batch.py
+++ b/python/examples/imdb/benchmark_batch.py
@@ -35,8 +35,7 @@ def single_func(idx, resource):
     if args.request == "rpc":
         client = Client()
         client.load_client_config(args.model)
-        client.add_variant("var1", [args.endpoint], 50)
-        client.connect()
+        client.connect([args.endpoint])
         for i in range(1000):
             if args.batch_size >= 1:
                 feed_batch = []
diff --git a/python/examples/imdb/test_client.py b/python/examples/imdb/test_client.py
index 66d56afd3211682f911c6ba09a3e8d8d9c2aea02..548a40e4931e7f0a2ea4a4e9d3c05f40e7d34426 100644
--- a/python/examples/imdb/test_client.py
+++ b/python/examples/imdb/test_client.py
@@ -18,8 +18,7 @@ import sys
 
 client = Client()
 client.load_client_config(sys.argv[1])
-client.add_variant("var1", ["127.0.0.1:9292"], 50)
-client.connect()
+client.connect(["127.0.0.1:9292"])
 
 # you can define any english sentence or dataset here
 # This example reuses imdb reader in training, you
diff --git a/python/examples/imdb/test_client_batch.py b/python/examples/imdb/test_client_batch.py
index 13a12637c5fc114b700b89daacb20c230a5b4247..972b2c9609ca690542fa802f187fb30ed0467a04 100644
--- a/python/examples/imdb/test_client_batch.py
+++ b/python/examples/imdb/test_client_batch.py
@@ -23,8 +23,7 @@ import time
 def batch_predict(batch_size=4):
     client = Client()
     client.load_client_config(conf_file)
-    client.add_variant("var1", ["127.0.0.1:9292"], 50)
-    client.connect()
+    client.connect(["127.0.0.1:9292"])
     fetch = ["acc", "cost", "prediction"]
     feed_batch = []
     for line in sys.stdin:
diff --git a/python/examples/lac/benchmark.py b/python/examples/lac/benchmark.py
index 2124257acd482412186919ac7584922472b31398..53d0881ed74e5e19104a70fb93d6872141d27afd 100644
--- a/python/examples/lac/benchmark.py
+++ b/python/examples/lac/benchmark.py
@@ -30,8 +30,7 @@ def single_func(idx, resource):
     if args.request == "rpc":
         client = Client()
         client.load_client_config(args.model)
-        client.add_variant("var1", [args.endpoint], 50)
-        client.connect()
+        client.connect([args.endpoint])
         fin = open("jieba_test.txt")
         for line in fin:
             feed_data = reader.process(line)
diff --git a/python/examples/lac/lac_client.py b/python/examples/lac/lac_client.py
index 2d6e250e1544c7f87eb94f989b04c6c83dfd0b88..f2a8e858ed72ac4043a2bb3162a39a2aff233043 100644
--- a/python/examples/lac/lac_client.py
+++ b/python/examples/lac/lac_client.py
@@ -22,8 +22,7 @@ import io
 
 client = Client()
 client.load_client_config(sys.argv[1])
-client.add_variant("var1", ["127.0.0.1:9280"], 50)
-client.connect()
+client.connect(["127.0.0.1:9280"])
 
 reader = LACReader(sys.argv[2])
 for line in sys.stdin:
diff --git a/python/paddle_serving_client/__init__.py b/python/paddle_serving_client/__init__.py
index 9c69a2d64703d75aef9b6b5eb89cc4c714fdb46b..7cabdd2ff4300a0d4c6e521e714b0541e2322b67 100644
--- a/python/paddle_serving_client/__init__.py
+++ b/python/paddle_serving_client/__init__.py
@@ -85,7 +85,7 @@ class Client(object):
         self.feed_names_to_idx_ = {}
         self.rpath()
         self.pid = os.getpid()
-        self.predictor_sdk_ = SDKConfig()
+        self.predictor_sdk_ = None
 
     def rpath(self):
         lib_path = os.path.dirname(paddle_serving_client.__file__)
@@ -138,13 +138,27 @@ class Client(object):
         return
 
     def add_variant(self, tag, cluster, variant_weight):
+        if self.predictor_sdk_ is None:
+            self.predictor_sdk_ = SDKConfig()
         self.predictor_sdk_.add_server_variant(tag, cluster,
                                                str(variant_weight))
 
-    def connect(self):
+    def connect(self, endpoints=None):
         # check whether current endpoint is available
         # init from client config
         # create predictor here
+        if endpoints is None:
+            if self.predictor_sdk_ is None:
+                raise SystemExit(
+                    "You must set the endpoints parameter or use add_variant function to create a variant."
+                )
+        else:
+            if self.predictor_sdk_ is None:
+                self.add_variant('var1', endpoints, 100)
+            else:
+                print(
+                    "endpoints({}) will not be enabled because you use the add_variant function.".
+                    format(endpoints))
         sdk_desc = self.predictor_sdk_.gen_desc()
         print(sdk_desc)
         self.client_handle_.create_predictor_by_desc(sdk_desc.SerializeToString(
diff --git a/python/paddle_serving_server/web_service.py b/python/paddle_serving_server/web_service.py
index 0957cbeaa5d518616772c6c0f5d2af2e47d9cda6..298e65e73c50241a20bbc319199afa30ac9c978b 100755
--- a/python/paddle_serving_server/web_service.py
+++ b/python/paddle_serving_server/web_service.py
@@ -54,9 +54,7 @@ class WebService(object):
         client_service = Client()
         client_service.load_client_config(
             "{}/serving_server_conf.prototxt".format(self.model_config))
-        client_service.add_variant("var1",
-                                   ["0.0.0.0:{}".format(self.port + 1)], 100)
-        client_service.connect()
+        client_service.connect(["0.0.0.0:{}".format(self.port + 1)])
         service_name = "/" + self.name + "/prediction"
 
         @app_instance.route(service_name, methods=['POST'])
diff --git a/python/paddle_serving_server_gpu/web_service.py b/python/paddle_serving_server_gpu/web_service.py
index 32e59b76249280de863a0bba298409a5a2ac7d14..22b534ddf8b8bc017685f4bf3ac67759d030bafc 100755
--- a/python/paddle_serving_server_gpu/web_service.py
+++ b/python/paddle_serving_server_gpu/web_service.py
@@ -91,8 +91,7 @@ class WebService(object):
         client = Client()
         client.load_client_config("{}/serving_server_conf.prototxt".format(
             self.model_config))
-        client.add_variant("var1", [endpoint], 100)
-        client.connect()
+        client.connect([endpoint])
         while True:
             request_json = inputqueue.get()
             feed, fetch = self.preprocess(request_json, request_json["fetch"])
@@ -135,8 +134,7 @@ class WebService(object):
         client = Client()
         client.load_client_config("{}/serving_server_conf.prototxt".format(
             self.model_config))
-        client.add_variant("var1", ["0.0.0.0:{}".format(self.port + 1)], 100)
-        client.connect()
+        client.connect(["0.0.0.0:{}".format(self.port + 1)])
         self.idx = 0
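For reference, a minimal usage sketch of the client API after this change; the config path, endpoint address, and the explicit variant tags/weights below are illustrative placeholders, not values taken from the diff:

from paddle_serving_client import Client

client = Client()
client.load_client_config("serving_client_conf.prototxt")

# New style: pass the endpoint list directly; connect() now creates a
# default variant ("var1", weight 100) when no variant has been added.
client.connect(["127.0.0.1:9292"])

# Explicit variants still work for weighted / A-B setups; in that case
# an endpoints argument passed to connect() is ignored with a printed notice:
# client.add_variant("var1", ["127.0.0.1:9292"], 50)
# client.add_variant("var2", ["127.0.0.1:9293"], 50)
# client.connect()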