diff --git a/python/examples/bert/benchmark_batch.py b/python/examples/bert/benchmark_batch.py index 265521d484259b0e6ea2b182dbf61e2a5cf43b8d..9b8e301a62eb0eee161cd701555543d329c6ae83 100644 --- a/python/examples/bert/benchmark_batch.py +++ b/python/examples/bert/benchmark_batch.py @@ -57,7 +57,7 @@ def single_func(idx, resource): os.getpid(), int(round(b_start * 1000000)), int(round(b_end * 1000000)))) - result = client.predict(feed_batch=feed_batch, fetch=fetch) + result = client.predict(feed=feed_batch, fetch=fetch) else: print("unsupport batch size {}".format(args.batch_size)) diff --git a/python/examples/criteo_ctr/benchmark_batch.py b/python/examples/criteo_ctr/benchmark_batch.py index ab67507355d0eba187d47ec9577eb5a3eda5dc46..1e4348c99dc0d960b1818ea6f0eb1ae2f5bd2ccb 100644 --- a/python/examples/criteo_ctr/benchmark_batch.py +++ b/python/examples/criteo_ctr/benchmark_batch.py @@ -55,7 +55,7 @@ def single_func(idx, resource): for i in range(1, 27): feed_dict["sparse_{}".format(i - 1)] = data[0][i] feed_batch.append(feed_dict) - result = client.predict(feed_batch=feed_batch, fetch=fetch) + result = client.predict(feed=feed_batch, fetch=fetch) else: print("unsupport batch size {}".format(args.batch_size)) diff --git a/python/examples/criteo_ctr_with_cube/benchmark_batch.py b/python/examples/criteo_ctr_with_cube/benchmark_batch.py index b4b15892375e830486afa320151fac619aab2ba7..df5c6b90badb36fd7e349555973ccbd7ea0a8b70 100755 --- a/python/examples/criteo_ctr_with_cube/benchmark_batch.py +++ b/python/examples/criteo_ctr_with_cube/benchmark_batch.py @@ -56,8 +56,7 @@ def single_func(idx, resource): feed_dict["embedding_{}.tmp_0".format(i - 1)] = data[0][ i] feed_batch.append(feed_dict) - result = client.batch_predict( - feed_batch=feed_batch, fetch=fetch) + result = client.predict(feed=feed_batch, fetch=fetch) else: print("unsupport batch size {}".format(args.batch_size)) diff --git a/python/examples/imagenet/benchmark_batch.py 
b/python/examples/imagenet/benchmark_batch.py index 915544d4f6f9d4636b14f4be92ad75ba08013389..e531425770cbf9102b7ebd2f5b082c5c4aa14e71 100644 --- a/python/examples/imagenet/benchmark_batch.py +++ b/python/examples/imagenet/benchmark_batch.py @@ -50,7 +50,7 @@ def single_func(idx, resource): img = reader.process_image(img_list[i]) img = img.reshape(-1) feed_batch.append({"image": img}) - result = client.predict(feed_batch=feed_batch, fetch=fetch) + result = client.predict(feed=feed_batch, fetch=fetch) else: print("unsupport batch size {}".format(args.batch_size)) diff --git a/python/examples/imdb/benchmark_batch.py b/python/examples/imdb/benchmark_batch.py index 3ac52ec5472ff97f5d273dc230494223f3a71907..d36704a7631e963fd51220aa3c3d9a350515ebfd 100644 --- a/python/examples/imdb/benchmark_batch.py +++ b/python/examples/imdb/benchmark_batch.py @@ -42,8 +42,7 @@ def single_func(idx, resource): for bi in range(args.batch_size): word_ids, label = imdb_dataset.get_words_and_label(line) feed_batch.append({"words": word_ids}) - result = client.predict( - feed_batch=feed_batch, fetch=["prediction"]) + result = client.predict(feed=feed_batch, fetch=["prediction"]) else: print("unsupport batch size {}".format(args.batch_size)) diff --git a/python/paddle_serving_server_gpu/serve.py b/python/paddle_serving_server_gpu/serve.py index 2cce4d4b1615584fc02aba4d70e1928083ddde62..d09efbfc8e1512ecb75b063ad760ce66e1a3159e 100644 --- a/python/paddle_serving_server_gpu/serve.py +++ b/python/paddle_serving_server_gpu/serve.py @@ -64,14 +64,22 @@ def start_gpu_card_model(index, gpuid, args): # pylint: disable=doc-string-miss def start_multi_card(args): # pylint: disable=doc-string-missing gpus = "" if args.gpu_ids == "": - if "CUDA_VISIBLE_DEVICES" in os.environ: - gpus = os.environ["CUDA_VISIBLE_DEVICES"] - else: - gpus = [] + gpus = [] else: gpus = args.gpu_ids.split(",") + if "CUDA_VISIBLE_DEVICES" in os.environ: + env_gpus = os.environ["CUDA_VISIBLE_DEVICES"].split(",") + for ids in gpus: 
+ if int(ids) >= len(env_gpus): + print( + "Max index of gpu_ids out of range, the number of CUDA_VISIBLE_DEVICES is {}.". + format(len(env_gpus))) + exit(-1) + else: + env_gpus = [] if len(gpus) <= 0: - start_gpu_card_model(-1, 0, args) + print("gpu_ids not set, going to run cpu service.") + start_gpu_card_model(-1, -1, args) else: gpu_processes = [] for i, gpu_id in enumerate(gpus):