diff --git a/core/predictor/framework/server.cpp b/core/predictor/framework/server.cpp index 8ced6f1e9936059ada169633e21690d13bc48ae3..996ab9120a21b4719d1203a5de430fc71d89cb52 100755 --- a/core/predictor/framework/server.cpp +++ b/core/predictor/framework/server.cpp @@ -96,6 +96,7 @@ int ServerManager::start_and_wait() { LOG(ERROR) << "Failed to start Paddle Inference Server"; return -1; } + LOG(WARNING) << "Finish start C++ PaddleServing."; _server.RunUntilAskedToQuit(); ServerManager::stop_reloader(); diff --git a/python/paddle_serving_server/serve.py b/python/paddle_serving_server/serve.py index e97da72c1f2be9c4c2f754d640ec856cf7b51f1d..68e1d19b7a2d587783fcc4d0b3b5226f616ac8a4 100755 --- a/python/paddle_serving_server/serve.py +++ b/python/paddle_serving_server/serve.py @@ -199,14 +199,14 @@ def start_gpu_card_model(gpu_mode, port, args): # pylint: disable=doc-string-mi if args.container_id != None: server.set_container_id(args.container_id) + if gpu_mode == True: + server.set_gpuid(args.gpu_ids) server.load_model_config(model) server.prepare_server( workdir=workdir, port=port, device=device, use_encryption_model=args.use_encryption_model) - if gpu_mode == True: - server.set_gpuid(args.gpu_ids) server.run_server() diff --git a/python/paddle_serving_server/server.py b/python/paddle_serving_server/server.py index 919d8ba01f892a5532d0bc70f00e7035c19a305a..1559536b493a0b928cf5f06a07576a0a3c1ac024 100755 --- a/python/paddle_serving_server/server.py +++ b/python/paddle_serving_server/server.py @@ -169,7 +169,13 @@ class Server(object): self.device = device def set_gpuid(self, gpuid): - self.gpuid = gpuid + if isinstance(gpuid, int): + self.gpuid = str(gpuid) + elif isinstance(gpuid, list): + gpu_list = [str(x) for x in gpuid] + self.gpuid = ",".join(gpu_list) + else: + self.gpuid = gpuid def set_op_num(self, op_num): self.op_num = op_num diff --git a/python/paddle_serving_server/web_service.py b/python/paddle_serving_server/web_service.py index 
87e16dcf96247049474c844de0efa09345d564cd..f514376c6e9f0b113dd63c486be42b2088c80b6d 100755 --- a/python/paddle_serving_server/web_service.py +++ b/python/paddle_serving_server/web_service.py @@ -105,7 +105,13 @@ class WebService(object): def set_gpus(self, gpus): print("This API will be deprecated later. Please do not use it") - self.gpus = gpus + if isinstance(gpus, int): + self.gpus = str(gpus) + elif isinstance(gpus, list): + gpu_list = [str(x) for x in gpus] + self.gpus = ",".join(gpu_list) + else: + self.gpus = gpus def default_rpc_service(self, workdir, @@ -125,7 +131,7 @@ class WebService(object): device = "gpu" server = Server() - if gpus == -1: + if gpus == -1 or gpus == "-1": if use_lite: device = "arm" else: @@ -234,7 +240,8 @@ class WebService(object): use_trt=False, gpu_multi_stream=False, op_num=None, - op_max_batch=None): + op_max_batch=None, + gpuid=-1): print("This API will be deprecated later. Please do not use it") self.workdir = workdir self.port = port @@ -251,6 +258,13 @@ class WebService(object): self.gpu_multi_stream = gpu_multi_stream self.op_num = op_num self.op_max_batch = op_max_batch + if isinstance(gpuid, int): + self.gpus = str(gpuid) + elif isinstance(gpuid, list): + gpu_list = [str(x) for x in gpuid] + self.gpus = ",".join(gpu_list) + else: + self.gpus = gpuid default_port = 12000 for i in range(1000):