From 5bbc210ffa6d63507ae31e780c041fd248c8f4e2 Mon Sep 17 00:00:00 2001
From: MRXLT
Date: Mon, 27 Apr 2020 20:43:15 +0800
Subject: [PATCH] fix cuda check

---
 python/paddle_serving_server_gpu/__init__.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/python/paddle_serving_server_gpu/__init__.py b/python/paddle_serving_server_gpu/__init__.py
index a2163a88..5fa4f010 100644
--- a/python/paddle_serving_server_gpu/__init__.py
+++ b/python/paddle_serving_server_gpu/__init__.py
@@ -171,7 +171,6 @@ class Server(object):
         self.max_body_size = 64 * 1024 * 1024
         self.module_path = os.path.dirname(paddle_serving_server.__file__)
         self.cur_path = os.getcwd()
-        self.check_cuda()
         self.use_local_bin = False
         self.gpuid = 0
         self.model_config_paths = None  # for multi-model in a workflow
@@ -212,9 +211,9 @@ class Server(object):
 
     def check_cuda(self):
         cuda_flag = False
-        r = os.popen("whereis libcudart.so")
-        r = r.read().split(":")[1]
-        if "cudart" in r and os.system(
+        r = os.popen("ldd {} | grep cudart".format(self.bin_path))
+        r = r.read().split("=")
+        if len(r) >= 2 and "cudart" in r[1] and os.system(
                 "ls /dev/ | grep nvidia > /dev/null") == 0:
             cuda_flag = True
         if not cuda_flag:
@@ -420,6 +419,7 @@ class Server(object):
             time.sleep(1)
         else:
             print("Use local bin : {}".format(self.bin_path))
+            self.check_cuda()
             command = "{} " \
                       "-enable_model_toolkit " \
                       "-inferservice_path {} " \
-- 
GitLab