diff --git a/python/paddle_serving_server_gpu/__init__.py b/python/paddle_serving_server_gpu/__init__.py
index b8fe91bb594b1f91141658afcb876f2291d4d35e..b39c59b0fe3f6e45e64592dc5873fa68008c8984 100644
--- a/python/paddle_serving_server_gpu/__init__.py
+++ b/python/paddle_serving_server_gpu/__init__.py
@@ -311,18 +311,19 @@ class Server(object):
             engine.static_optimization = False
             engine.force_update_static_cache = False
             engine.use_trt = self.use_trt
-            engine.use_lite = self.use_lite
-            engine.use_xpu = self.use_xpu
-
-
-
+            if os.path.exists('{}/__params__'.format(model_config_path)):
+                suffix = ""
+            else:
+                suffix = "_DIR"
+            if device == "arm":
+                engine.use_lite = self.use_lite
+                engine.use_xpu = self.use_xpu
             if device == "cpu":
-                engine.type = "FLUID_CPU_ANALYSIS_DIR"
+                engine.type = "FLUID_CPU_ANALYSIS" + suffix
             elif device == "gpu":
-                engine.type = "FLUID_GPU_ANALYSIS_DIR"
+                engine.type = "FLUID_GPU_ANALYSIS" + suffix
             elif device == "arm":
-                engine.type = "FLUID_ARM_ANALYSIS_DIR"
-
+                engine.type = "FLUID_ARM_ANALYSIS" + suffix
             self.model_toolkit_conf.engines.extend([engine])
 
     def _prepare_infer_service(self, port):