diff --git a/cmake/paddlepaddle.cmake b/cmake/paddlepaddle.cmake
index 0e202d3b06537646e489510c781cf125e87e3e07..82d35932a0240a3bd230c0c2d5072899ed9fa230 100644
--- a/cmake/paddlepaddle.cmake
+++ b/cmake/paddlepaddle.cmake
@@ -136,8 +136,8 @@ if (WITH_TRT)
 endif()
 
 if (WITH_LITE)
-    ADD_LIBRARY(paddle_api_full_bundled STATIC IMPORTED GLOBAL)
-    SET_PROPERTY(TARGET paddle_api_full_bundled PROPERTY IMPORTED_LOCATION ${PADDLE_INSTALL_DIR}/third_party/install/lite/cxx/lib/libpaddle_api_full_bundled.a)
+    ADD_LIBRARY(paddle_full_api_shared SHARED IMPORTED GLOBAL)
+    SET_PROPERTY(TARGET paddle_full_api_shared PROPERTY IMPORTED_LOCATION ${PADDLE_INSTALL_DIR}/third_party/install/lite/cxx/lib/libpaddle_full_api_shared.so)
 
     if (WITH_XPU)
         ADD_LIBRARY(xpuapi SHARED IMPORTED GLOBAL)
@@ -157,7 +157,7 @@ LIST(APPEND paddle_depend_libs
         xxhash)
 
 if(WITH_LITE)
-    LIST(APPEND paddle_depend_libs paddle_api_full_bundled)
+    LIST(APPEND paddle_depend_libs paddle_full_api_shared)
     if(WITH_XPU)
         LIST(APPEND paddle_depend_libs xpuapi xpurt)
     endif()
diff --git a/paddle_inference/inferencer-fluid-arm/include/fluid_arm_engine.h b/paddle_inference/inferencer-fluid-arm/include/fluid_arm_engine.h
index 92408cdacc581f7f9323840b87518df8ab8136ed..b3db6e1ad03d1822155918f9eb8714b6285972d1 100644
--- a/paddle_inference/inferencer-fluid-arm/include/fluid_arm_engine.h
+++ b/paddle_inference/inferencer-fluid-arm/include/fluid_arm_engine.h
@@ -128,20 +128,22 @@ class FluidArmAnalysisCore : public FluidFamilyCore {
     config.DisableGpu();
     config.SetCpuMathLibraryNumThreads(1);
 
-    if (params.enable_memory_optimization()) {
-      config.EnableMemoryOptim();
+    if (params.use_lite()) {
+      config.EnableLiteEngine(PrecisionType::kFloat32, true);
     }
 
-    if (params.enable_memory_optimization()) {
-      config.EnableMemoryOptim();
+    if (params.use_xpu()) {
+      config.EnableXpu(2 * 1024 * 1024);
     }
 
-    if (params.use_lite()) {
-      config.EnableLiteEngine(PrecisionType::kFloat32, true);
+    if (params.enable_memory_optimization()) {
+      config.EnableMemoryOptim();
     }
 
-    if (params.use_xpu()) {
-      config.EnableXpu(100);
+    if (params.enable_ir_optimization()) {
+      config.SwitchIrOptim(true);
+    } else {
+      config.SwitchIrOptim(false);
     }
 
     config.SwitchSpecifyInputNames(true);
@@ -173,6 +175,14 @@ class FluidArmAnalysisDirCore : public FluidFamilyCore {
     config.SwitchSpecifyInputNames(true);
     config.SetCpuMathLibraryNumThreads(1);
 
+    if (params.use_lite()) {
+      config.EnableLiteEngine(PrecisionType::kFloat32, true);
+    }
+
+    if (params.use_xpu()) {
+      config.EnableXpu(2 * 1024 * 1024);
+    }
+
     if (params.enable_memory_optimization()) {
       config.EnableMemoryOptim();
     }
@@ -183,14 +193,6 @@ class FluidArmAnalysisDirCore : public FluidFamilyCore {
       config.SwitchIrOptim(false);
     }
 
-    if (params.use_lite()) {
-      config.EnableLiteEngine(PrecisionType::kFloat32, true);
-    }
-
-    if (params.use_xpu()) {
-      config.EnableXpu(100);
-    }
-
     AutoLock lock(GlobalPaddleCreateMutex::instance());
     _core = CreatePredictor(config);
    if (NULL == _core.get()) {
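
Note: EnableXpu() takes the XPU L3 workspace size in bytes, so 2 * 1024 * 1024 reserves 2 MB, whereas the old literal 100 asked for a meaningless 100-byte cache. The same switches exist in the Python inference bindings this patch uses elsewhere. A minimal sketch, assuming the AnalysisConfig/create_paddle_predictor bindings of the same Paddle release and a hypothetical uci_housing_model directory:

    from paddle.fluid.core import AnalysisConfig, create_paddle_predictor

    config = AnalysisConfig("uci_housing_model")  # hypothetical model dir
    config.disable_gpu()
    config.set_cpu_math_library_num_threads(1)
    # Mirror the reordered C++ above: select Lite/XPU before the generic passes.
    config.enable_lite_engine(AnalysisConfig.Precision.Float32, True)
    config.enable_xpu(2 * 1024 * 1024)  # XPU L3 workspace, in bytes (2 MB)
    config.enable_memory_optim()
    config.switch_ir_optim(True)
    config.switch_specify_input_names(True)
    predictor = create_paddle_predictor(config)
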
diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt
index 2f3865d67d22403c38d9db21fbfb39e98de2659f..d17844991ea342e142476acececb14ac2e6ae106 100644
--- a/python/CMakeLists.txt
+++ b/python/CMakeLists.txt
@@ -99,15 +99,27 @@ if (SERVER)
         DEPENDS ${SERVING_SERVER_CORE} server_config_py_proto ${PY_FILES})
     add_custom_target(paddle_python ALL DEPENDS ${PADDLE_SERVING_BINARY_DIR}/.timestamp)
   elseif(WITH_LITE)
-    add_custom_command(
-        OUTPUT ${PADDLE_SERVING_BINARY_DIR}/.timestamp
-        COMMAND cp -r
-        ${CMAKE_CURRENT_SOURCE_DIR}/paddle_serving_server_gpu/ ${PADDLE_SERVING_BINARY_DIR}/python/
-        COMMAND env ${py_env} ${PYTHON_EXECUTABLE} gen_version.py
-        "server_gpu" arm
-        COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py bdist_wheel
-        DEPENDS ${SERVING_SERVER_CORE} server_config_py_proto ${PY_FILES})
-    add_custom_target(paddle_python ALL DEPENDS ${PADDLE_SERVING_BINARY_DIR}/.timestamp)
+    if(WITH_XPU)
+      add_custom_command(
+          OUTPUT ${PADDLE_SERVING_BINARY_DIR}/.timestamp
+          COMMAND cp -r
+          ${CMAKE_CURRENT_SOURCE_DIR}/paddle_serving_server_gpu/ ${PADDLE_SERVING_BINARY_DIR}/python/
+          COMMAND env ${py_env} ${PYTHON_EXECUTABLE} gen_version.py
+          "server_gpu" arm-xpu
+          COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py bdist_wheel
+          DEPENDS ${SERVING_SERVER_CORE} server_config_py_proto ${PY_FILES})
+      add_custom_target(paddle_python ALL DEPENDS ${PADDLE_SERVING_BINARY_DIR}/.timestamp)
+    else()
+      add_custom_command(
+          OUTPUT ${PADDLE_SERVING_BINARY_DIR}/.timestamp
+          COMMAND cp -r
+          ${CMAKE_CURRENT_SOURCE_DIR}/paddle_serving_server_gpu/ ${PADDLE_SERVING_BINARY_DIR}/python/
+          COMMAND env ${py_env} ${PYTHON_EXECUTABLE} gen_version.py
+          "server_gpu" arm
+          COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py bdist_wheel
+          DEPENDS ${SERVING_SERVER_CORE} server_config_py_proto ${PY_FILES})
+      add_custom_target(paddle_python ALL DEPENDS ${PADDLE_SERVING_BINARY_DIR}/.timestamp)
+    endif()
   else()
     add_custom_command(
         OUTPUT ${PADDLE_SERVING_BINARY_DIR}/.timestamp
diff --git a/python/paddle_serving_app/local_predict.py b/python/paddle_serving_app/local_predict.py
index 5a641fe6358a62b67c435e9881d481c2c5616b1f..1c49f01f22cbc23cfecb70fb36d3a72ff0991e5f 100644
--- a/python/paddle_serving_app/local_predict.py
+++ b/python/paddle_serving_app/local_predict.py
@@ -132,6 +132,7 @@ class LocalPredictor(object):
                 ops_filter=[])
 
         if use_xpu:
+            # 8MB l3 cache
            config.enable_xpu(8 * 1024 * 1024)
 
         self.predictor = create_paddle_predictor(config)
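
With the hunk above in place, an XPU run of the local predictor only needs the two extra flags. A rough usage sketch, assuming load_model_config() and predict() keep the signatures of this paddle_serving_app version; the model directory and feed/fetch names are hypothetical:

    import numpy as np
    from paddle_serving_app.local_predict import LocalPredictor

    predictor = LocalPredictor()
    predictor.load_model_config(
        "uci_housing_model",  # hypothetical model directory
        use_lite=True,
        use_xpu=True)         # takes the config.enable_xpu(...) branch above
    fetch_map = predictor.predict(
        feed={"x": np.random.rand(1, 13).astype("float32")},
        fetch=["price"])
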
diff --git a/python/paddle_serving_server_gpu/__init__.py b/python/paddle_serving_server_gpu/__init__.py
index b39c59b0fe3f6e45e64592dc5873fa68008c8984..44402e734f3b9dd22db4ae674cf85e5cff614f8f 100644
--- a/python/paddle_serving_server_gpu/__init__.py
+++ b/python/paddle_serving_server_gpu/__init__.py
@@ -212,6 +212,7 @@ class Server(object):
         self.module_path = os.path.dirname(paddle_serving_server.__file__)
         self.cur_path = os.getcwd()
         self.use_local_bin = False
+        self.device = "cpu"
         self.gpuid = 0
         self.use_trt = False
         self.use_lite = False
@@ -279,6 +280,9 @@ class Server(object):
                 "GPU not found, please check your environment or use cpu version by \"pip install paddle_serving_server\""
             )
 
+    def set_device(self, device="cpu"):
+        self.device = device
+
     def set_gpuid(self, gpuid=0):
         self.gpuid = gpuid
 
@@ -426,7 +430,7 @@ class Server(object):
                     cuda_version = line.split("\"")[1]
         if cuda_version == "101" or cuda_version == "102" or cuda_version == "110":
             device_version = "serving-gpu-" + cuda_version + "-"
-        elif cuda_version == "arm":
+        elif cuda_version == "arm" or cuda_version == "arm-xpu":
             device_version = "serving-" + cuda_version + "-"
         else:
             device_version = "serving-gpu-cuda" + cuda_version + "-"
@@ -529,7 +533,8 @@ class Server(object):
         else:
             print("Use local bin : {}".format(self.bin_path))
         #self.check_cuda()
-        if self.use_lite:
+        # TODO: merge the CPU and GPU code paths; move the device setting into model_toolkit
+        if self.device == "cpu" or self.device == "arm":
             command = "{} " \
                       "-enable_model_toolkit " \
                       "-inferservice_path {} " \
diff --git a/python/paddle_serving_server_gpu/serve.py b/python/paddle_serving_server_gpu/serve.py
index ffa4c2336fd4307f67fd2f3578a1aa3102850ce9..057a25e483cd7c160bc7bbef8b9378f9bf08f32c 100644
--- a/python/paddle_serving_server_gpu/serve.py
+++ b/python/paddle_serving_server_gpu/serve.py
@@ -73,6 +73,7 @@ def start_gpu_card_model(index, gpuid, args):  # pylint: disable=doc-string-miss
         server.set_lite()
         device = "arm"
+        server.set_device(device)
 
     if args.use_xpu:
         server.set_xpu()
 
diff --git a/python/paddle_serving_server_gpu/web_service.py b/python/paddle_serving_server_gpu/web_service.py
index 6e7fc2c148dab721e74a7d1719c48849bbab3405..e2c24f4068da1a6ccccaa789186cab4e2a8fa6d9 100644
--- a/python/paddle_serving_server_gpu/web_service.py
+++ b/python/paddle_serving_server_gpu/web_service.py
@@ -107,6 +107,7 @@ class WebService(object):
         server.set_num_threads(thread_num)
         server.set_memory_optimize(mem_optim)
         server.set_ir_optimize(ir_optim)
+        server.set_device(device)
 
         if use_lite:
             server.set_lite()
diff --git a/python/pipeline/local_service_handler.py b/python/pipeline/local_service_handler.py
index eaa04ee01411260f82992d4327c9d8ac033b91f0..65261dfa38f20a2174dc90fea70b5296187f0044 100644
--- a/python/pipeline/local_service_handler.py
+++ b/python/pipeline/local_service_handler.py
@@ -249,6 +249,8 @@ class LocalServiceHandler(object):
         server = Server()
         if gpuid >= 0:
             server.set_gpuid(gpuid)
+        # TODO: support arm or arm + xpu later
+        server.set_device(self._device_name)
         server.set_op_sequence(op_seq_maker.get_op_sequence())
         server.set_num_threads(thread_num)
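
Taken together, the Python-side changes mean a Lite/XPU server now records its device explicitly instead of inferring it from use_lite alone. A rough end-to-end launch sketch, assuming the usual OpMaker/OpSeqMaker flow of paddle_serving_server_gpu and a hypothetical uci_housing_model directory; this mirrors what serve.py does when started with --use_lite --use_xpu:

    from paddle_serving_server_gpu import OpMaker, OpSeqMaker, Server

    op_maker = OpMaker()
    op_seq_maker = OpSeqMaker()
    op_seq_maker.add_op(op_maker.create('general_reader'))
    op_seq_maker.add_op(op_maker.create('general_infer'))
    op_seq_maker.add_op(op_maker.create('general_response'))

    server = Server()
    server.set_op_sequence(op_seq_maker.get_op_sequence())
    server.set_num_threads(4)
    server.set_lite()         # run inference through Paddle-Lite
    server.set_xpu()          # request the XPU backend on top of Lite
    server.set_device("arm")  # new in this patch: selects the arm launch path
    server.load_model_config("uci_housing_model")  # hypothetical model dir
    server.prepare_server(workdir="workdir", port=9292, device="arm")
    server.run_server()
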