From dffc643b98982909a2ec203269c2e97dfd75fb1e Mon Sep 17 00:00:00 2001
From: MRXLT
Date: Thu, 13 Feb 2020 11:36:35 +0800
Subject: [PATCH] fix cmake

---
 cmake/paddlepaddle.cmake                 | 23 +++++--
 core/configure/CMakeLists.txt            | 19 ++++++
 core/general-server/CMakeLists.txt       |  2 +-
 python/paddle_serving_server/__init__.py | 80 +++++++++++++++++++++---
 4 files changed, 108 insertions(+), 16 deletions(-)

diff --git a/cmake/paddlepaddle.cmake b/cmake/paddlepaddle.cmake
index 84d8ae93..f91fe8ed 100644
--- a/cmake/paddlepaddle.cmake
+++ b/cmake/paddlepaddle.cmake
@@ -49,7 +49,7 @@ endif()
 
 SET(PADDLE_LIB_PATH "http://paddle-inference-lib.bj.bcebos.com/${PADDLE_LIB_VERSION}/fluid_inference.tgz")
 MESSAGE(STATUS "PADDLE_LIB_PATH=${PADDLE_LIB_PATH}")
-
+if (WITH_GPU OR WITH_MKLML)
 ExternalProject_Add(
     "extern_paddle"
     ${EXTERNAL_PROJECT_LOG_ARGS}
@@ -62,11 +62,24 @@ ExternalProject_Add(
     INSTALL_COMMAND
         ${CMAKE_COMMAND} -E copy_directory ${PADDLE_DOWNLOAD_DIR}/paddle/include ${PADDLE_INSTALL_DIR}/include &&
         ${CMAKE_COMMAND} -E copy_directory ${PADDLE_DOWNLOAD_DIR}/paddle/lib ${PADDLE_INSTALL_DIR}/lib &&
-        ${CMAKE_COMMAND} -E copy_directory ${PADDLE_DOWNLOAD_DIR}/third_party ${PADDLE_INSTALL_DIR}/third_party
+        ${CMAKE_COMMAND} -E copy_directory ${PADDLE_DOWNLOAD_DIR}/third_party ${PADDLE_INSTALL_DIR}/third_party &&
+        ${CMAKE_COMMAND} -E copy ${PADDLE_INSTALL_DIR}/third_party/install/mkldnn/lib/libmkldnn.so.0 ${PADDLE_INSTALL_DIR}/third_party/install/mkldnn/lib/libmkldnn.so
+)
+else()
+ExternalProject_Add(
+    "extern_paddle"
+    ${EXTERNAL_PROJECT_LOG_ARGS}
+    URL "${PADDLE_LIB_PATH}"
+    PREFIX "${PADDLE_SOURCES_DIR}"
+    DOWNLOAD_DIR "${PADDLE_DOWNLOAD_DIR}"
+    CONFIGURE_COMMAND ""
+    BUILD_COMMAND ""
+    UPDATE_COMMAND ""
+    INSTALL_COMMAND
+        ${CMAKE_COMMAND} -E copy_directory ${PADDLE_DOWNLOAD_DIR}/paddle/include ${PADDLE_INSTALL_DIR}/include &&
+        ${CMAKE_COMMAND} -E copy_directory ${PADDLE_DOWNLOAD_DIR}/paddle/lib ${PADDLE_INSTALL_DIR}/lib &&
+        ${CMAKE_COMMAND} -E copy_directory ${PADDLE_DOWNLOAD_DIR}/third_party ${PADDLE_INSTALL_DIR}/third_party
 )
-
-if (WITH_MKLML)
-    file(COPY ${PADDLE_INSTALL_DIR}/third_party/install/mkldnn/lib/libmkldnn.so.0 DESTINATION ${PADDLE_INSTALL_DIR}/third_party/install/mkldnn/lib/libmkldnn.so FOLLOW_SYMLINK_CHAIN)
 endif()
 
 INCLUDE_DIRECTORIES(${PADDLE_INCLUDE_DIR})
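In the hunk above, the INSTALL_COMMAND gains a step that copies libmkldnn.so.0 to libmkldnn.so: the linker resolves the -lmkldnn flag (added for GPU builds in core/general-server/CMakeLists.txt below) against the unversioned file name. For reference, a minimal Python sketch of the equivalent fix-up; the install prefix here is hypothetical and not part of the patch:

import shutil

# Hypothetical install prefix standing in for ${PADDLE_INSTALL_DIR}.
lib_dir = "/opt/paddle/third_party/install/mkldnn/lib"

# Create the unversioned library name that `-lmkldnn` searches at link time,
# alongside the versioned SONAME file shipped in the tarball.
shutil.copyfile(lib_dir + "/libmkldnn.so.0", lib_dir + "/libmkldnn.so")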
diff --git a/core/configure/CMakeLists.txt b/core/configure/CMakeLists.txt
index e00dc18f..e6d461d6 100644
--- a/core/configure/CMakeLists.txt
+++ b/core/configure/CMakeLists.txt
@@ -55,6 +55,7 @@ if (NOT CLIENT_ONLY)
 py_proto_compile(server_config_py_proto SRCS proto/server_configure.proto)
 add_custom_target(server_config_py_proto_init ALL COMMAND ${CMAKE_COMMAND} -E touch __init__.py)
 add_dependencies(server_config_py_proto server_config_py_proto_init)
+if (NOT WITH_GPU)
 add_custom_command(TARGET server_config_py_proto POST_BUILD
     COMMAND ${CMAKE_COMMAND} -E make_directory ${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving_server/proto
     COMMAND cp *.py ${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving_server/proto
@@ -66,6 +67,24 @@ add_custom_command(TARGET general_model_config_py_proto POST_BUILD
     COMMAND cp *.py ${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving_server/proto
     COMMENT "Copy generated general_model_config proto file into directory
     paddle_serving_server/proto."
     WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
+else()
+add_custom_command(TARGET server_config_py_proto POST_BUILD
+    COMMAND ${CMAKE_COMMAND} -E make_directory
+    ${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving_server_gpu/proto
+    COMMAND cp *.py
+    ${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving_server_gpu/proto
+    COMMENT "Copy generated python proto into directory
+    paddle_serving_server_gpu/proto."
+    WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
+add_custom_command(TARGET general_model_config_py_proto POST_BUILD
+    COMMAND ${CMAKE_COMMAND} -E make_directory
+    ${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving_server_gpu/proto
+    COMMAND cp *.py
+    ${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving_server_gpu/proto
+    COMMENT "Copy generated general_model_config proto file into directory
+    paddle_serving_server_gpu/proto."
+    WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
+endif()
 endif()
 
diff --git a/core/general-server/CMakeLists.txt b/core/general-server/CMakeLists.txt
index 204e45e1..a1b5e01f 100644
--- a/core/general-server/CMakeLists.txt
+++ b/core/general-server/CMakeLists.txt
@@ -31,7 +31,7 @@ if(WITH_GPU)
     target_link_libraries(serving ${CUDA_LIBRARIES})
 endif()
 
-if(WITH_MKL)
+if(WITH_MKL OR WITH_GPU)
     target_link_libraries(serving -liomp5 -lmklml_intel -lmkldnn -lpthread -lcrypto -lm -lrt -lssl -ldl -lz -lbz2)
 else()
     target_link_libraries(serving openblas -lpthread -lcrypto -lm -lrt -lssl -ldl -lz -lbz2)
open("{}/{}".format(workdir, self.general_model_config_fn), + "w") as fout: fout.write(str(self.model_conf)) self.resource_conf = server_sdk.ResourceConf() self.resource_conf.model_toolkit_path = workdir @@ -150,6 +162,54 @@ class Server(object): # check config here # print config here + def get_device_version(self): + avx_flag = False + mkl_flag = False + openblas_flag = False + r = os.system("cat /proc/cpuinfo | grep avx > /dev/null 2>&1") + if r == 0: + avx_flag = True + r = os.system("which mkl") + if r == 0: + mkl_flag = True + if avx_flag: + if mkl_flag: + device_version = "serving-cpu-avx-mkl-" + else: + device_version = "serving-cpu-avx-openblas-" + else: + device_version = "serving-cpu-noavx-openblas-" + return device_version + + def download_bin(self): + os.chdir(self.module_path) + need_download = False + device_version = self.get_device_version() + floder_name = device_version + serving_server_version + tar_name = floder_name + ".tar.gz" + bin_url = "https://paddle-serving.bj.bcebos.com/bin/" + tar_name + self.server_path = os.path.join(self.module_path, floder_name) + if not os.path.exists(self.server_path): + print('Frist time run, downloading PaddleServing components ...') + r = os.system('wget ' + bin_url + ' --no-check-certificate') + if r != 0: + print('Download failed') + if os.path.exists(tar_name): + os.remove(tar_name) + else: + try: + print('Decompressing files ..') + tar = tarfile.open(tar_name) + tar.extractall() + tar.close() + except: + if os.path.exists(exe_path): + os.remove(exe_path) + finally: + os.remove(tar_name) + os.chdir(self.cur_path) + self.bin_path = self.server_path + "/serving" + def prepare_server(self, workdir=None, port=9292, device="cpu"): if workdir == None: workdir = "./tmp" @@ -176,8 +236,9 @@ class Server(object): def run_server(self): # just run server with system command # currently we do not load cube - command = "/home/xulongteng/github/Serving/build_server/core/general-server/serving" \ - " -enable_model_toolkit " \ + self.download_bin() + command = "{} " \ + "-enable_model_toolkit " \ "-inferservice_path {} " \ "-inferservice_file {} " \ "-max_concurrency {} " \ @@ -189,6 +250,7 @@ class Server(object): "-workflow_path {} " \ "-workflow_file {} " \ "-bthread_concurrency {} ".format( + self.bin_path, self.workdir, self.infer_service_fn, self.max_concurrency, @@ -201,5 +263,3 @@ class Server(object): self.workflow_fn, self.num_threads,) os.system(command) - - -- GitLab