Commit dffc643b authored by M MRXLT

fix cmake

Parent 62a7a986
@@ -49,7 +49,7 @@ endif()
 SET(PADDLE_LIB_PATH "http://paddle-inference-lib.bj.bcebos.com/${PADDLE_LIB_VERSION}/fluid_inference.tgz")
 MESSAGE(STATUS "PADDLE_LIB_PATH=${PADDLE_LIB_PATH}")
+if (WITH_GPU OR WITH_MKLML)
 ExternalProject_Add(
     "extern_paddle"
     ${EXTERNAL_PROJECT_LOG_ARGS}
@@ -62,11 +62,24 @@ ExternalProject_Add(
     INSTALL_COMMAND
         ${CMAKE_COMMAND} -E copy_directory ${PADDLE_DOWNLOAD_DIR}/paddle/include ${PADDLE_INSTALL_DIR}/include &&
         ${CMAKE_COMMAND} -E copy_directory ${PADDLE_DOWNLOAD_DIR}/paddle/lib ${PADDLE_INSTALL_DIR}/lib &&
-        ${CMAKE_COMMAND} -E copy_directory ${PADDLE_DOWNLOAD_DIR}/third_party ${PADDLE_INSTALL_DIR}/third_party
+        ${CMAKE_COMMAND} -E copy_directory ${PADDLE_DOWNLOAD_DIR}/third_party ${PADDLE_INSTALL_DIR}/third_party &&
+        ${CMAKE_COMMAND} -E copy ${PADDLE_INSTALL_DIR}/third_party/install/mkldnn/lib/libmkldnn.so.0 ${PADDLE_INSTALL_DIR}/third_party/install/mkldnn/lib/libmkldnn.so
+)
+else()
+ExternalProject_Add(
+    "extern_paddle"
+    ${EXTERNAL_PROJECT_LOG_ARGS}
+    URL "${PADDLE_LIB_PATH}"
+    PREFIX "${PADDLE_SOURCES_DIR}"
+    DOWNLOAD_DIR "${PADDLE_DOWNLOAD_DIR}"
+    CONFIGURE_COMMAND ""
+    BUILD_COMMAND ""
+    UPDATE_COMMAND ""
+    INSTALL_COMMAND
+        ${CMAKE_COMMAND} -E copy_directory ${PADDLE_DOWNLOAD_DIR}/paddle/include ${PADDLE_INSTALL_DIR}/include &&
+        ${CMAKE_COMMAND} -E copy_directory ${PADDLE_DOWNLOAD_DIR}/paddle/lib ${PADDLE_INSTALL_DIR}/lib &&
+        ${CMAKE_COMMAND} -E copy_directory ${PADDLE_DOWNLOAD_DIR}/third_party ${PADDLE_INSTALL_DIR}/third_party
 )
+if (WITH_MKLML)
+# create an unversioned libmkldnn.so alongside libmkldnn.so.0 for the linker
+file(COPY ${PADDLE_INSTALL_DIR}/third_party/install/mkldnn/lib/libmkldnn.so.0 DESTINATION ${PADDLE_INSTALL_DIR}/third_party/install/mkldnn/lib/libmkldnn.so FOLLOW_SYMLINK_CHAIN)
+endif()
+endif()
 INCLUDE_DIRECTORIES(${PADDLE_INCLUDE_DIR})
...
@@ -55,6 +55,7 @@ if (NOT CLIENT_ONLY)
 py_proto_compile(server_config_py_proto SRCS proto/server_configure.proto)
 add_custom_target(server_config_py_proto_init ALL COMMAND ${CMAKE_COMMAND} -E touch __init__.py)
 add_dependencies(server_config_py_proto server_config_py_proto_init)
+if (NOT WITH_GPU)
 add_custom_command(TARGET server_config_py_proto POST_BUILD
     COMMAND ${CMAKE_COMMAND} -E make_directory ${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving_server/proto
     COMMAND cp *.py ${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving_server/proto
@@ -66,6 +67,24 @@ add_custom_command(TARGET general_model_config_py_proto POST_BUILD
     COMMAND cp *.py ${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving_server/proto
     COMMENT "Copy generated general_model_config proto file into directory paddle_serving_server/proto."
     WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
+else()
+add_custom_command(TARGET server_config_py_proto POST_BUILD
+    COMMAND ${CMAKE_COMMAND} -E make_directory
+            ${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving_server_gpu/proto
+    COMMAND cp *.py
+            ${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving_server_gpu/proto
+    COMMENT "Copy generated python proto into directory
+             paddle_serving_server_gpu/proto."
+    WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
+add_custom_command(TARGET general_model_config_py_proto POST_BUILD
+    COMMAND ${CMAKE_COMMAND} -E make_directory
+            ${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving_server_gpu/proto
+    COMMAND cp *.py
+            ${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving_server_gpu/proto
+    COMMENT "Copy generated general_model_config proto file into directory
+             paddle_serving_server_gpu/proto."
+    WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
+endif()
 endif()
@@ -31,7 +31,7 @@ if(WITH_GPU)
     target_link_libraries(serving ${CUDA_LIBRARIES})
 endif()
-if(WITH_MKL)
+if(WITH_MKL OR WITH_GPU)
     target_link_libraries(serving -liomp5 -lmklml_intel -lmkldnn -lpthread -lcrypto -lm -lrt -lssl -ldl -lz -lbz2)
 else()
     target_link_libraries(serving openblas -lpthread -lcrypto -lm -lrt -lssl -ldl -lz -lbz2)
...
@@ -16,24 +16,32 @@ import os
 from .proto import server_configure_pb2 as server_sdk
 from .proto import general_model_config_pb2 as m_config
 import google.protobuf.text_format
+import tarfile
+import shutil  # used by download_bin() to clean up a failed extraction
+import paddle_serving_server
+from .version import serving_server_version
 class OpMaker(object):
     def __init__(self):
-        self.op_dict = {"general_infer":"GeneralInferOp",
-                        "general_reader":"GeneralReaderOp",
-                        "general_single_kv":"GeneralSingleKVOp",
-                        "general_dist_kv":"GeneralDistKVOp"}
+        self.op_dict = {
+            "general_infer": "GeneralInferOp",
+            "general_reader": "GeneralReaderOp",
+            "general_single_kv": "GeneralSingleKVOp",
+            "general_dist_kv": "GeneralDistKVOp"
+        }

     # currently, inputs and outputs are not used
     # when we have OpGraphMaker, inputs and outputs are necessary
     def create(self, name, inputs=[], outputs=[]):
         if name not in self.op_dict:
-            raise Exception("Op name {} is not supported right now".format(name))
+            raise Exception("Op name {} is not supported right now".format(
+                name))
         node = server_sdk.DAGNode()
         node.name = "{}_op".format(name)
         node.type = self.op_dict[name]
         return node
 class OpSeqMaker(object):
     def __init__(self):
         self.workflow = server_sdk.Workflow()
@@ -53,6 +61,7 @@ class OpSeqMaker(object):
         workflow_conf.workflows.extend([self.workflow])
         return workflow_conf
+
 class Server(object):
     def __init__(self):
         self.server_handle_ = None
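
For orientation while reviewing, here is a minimal usage sketch of the two builder classes above. It is hedged: `add_op` and `get_op_sequence` on OpSeqMaker are assumed from the partially visible hunk, and the package-level imports are illustrative.

```python
# Usage sketch only, not part of this commit.
# Assumes OpSeqMaker exposes add_op()/get_op_sequence(), of which this diff
# only shows the tail, and that both classes are importable from the
# paddle_serving_server package.
from paddle_serving_server import OpMaker, OpSeqMaker

op_maker = OpMaker()
read_op = op_maker.create('general_reader')   # DAGNode named "general_reader_op"
infer_op = op_maker.create('general_infer')   # DAGNode named "general_infer_op"

op_seq_maker = OpSeqMaker()
op_seq_maker.add_op(read_op)
op_seq_maker.add_op(infer_op)
workflow_conf = op_seq_maker.get_op_sequence()  # WorkflowConf wrapping one workflow
```
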
@@ -72,6 +81,8 @@ class Server(object):
         self.num_threads = 0
         self.port = 8080
         self.reload_interval_s = 10
+        self.module_path = os.path.dirname(paddle_serving_server.__file__)
+        self.cur_path = os.getcwd()
     def set_max_concurrency(self, concurrency):
         self.max_concurrency = concurrency
@@ -129,7 +140,8 @@ class Server(object):
     def _prepare_resource(self, workdir):
         if self.resource_conf == None:
-            with open("{}/{}".format(workdir, self.general_model_config_fn), "w") as fout:
+            with open("{}/{}".format(workdir, self.general_model_config_fn),
+                      "w") as fout:
                 fout.write(str(self.model_conf))
             self.resource_conf = server_sdk.ResourceConf()
             self.resource_conf.model_toolkit_path = workdir
@@ -150,6 +162,54 @@ class Server(object):
         # check config here
         # print config here
+    def get_device_version(self):
+        # Choose the serving binary flavor that matches the host CPU:
+        # AVX + MKL, AVX + OpenBLAS, or no-AVX + OpenBLAS.
+        avx_flag = False
+        mkl_flag = False
+        r = os.system("cat /proc/cpuinfo | grep avx > /dev/null 2>&1")
+        if r == 0:
+            avx_flag = True
+        r = os.system("which mkl")
+        if r == 0:
+            mkl_flag = True
+        if avx_flag:
+            if mkl_flag:
+                device_version = "serving-cpu-avx-mkl-"
+            else:
+                device_version = "serving-cpu-avx-openblas-"
+        else:
+            device_version = "serving-cpu-noavx-openblas-"
+        return device_version
+    def download_bin(self):
+        # Fetch the prebuilt serving binary that matches this CPU and the
+        # installed package version, and unpack it next to the module.
+        os.chdir(self.module_path)
+        device_version = self.get_device_version()
+        folder_name = device_version + serving_server_version
+        tar_name = folder_name + ".tar.gz"
+        bin_url = "https://paddle-serving.bj.bcebos.com/bin/" + tar_name
+        self.server_path = os.path.join(self.module_path, folder_name)
+        if not os.path.exists(self.server_path):
+            print('First time run, downloading PaddleServing components ...')
+            r = os.system('wget ' + bin_url + ' --no-check-certificate')
+            if r != 0:
+                print('Download failed')
+                if os.path.exists(tar_name):
+                    os.remove(tar_name)
+            else:
+                try:
+                    print('Decompressing files ...')
+                    tar = tarfile.open(tar_name)
+                    tar.extractall()
+                    tar.close()
+                except Exception:
+                    # remove a partially extracted tree so the next run
+                    # retries the download instead of using a broken install
+                    if os.path.exists(self.server_path):
+                        shutil.rmtree(self.server_path)
+                finally:
+                    os.remove(tar_name)
+        os.chdir(self.cur_path)
+        self.bin_path = self.server_path + "/serving"
     def prepare_server(self, workdir=None, port=9292, device="cpu"):
         if workdir == None:
             workdir = "./tmp"
@@ -176,8 +236,9 @@ class Server(object):
     def run_server(self):
         # just run server with system command
         # currently we do not load cube
-        command = "/home/xulongteng/github/Serving/build_server/core/general-server/serving" \
-                  " -enable_model_toolkit " \
+        self.download_bin()
+        command = "{} " \
+                  "-enable_model_toolkit " \
                   "-inferservice_path {} " \
                   "-inferservice_file {} " \
                   "-max_concurrency {} " \
@@ -189,6 +250,7 @@ class Server(object):
                   "-workflow_path {} " \
                   "-workflow_file {} " \
                   "-bthread_concurrency {} ".format(
+                      self.bin_path,
                       self.workdir,
                       self.infer_service_fn,
                       self.max_concurrency,
@@ -201,5 +263,3 @@ class Server(object):
                       self.workflow_fn,
                       self.num_threads,)
         os.system(command)
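
Taken together, the change means a user can start a server without a locally built binary: run_server() now calls download_bin() and launches the fetched executable. A hedged end-to-end sketch follows; `load_model_config` and `set_op_sequence` are assumed from parts of this class the diff does not show.

```python
# Hedged end-to-end sketch, not part of this commit. Assumes Server also
# provides load_model_config()/set_op_sequence(), which this diff omits,
# and that 'serving_server_model' is a hypothetical exported model dir.
from paddle_serving_server import OpMaker, OpSeqMaker, Server

op_maker = OpMaker()
op_seq_maker = OpSeqMaker()
op_seq_maker.add_op(op_maker.create('general_reader'))
op_seq_maker.add_op(op_maker.create('general_infer'))

server = Server()
server.set_op_sequence(op_seq_maker.get_op_sequence())
server.load_model_config('serving_server_model')  # hypothetical model dir
server.prepare_server(workdir='workdir', port=9292, device='cpu')
server.run_server()  # downloads the matching serving binary on first run
```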