diff --git a/CMakeLists.txt b/CMakeLists.txt
index a7c7b5449551e27608f6d594e32e9786c2d2f6db..4793fa924042b3e457d71d209800ed8e71e3dd2d 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -79,14 +79,17 @@ if(ANDROID OR IOS OR ARMLINUX)
         "Disable DSO when cross-compiling for Android and iOS" FORCE)
     set(WITH_AVX OFF CACHE STRING
         "Disable AVX when cross-compiling for Android and iOS" FORCE)
-    set(LITE_WITH_PYTHON OFF CACHE STRING
-        "Disable PYTHON when cross-compiling for Android and iOS" FORCE)
     set(WITH_RDMA OFF CACHE STRING
         "Disable RDMA when cross-compiling for Android and iOS" FORCE)
     set(WITH_MKL OFF CACHE STRING
         "Disable MKL when cross-compiling for Android and iOS" FORCE)
 endif()
 
+if(ANDROID OR IOS)
+    set(LITE_WITH_PYTHON OFF CACHE STRING
+        "Disable PYTHON when cross-compiling for Android and iOS" FORCE)
+endif()
+
 set(THIRD_PARTY_PATH "${CMAKE_BINARY_DIR}/third_party" CACHE STRING
   "A path setting third party libraries download & build directories.")
diff --git a/cmake/cross_compiling/postproject.cmake b/cmake/cross_compiling/postproject.cmake
index 33254df03c43c2648fb33effe491e5956edf60a9..88ac3e101a686cb49ef5a4c3b1879c15b8f7b57b 100644
--- a/cmake/cross_compiling/postproject.cmake
+++ b/cmake/cross_compiling/postproject.cmake
@@ -26,6 +26,8 @@ if(ANDROID)
 endif()
 
 if(ARMLINUX)
+    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC")
+    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fPIC")
     if(ARMLINUX_ARCH_ABI STREQUAL "armv8")
         set(CMAKE_CXX_FLAGS "-march=armv8-a ${CMAKE_CXX_FLAGS}")
         set(CMAKE_C_FLAGS "-march=armv8-a ${CMAKE_C_FLAGS}")
@@ -57,7 +59,10 @@ function(check_linker_flag)
 endfunction()
 set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
 if (LITE_ON_TINY_PUBLISH)
-    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -ffast-math -Ofast -Os -fno-exceptions -fomit-frame-pointer -fno-asynchronous-unwind-tables -fno-unwind-tables")
+    if(NOT LITE_WITH_PYTHON)
+        set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-exceptions")
+    endif()
+    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -ffast-math -Ofast -Os -fomit-frame-pointer -fno-asynchronous-unwind-tables -fno-unwind-tables")
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -flto -fvisibility=hidden -fvisibility-inlines-hidden -fdata-sections -ffunction-sections")
     check_linker_flag(-Wl,--gc-sections)
 endif()
diff --git a/lite/CMakeLists.txt b/lite/CMakeLists.txt
index 9a52cc8d4a1bb9782ba0c9a5e5ba9040cfe6abbd..f0731554df0b991c02fed2991c633f127e3249ca 100644
--- a/lite/CMakeLists.txt
+++ b/lite/CMakeLists.txt
@@ -47,6 +47,9 @@ if (WITH_TESTING)
     endif()
 endif()
 
+# ----------------------------- PUBLISH -----------------------------
+# The final target for publish lite lib
+add_custom_target(publish_inference)
 if (LITE_WITH_LIGHT_WEIGHT_FRAMEWORK AND LITE_WITH_ARM)
     # for publish
     set(INFER_LITE_PUBLISH_ROOT "${CMAKE_BINARY_DIR}/inference_lite_lib.${ARM_TARGET_OS}.${ARM_TARGET_ARCH_ABI}")
@@ -59,10 +62,31 @@ if (LITE_WITH_LIGHT_WEIGHT_FRAMEWORK AND LITE_WITH_ARM)
     if (LITE_WITH_FPGA)
         set(INFER_LITE_PUBLISH_ROOT "${INFER_LITE_PUBLISH_ROOT}.fpga")
     endif(LITE_WITH_FPGA)
-    message(STATUS "publish inference lib to ${INFER_LITE_PUBLISH_ROOT}")
+else()
+    set(INFER_LITE_PUBLISH_ROOT "${CMAKE_BINARY_DIR}/inference_lite_lib")
+endif()
+message(STATUS "publish inference lib to ${INFER_LITE_PUBLISH_ROOT}")
+
+# add python lib
+if (LITE_WITH_PYTHON)
+    add_custom_target(publish_inference_python_lib ${TARGET}
+            COMMAND mkdir -p "${INFER_LITE_PUBLISH_ROOT}/python/lib"
+            COMMAND cp "${CMAKE_BINARY_DIR}/lite/api/python/pybind/liblite_pybind.so" "${INFER_LITE_PUBLISH_ROOT}/python/lib/lite_core.so")
+    add_custom_target(publish_inference_python_light_demo ${TARGET}
+            COMMAND mkdir -p "${INFER_LITE_PUBLISH_ROOT}/demo/python"
+            COMMAND cp "${CMAKE_SOURCE_DIR}/lite/demo/python/mobilenetv1_light_api.py" "${INFER_LITE_PUBLISH_ROOT}/demo/python/")
+    if (NOT LITE_ON_TINY_PUBLISH)
+        add_custom_target(publish_inference_python_full_demo ${TARGET}
+                COMMAND mkdir -p "${INFER_LITE_PUBLISH_ROOT}/demo/python"
+                COMMAND cp "${CMAKE_SOURCE_DIR}/lite/demo/python/mobilenetv1_full_api.py" "${INFER_LITE_PUBLISH_ROOT}/demo/python/")
+        add_dependencies(publish_inference publish_inference_python_full_demo)
+    endif()
+    add_dependencies(publish_inference_python_lib lite_pybind)
+    add_dependencies(publish_inference publish_inference_python_lib)
+    add_dependencies(publish_inference publish_inference_python_light_demo)
+endif()
 
-    # The final target for publish lite lib
-    add_custom_target(publish_inference)
+if (LITE_WITH_LIGHT_WEIGHT_FRAMEWORK AND LITE_WITH_ARM)
     if (NOT LITE_ON_TINY_PUBLISH)
         # add cxx lib
         add_custom_target(publish_inference_cxx_lib ${TARGET}
diff --git a/lite/api/python/pybind/pybind.cc b/lite/api/python/pybind/pybind.cc
index 00f083f54d811d0431a1f6b7e632b8d2c49c40e4..3f73c8fbb120a8d64f3b6553ce1f306b2639226f 100644
--- a/lite/api/python/pybind/pybind.cc
+++ b/lite/api/python/pybind/pybind.cc
@@ -72,13 +72,15 @@ void BindLiteApi(py::module *m) {
   BindLiteCxxPredictor(m);
 #endif
   BindLiteLightPredictor(m);
-  // Global helper methods
+// Global helper methods
+#ifndef LITE_ON_TINY_PUBLISH
   m->def("create_paddle_predictor",
          [](const CxxConfig &config) -> std::unique_ptr<CxxPaddleApiImpl> {
            auto x = std::unique_ptr<CxxPaddleApiImpl>(new CxxPaddleApiImpl());
            x->Init(config);
            return std::move(x);
          });
+#endif
   m->def("create_paddle_predictor",
          [](const MobileConfig &config) -> std::unique_ptr<LightPredictorImpl> {
            auto x =
diff --git a/lite/demo/python/mobilenetv1_full_api.py b/lite/demo/python/mobilenetv1_full_api.py
new file mode 100644
index 0000000000000000000000000000000000000000..a31469e3e8da81f3753dc5d241d4ef39ac03832f
--- /dev/null
+++ b/lite/demo/python/mobilenetv1_full_api.py
@@ -0,0 +1,67 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+'''
+Paddle-Lite full python api demo
+'''
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import argparse
+import sys
+sys.path.append('../../python/lib')
+
+from lite_core import *
+
+# Command arguments
+parser = argparse.ArgumentParser()
+parser.add_argument(
+    "--model_dir", default="", type=str, help="Non-combined Model dir path")
+parser.add_argument(
+    "--model_file", default="", type=str, help="Model file")
+parser.add_argument(
+    "--param_file", default="", type=str, help="Combined model param file")
+
+def RunModel(args):
+    # 1. Set config information
+    config = CxxConfig()
+    if args.model_file != '' and args.param_file != '':
+        config.set_model_file(args.model_file)
+        config.set_param_file(args.param_file)
+    else:
+        config.set_model_dir(args.model_dir)
+    # For x86, you can set places = [Place(TargetType.X86, PrecisionType.FP32)]
+    places = [Place(TargetType.ARM, PrecisionType.FP32)]
+    config.set_valid_places(places)
+
+    # 2. Create paddle predictor
+    predictor = create_paddle_predictor(config)
+
+    # 3. Set input data
+    input_tensor = predictor.get_input(0)
+    input_tensor.resize([1, 3, 224, 224])
+    input_tensor.set_float_data([1.] * 3 * 224 * 224)
+
+    # 4. Run model
+    predictor.run()
+
+    # 5. Get output data
+    output_tensor = predictor.get_output(0)
+    print(output_tensor.shape())
+    print(output_tensor.float_data()[:10])
+
+if __name__ == '__main__':
+    args = parser.parse_args()
+    RunModel(args)
diff --git a/lite/demo/python/mobilenetv1_light_api.py b/lite/demo/python/mobilenetv1_light_api.py
new file mode 100644
index 0000000000000000000000000000000000000000..a44427092bae88aa41b3b1d0684cfcf36835b3d2
--- /dev/null
+++ b/lite/demo/python/mobilenetv1_light_api.py
@@ -0,0 +1,56 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+'''
+Paddle-Lite light python api demo
+'''
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import argparse
+import sys
+sys.path.append('../../python/lib')
+
+from lite_core import *
+
+# Command arguments
+parser = argparse.ArgumentParser()
+parser.add_argument(
+    "--model_dir", default="", type=str, help="Non-combined Model dir path")
+
+def RunModel(args):
+    # 1. Set config information
+    config = MobileConfig()
+    config.set_model_dir(args.model_dir)
+
+    # 2. Create paddle predictor
+    predictor = create_paddle_predictor(config)
+
+    # 3. Set input data
+    input_tensor = predictor.get_input(0)
+    input_tensor.resize([1, 3, 224, 224])
+    input_tensor.set_float_data([1.] * 3 * 224 * 224)
+
+    # 4. Run model
+    predictor.run()
+
+    # 5. Get output data
+    output_tensor = predictor.get_output(0)
+    print(output_tensor.shape())
+    print(output_tensor.float_data()[:10])
+
+if __name__ == '__main__':
+    args = parser.parse_args()
+    RunModel(args)
diff --git a/lite/tools/build.sh b/lite/tools/build.sh
index 87e50fd11e839ee3dd45552ee17944a39bb5b2be..8463c1497ad8608358dcf3f5b561419c8af1d0a2 100755
--- a/lite/tools/build.sh
+++ b/lite/tools/build.sh
@@ -15,6 +15,7 @@ readonly NUM_PROC=${LITE_BUILD_THREADS:-4}
 # global variables
 BUILD_EXTRA=OFF
 BUILD_JAVA=ON
+BUILD_PYTHON=OFF
 BUILD_DIR=$(pwd)
 
 readonly THIRDPARTY_TAR=https://paddle-inference-dist.bj.bcebos.com/PaddleLite/third-party-05b862.tar.gz
@@ -84,9 +85,11 @@ function make_tiny_publish_so {
     fi
 
     cmake .. \
+        ${PYTHON_FLAGS} \
         ${CMAKE_COMMON_OPTIONS} \
         -DWITH_TESTING=OFF \
         -DLITE_WITH_JAVA=$BUILD_JAVA \
+        -DLITE_WITH_PYTHON=$BUILD_PYTHON \
         -DLITE_SHUTDOWN_LOG=ON \
         -DLITE_ON_TINY_PUBLISH=ON \
         -DANDROID_STL_TYPE=$android_stl \
@@ -122,9 +125,11 @@ function make_full_publish_so {
     prepare_workspace $root_dir $build_directory
 
     cmake $root_dir \
+        ${PYTHON_FLAGS} \
         ${CMAKE_COMMON_OPTIONS} \
         -DWITH_TESTING=OFF \
         -DLITE_WITH_JAVA=$BUILD_JAVA \
+        -DLITE_WITH_PYTHON=$BUILD_PYTHON \
         -DLITE_SHUTDOWN_LOG=ON \
         -DANDROID_STL_TYPE=$android_stl \
         -DLITE_BUILD_EXTRA=$BUILD_EXTRA \
@@ -196,6 +201,35 @@ function make_ios {
     cd -
 }
 
+function make_cuda {
+    prepare_thirdparty
+
+    root_dir=$(pwd)
+    build_directory=$BUILD_DIR/build_cuda
+
+    if [ -d $build_directory ]
+    then
+        rm -rf $build_directory
+    fi
+    mkdir -p $build_directory
+    cd $build_directory
+
+    prepare_workspace $root_dir $build_directory
+
+    cmake .. -DWITH_MKL=OFF \
+             -DLITE_WITH_CUDA=ON \
+             -DWITH_MKLDNN=OFF \
+             -DLITE_WITH_X86=OFF \
+             -DLITE_WITH_PROFILE=OFF \
+             -DWITH_LITE=ON \
+             -DLITE_WITH_LIGHT_WEIGHT_FRAMEWORK=OFF \
+             -DWITH_TESTING=OFF \
+             -DLITE_WITH_ARM=OFF \
+             -DLITE_WITH_PYTHON=ON
+
+    make publish_inference_python_lib -j8
+    cd -
+}
 
 function print_usage {
     set +x
@@ -216,6 +250,8 @@ function print_usage {
     echo
     echo -e "optional argument:"
    echo -e "--build_extra: (OFF|ON); controls whether to publish extra operators and kernels for (sequence-related model such as OCR or NLP)"
+    echo -e "--build_python: (OFF|ON); controls whether to publish python api lib (ANDROID and IOS are not supported)"
+    echo -e "--build_java: (OFF|ON); controls whether to publish java api lib (Only ANDROID is supported)"
     echo -e "--build_dir: directory for building"
     echo
     echo -e "argument choices:"
@@ -269,6 +305,14 @@ function main {
             BUILD_EXTRA="${i#*=}"
             shift
             ;;
+        --build_python=*)
+            BUILD_PYTHON="${i#*=}"
+            shift
+            ;;
+        --build_java=*)
+            BUILD_JAVA="${i#*=}"
+            shift
+            ;;
         --build_dir=*)
             BUILD_DIR="${i#*=}"
             shift
@@ -293,6 +337,10 @@ function main {
                 build_model_optimize_tool
                 shift
                 ;;
+            cuda)
+                make_cuda
+                shift
+                ;;
             *)
                 # unknown option
                 print_usage