diff --git a/.gitmodules b/.gitmodules
index 107036c70292cf33e945f45a8bac935dea554ece..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,12 +0,0 @@
-[submodule "third-party/gflags"]
- path = third-party/gflags
- url = https://github.com/gflags/gflags.git
-[submodule "third-party/googletest"]
- path = third-party/googletest
- url = https://github.com/google/googletest.git
-[submodule "third-party/protobuf-mobile"]
- path = third-party/protobuf-mobile
- url = https://github.com/tensor-tang/protobuf.git
-[submodule "third-party/protobuf-host"]
- path = third-party/protobuf-host
- url = https://github.com/protocolbuffers/protobuf.git
diff --git a/CMakeLists.txt b/CMakeLists.txt
deleted file mode 100644
index 3643379acb32320c710e786c18c37424313e726e..0000000000000000000000000000000000000000
--- a/CMakeLists.txt
+++ /dev/null
@@ -1,183 +0,0 @@
-# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License
-
-cmake_minimum_required(VERSION 3.0)
-set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/cmake")
-include(lite_utils)
-
-lite_option(WITH_PADDLE_MOBILE "Use the paddle-mobile legacy build" OFF)
-if (WITH_PADDLE_MOBILE)
- add_subdirectory(mobile)
- return()
-endif(WITH_PADDLE_MOBILE)
-
-set(PADDLE_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR})
-set(PADDLE_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR})
-set(CMAKE_CXX_STANDARD 11)
-
-include(system)
-include(cross_compiling/preproject)
-
-project(paddle CXX C)
-message(STATUS "CXX compiler: ${CMAKE_CXX_COMPILER}, version: "
- "${CMAKE_CXX_COMPILER_ID} ${CMAKE_CXX_COMPILER_VERSION}")
-message(STATUS "C compiler: ${CMAKE_C_COMPILER}, version: "
- "${CMAKE_C_COMPILER_ID} ${CMAKE_C_COMPILER_VERSION}")
-message(STATUS "AR tools: ${CMAKE_AR}")
-
-if(NOT LITE_WITH_LIGHT_WEIGHT_FRAMEWORK)
- find_package(CUDA QUIET)
-endif()
-find_package(Git REQUIRED)
-find_package(Threads REQUIRED)
-
-include(simd)
-
-################################ Exposed Configurations #######################################
-lite_option(WITH_DSO "Compile PaddlePaddle with dynamic linked CUDA" ON)
-lite_option(WITH_AVX "Compile PaddlePaddle with AVX intrinsics" ON IF ${AVX_FOUND})
-lite_option(WITH_PYTHON "Compile PaddlePaddle with python interpreter" ON)
-lite_option(WITH_TESTING "Compile PaddlePaddle with unit testing" OFF)
-lite_option(WITH_MKL "Compile PaddlePaddle with MKL support." ON IF ${AVX_FOUND})
-lite_option(WITH_ARM_DOTPROD "Compile PaddlePaddle with ARM dot production" ON)
-lite_option(WITH_SYSTEM_BLAS "Use system blas library" OFF)
-# TODO(Superjomn) Remove WITH_ANAKIN option if not needed latter.
-if(ANDROID OR IOS OR ARMLINUX)
- set(WITH_GPU OFF CACHE STRING
- "Disable GPU when cross-compiling for Android and iOS" FORCE)
- set(WITH_DSO OFF CACHE STRING
- "Disable DSO when cross-compiling for Android and iOS" FORCE)
- set(WITH_AVX OFF CACHE STRING
- "Disable AVX when cross-compiling for Android and iOS" FORCE)
- set(WITH_PYTHON OFF CACHE STRING
- "Disable PYTHON when cross-compiling for Android and iOS" FORCE)
- set(WITH_RDMA OFF CACHE STRING
- "Disable RDMA when cross-compiling for Android and iOS" FORCE)
- set(WITH_MKL OFF CACHE STRING
- "Disable MKL when cross-compiling for Android and iOS" FORCE)
-endif()
-
-# for lite, both server and mobile framework.
-lite_option(LITE_WITH_JAVA "Enable Java JNI lib in lite mode" OFF)
-lite_option(LITE_WITH_CUDA "Enable CUDA in lite mode" OFF)
-lite_option(LITE_WITH_X86 "Enable X86 in lite mode" ON)
-lite_option(LITE_WITH_ARM "Enable ARM in lite mode" OFF)
-lite_option(LITE_WITH_NPU "Enable NPU in lite mode" OFF)
-lite_option(LITE_WITH_OPENMP "Enable OpenMP in lite framework" ON)
-lite_option(LITE_WITH_OPENCL "Enable OpenCL support in lite" OFF)
-lite_option(LITE_WITH_FPGA "Enable FPGA support in lite" OFF)
-lite_option(LITE_WITH_LIGHT_WEIGHT_FRAMEWORK "Enable light-weight framework" OFF)
-lite_option(LITE_WITH_PROFILE "Enable profile mode in lite framework" OFF)
-lite_option(LITE_WITH_PRECISION_PROFILE "Enable precision profile in profile mode ON in lite" OFF IF LITE_WITH_PROFILE)
-lite_option(LITE_SHUTDOWN_LOG "Shutdown log system or not." OFF)
-lite_option(LITE_ON_TINY_PUBLISH "Publish tiny predictor lib." OFF)
-lite_option(LITE_ON_MODEL_OPTIMIZE_TOOL "Build the model optimize tool" OFF)
-# publish options
-lite_option(LITE_BUILD_EXTRA "Enable extra algorithm support in Lite, both kernels and operators" OFF)
-
-set(THIRD_PARTY_PATH "${CMAKE_BINARY_DIR}/third_party" CACHE STRING
- "A path setting third party libraries download & build directories.")
-
-# CMAKE_BUILD_TYPE
-if(NOT CMAKE_BUILD_TYPE)
- set(CMAKE_BUILD_TYPE "RelWithDebInfo" CACHE STRING
- "Choose the type of build, options are: Debug Release RelWithDebInfo MinSizeRel"
- FORCE)
-endif()
-
-# check options
-if (LITE_ON_TINY_PUBLISH)
- if (NOT (WITH_LITE AND LITE_WITH_LIGHT_WEIGHT_FRAMEWORK AND NOT WITH_TESTING))#LITE_WITH_JAVA AND
- message(FATAL_ERROR "LITE_ON_TINY_PUBLISH=ON must be used with WITH_LITE=ON LITE_WITH_LIGHT_WEIGHT_FRAMEWORK=ON LITE_WITH_JAVA=ON WITH_TESTING=OFF")
- return()
- endif()
-endif()
-
-include_directories("${PADDLE_SOURCE_DIR}")
-# the generated header files.
-set(LITE_GENERATED_INCLUDE_DIR "${CMAKE_BINARY_DIR}")
-include_directories("${LITE_GENERATED_INCLUDE_DIR}")
-
-# for mobile
-if (WITH_LITE AND LITE_WITH_LIGHT_WEIGHT_FRAMEWORK)
- message(STATUS "Building the mobile framework")
- include(cross_compiling/postproject)
- include(cross_compiling/npu) # check and prepare NPU DDK
-
- # We compile the mobile deployment library when LITE_ON_TINY_PUBLISH=ON
- # So the following third party dependencies are not needed.
- if (NOT LITE_ON_TINY_PUBLISH)
- # include the necessary thirdparty dependencies
- include(external/gflags) # download, build, install gflags
- # LITE_WITH_LIGHT_WEIGHT_FRAMEWORK=ON will disable glog
- # TODO(sangoly): refine WITH_LITE and LITE_WITH_LIGHT_WEIGHT_FRAMEWORK
- include(external/gtest) # download, build, install gtest
- include(ccache) # set ccache for compilation
- include(external/protobuf) # download, build, install protobuf
- endif()
-
- # for opencl
- if (LITE_WITH_OPENCL)
- include(external/opencl-headers)
- include(external/opencl-clhpp)
- endif()
-
- include(generic) # simplify cmake module
- include(configure) # add paddle env configuration
-
- add_subdirectory(lite)
- return()
-endif()
-################################# End of mobile compile ##############################
-
-set(WITH_MKLML ${WITH_MKL})
-if (NOT DEFINED WITH_MKLDNN)
- if (WITH_MKL AND AVX2_FOUND)
- set(WITH_MKLDNN ON)
- else()
- message(STATUS "Do not have AVX2 intrinsics and disabled MKL-DNN")
- set(WITH_MKLDNN OFF)
- endif()
-endif()
-
-########################################################################################
-
-include(external/mklml) # download mklml package
-include(external/xbyak) # download xbyak package
-include(external/libxsmm) # download, build, install libxsmm
-include(external/gflags) # download, build, install gflags
-include(external/glog) # download, build, install glog
-include(external/gtest) # download, build, install gtest
-include(external/protobuf) # download, build, install protobuf
-include(external/openblas) # download, build, install openblas
-include(external/mkldnn) # download, build, install mkldnn
-include(external/eigen) # download eigen3
-include(external/xxhash) # download install xxhash needed for x86 jit
-
-include(cudnn)
-include(configure) # add paddle env configuration
-
-if(LITE_WITH_CUDA)
- include(cuda)
-endif()
-
-include(generic) # simplify cmake module
-include(ccache) # set ccache for compilation
-include(util) # set unittest and link libs
-include(version) # set PADDLE_VERSION
-
-set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "-O3 -g -DNDEBUG")
-set(CMAKE_C_FLAGS_RELWITHDEBINFO "-O3 -g -DNDEBUG")
-
-add_subdirectory(lite)
diff --git a/Home.md b/Home.md
new file mode 100644
index 0000000000000000000000000000000000000000..f521dbf1713842b5a981ee8cae332f43a7675148
--- /dev/null
+++ b/Home.md
@@ -0,0 +1,54 @@
+# Paddle Lite 文档
+## 总体概述
+
+Paddle-Lite 框架是 PaddleMobile 新一代架构,重点支持移动端推理预测,特点**高性能、多硬件、轻量级** 。支持PaddleFluid/TensorFlow/Caffe/ONNX模型的推理部署,目前已经支持 ARM CPU, Mali GPU, Adreno GPU, Huawei NPU 等多种硬件,正在逐步增加 X86 CPU, Nvidia GPU 等多款硬件,相关硬件性能业内领先。
+
+
+## 简介
+
+- [技术特点](./tech_highlights)
+- [架构设计](./architecture)
+- [Road Map](./roadmap)
+
+## Benchmark
+
+- [最新性能](./benchmark)
+- [测试方法](./benchmark_tools)
+
+## 安装
+
+- [源码编译](./source_compile)
+
+## 使用
+
+- [使用流程](./tutorial)
+- [C++实例](./cpp_demo)
+- [Java实例](./java_demo)
+- [Android/IOS APP demo](https://github.com/PaddlePaddle/Paddle-Lite-Demo)
+- [模型转化方法](./model_optimize_tool)
+
+## 进阶
+
+- [通过 X2Paddle 支持 Caffe, TensorFlow 模型](x2paddle)
+- [模型量化](./model_quantization)
+- [支持Op列表](./support_operation_list)
+- [新增Op方法](./add_new_operation)
+- [测试工具](./debug_tools)
+- [调试方法](./debug_tools)
+- [使用华为NPU](./npu)
+- [使用Android GPU](./opencl)
+- [使用FPGA](./fpga)
+
+## 开发者文档
+
+- [开发基础须知](./for-developer)
+- [架构详解](./architecture-intro)
+
+## FAQ
+
+- 问题或建议可以[发Issue](https://github.com/PaddlePaddle/Paddle-Lite/issues),为加快问题解决效率,可先检索是否有类似问题,我们也会及时解答!
+- 欢迎加入Paddle-Lite百度官方QQ群:696965088
+
+## paddle-mobile
+
+- [paddle-mobile 编译](./mobile)
diff --git a/README.md b/README.md
deleted file mode 100644
index e32840a21dba66cc698b47ff7ee6436ab2b0124b..0000000000000000000000000000000000000000
--- a/README.md
+++ /dev/null
@@ -1,74 +0,0 @@
-[中文版](./README_cn.md)
-
-# Paddle Lite
-
-
-[](https://github.com/PaddlePaddle/Paddle-Lite/wiki)
-[](LICENSE)
-
-
-
-Paddle Lite is an updated version of Paddle-Mobile, an open-open source deep learning framework designed to make it easy to perform inference on mobile, embeded, and IoT devices. It is compatible with PaddlePaddle and pre-trained models from other sources.
-
-For tutorials, please see [PaddleLite Wiki](https://github.com/PaddlePaddle/Paddle-Lite/wiki).
-
-## Key Features
-
-### Light Weight
-
-On mobile devices, execution module can be deployed without third-party libraries, because our excecution module and analysis module are decoupled.
-
-On ARM V7, only 800KB are taken up, while on ARM V8, 1.3MB are taken up with the 80 operators and 85 kernels in the dynamic libraries provided by Paddle Lite.
-
-Paddle Lite enables immediate inference without extra optimization.
-
-### High Performance
-
-Paddle Lite enables device-optimized kernels, maximizing ARM CPU performance.
-
-It also supports INT8 quantizations with [PaddleSlim model compression tools](https://github.com/PaddlePaddle/models/tree/v1.5/PaddleSlim), reducing the size of models and increasing the performance of models.
-
-On Huawei NPU and FPGA, the performance is also boosted.
-
-The latest benchmark is located at [benchmark](https://github.com/PaddlePaddle/Paddle-Lite/wiki/benchmark)
-
-### High Compatibility
-
-Hardware compatibility: Paddle Lite supports a diversity of hardwares — ARM CPU, Mali GPU, Adreno GPU, Huawei NPU and FPGA. In the near future, we will also support AI microchips from Cambricon and Bitmain.
-
-Model compatibility: The Op of Paddle Lite is fully compatible to that of PaddlePaddle. The accuracy and performance of 18 models (mostly CV models and OCR models) and 85 operators have been validated. In the future, we will also support other models.
-
-Framework compatibility: In addition to models trained on PaddlePaddle, those trained on Caffe and TensorFlow can also be converted to be used on Paddle Lite, via [X2Paddle](https://github.com/PaddlePaddle/X2Paddle). In the future to come, we will also support models of ONNX format.
-
-## Architecture
-
-Paddle Lite is designed to support a wide range of hardwares and devices, and it enables mixed execution of a single model on multiple devices, optimization on various phases, and leight-weighted applications on devices.
-
-
-
-As is shown in the figure above, analysis phase includes Machine IR module, and it enables optimizations like Op fusion and redundant computation pruning. Besides, excecution phase only involves Kernal exevution, so it can be deployed on its own to ensure maximized light-weighted deployment.
-
-## Key Info about the Update
-
-The earlier Paddle-Mobile was designed to be compatible with PaddlePaddle and multiple hardwares, including ARM CPU, Mali GPU, Adreno GPU, FPGA, ARM-Linux and Apple's GPU Metal. Within Baidu, inc, many product lines have been using Paddle-Mobile. For more details, please see: [mobile/README](https://github.com/PaddlePaddle/Paddle-Lite/blob/develop/mobile/README.md).
-
-As an update of Paddle-Mobile, Paddle Lite has incorporated many older capabilities into the [new architecture](https://github.com/PaddlePaddle/Paddle-Lite/tree/develop/lite). For the time being, the code of Paddle-mobile will be kept under the directory `mobile/`, before complete transfer to Paddle Lite.
-
-For demands of Apple's GPU Metal and web front end inference, please see `./metal` and `./web` . These two modules will be further developed and maintained.
-
-## Special Thanks
-
-Paddle Lite has referenced the following open-source projects:
-
-- [ARM compute library](http://agroup.baidu.com/paddle-infer/md/article/%28https://github.com/ARM-software/ComputeLibrary%29)
-- [Anakin](https://github.com/PaddlePaddle/Anakin). The optimizations under Anakin has been incorporated into Paddle Lite, and so there will not be any future updates of Anakin. As another high-performance inference project under PaddlePaddle, Anakin has been forward-looking and helpful to the making of Paddle Lite.
-
-
-## Feedback and Community Support
-
-- Questions, reports, and suggestions are welcome through Github Issues!
-- Forum: Opinions and questions are welcome at our [PaddlePaddle Forum](https://ai.baidu.com/forum/topic/list/168)!
-- WeChat Official Account: PaddlePaddle
-- QQ Group Chat: 696965088
-

- WeChat Official Account QQ Group Chat
diff --git a/README_cn.md b/README_cn.md
deleted file mode 100644
index d2111786b13b6d2b1ee25a5678809a9097e39466..0000000000000000000000000000000000000000
--- a/README_cn.md
+++ /dev/null
@@ -1,62 +0,0 @@
-# Paddle Lite
-
-
-[](https://github.com/PaddlePaddle/Paddle-Lite/wiki)
-[](LICENSE)
-
-
-Paddle Lite为Paddle-Mobile的升级版,定位支持包括手机移动端在内更多场景的轻量化高效预测,支持更广泛的硬件和平台,是一个高性能、轻量级的深度学习预测引擎。在保持和PaddlePaddle无缝对接外,也兼容支持其他训练框架产出的模型。
-
-完整使用文档位于 [PaddleLite Wiki](https://github.com/PaddlePaddle/Paddle-Lite/wiki) 。
-
-## 特性
-
-### 轻量级
-执行阶段和计算优化阶段实现良好解耦拆分,移动端可以直接部署执行阶段,无任何第三方依赖。
-包含完整的80个 Op+85个 Kernel 的动态库,对于ARMV7只有800K,ARMV8下为1.3M,并可以裁剪到更低。
-在应用部署时,载入模型即可直接预测,无需额外分析优化。
-
-### 高性能
-极致的 ARM CPU 性能优化,针对不同微架构特点实现kernel的定制,最大发挥计算性能,在主流模型上展现出领先的速度优势。
-支持INT8量化计算,结合 [PaddleSlim 模型压缩工具](https://github.com/PaddlePaddle/models/tree/v1.5/PaddleSlim) 中 INT8量化训练功能,可以提供高精度高性能的预测能力。
-在Huawei NPU, FPGA上也具有有很好的性能表现。
-
-最新 Benchmark 位于 [benchmark](https://github.com/PaddlePaddle/Paddle-Lite/wiki/benchmark)。
-
-### 通用性
-硬件方面,Paddle Lite 的架构设计为多硬件兼容支持做了良好设计。除了支持ARM CPU、Mali GPU、Adreno GPU,还特别支持了华为 NPU,以及 FPGA 等边缘设备广泛使用的硬件。即将支持支持包括寒武纪、比特大陆等AI芯片,未来会增加对更多硬件的支持。
-
-模型支持方面,Paddle Lite和PaddlePaddle训练框架的Op对齐,提供更广泛的模型支持能力。目前已严格验证18个模型85个OP的精度和性能,对视觉类模型做到了较为充分的支持,覆盖分类、检测和定位,包含了特色的OCR模型的支持。未来会持续增加更多模型的支持验证。
-
-框架兼容方面:除了PaddlePaddle外,对其他训练框架也提供兼容支持。当前,支持Caffe 和 TensorFlow 训练出来的模型,通过X2Paddle (https://github.com/PaddlePaddle/X2Paddle) 转换工具实现。接下来将会对ONNX等格式模型提供兼容支持。
-
-## 架构
-
-PaddleLite 的架构设计着重考虑了对多硬件和平台的支持,并且强化了多个硬件在一个模型中混合执行的能力,多个层面的性能优化处理,以及对端侧应用的轻量化设计。
-
-
-
-其中,Analysis Phase 包括了 MIR(Machine IR) 相关模块,能够对原有的模型的计算图针对具体的硬件列表进行算子融合、计算裁剪 在内的多种优化。Execution Phase 只涉及到Kernel 的执行,且可以单独部署,以支持极致的轻量级部署。
-
-
-## Paddle-Mobile升级为Paddle Lite的说明
-原Paddle-Mobile作为一个致力于嵌入式平台的PaddlePaddle预测引擎,已支持多种硬件平台,包括ARM CPU、 Mali GPU、Adreno GPU,以及支持苹果设备的GPU Metal实现、ZU5、ZU9等FPGA开发板、树莓派等arm-linux开发板。在百度内已经过广泛业务场景应用验证。对应设计文档可参考: [mobile/README](https://github.com/PaddlePaddle/Paddle-Lite/blob/develop/mobile/README.md)
-
-Paddle-Mobile 整体升级重构并更名为Paddle Lite后,原paddle-mobile 的底层能力大部分已集成到[新架构 ](https://github.com/PaddlePaddle/Paddle-Lite/tree/develop/lite)下。作为过渡,暂时保留原Paddle-mobile代码。 主体代码位于 `mobile/` 目录中,后续一段时间会继续维护,并完成全部迁移。新功能会统一到[新架构 ](https://github.com/PaddlePaddle/Paddle-Lite/tree/develop/lite)下开发。
-
-metal, web的模块相对独立,会继续在 `./metal` 和 `./web` 目录下开发和维护。对苹果设备的GPU Metal实现的需求及web前端预测需求,可以直接进入这两个目录。
-
-## 致谢:
-Paddle Lite 借鉴了以下开源项目:
-- [ARM compute library]((https://github.com/ARM-software/ComputeLibrary))
-- [Anakin](https://github.com/PaddlePaddle/Anakin) ,Anakin对应底层的一些优化实现已被集成到Paddle Lite。Anakin作为PaddlePaddle组织下的一个高性能预测项目,极具前瞻性,对Paddle Lite有重要贡献。Anakin已和本项目实现整合。之后,Anakin不再升级。
-
-## 交流与反馈
-* 欢迎您通过Github Issues来提交问题、报告与建议
-* 微信公众号:飞桨PaddlePaddle
-* QQ群: 696965088
-
-

- 微信公众号 官方技术交流QQ群
-
-* 论坛: 欢迎大家在[PaddlePaddle论坛](https://ai.baidu.com/forum/topic/list/168)分享在使用PaddlePaddle中遇到的问题和经验, 营造良好的论坛氛围
diff --git a/add_new_operation.md b/add_new_operation.md
new file mode 100644
index 0000000000000000000000000000000000000000..a077a20696e85684ac61980a100346b5b506fe23
--- /dev/null
+++ b/add_new_operation.md
@@ -0,0 +1,189 @@
+# 新增op的方法
+
+以下以添加argmax为例,详细说明新增op的方法步骤。
+
+## 1. 添加OpParam 结构体以传导 Op 的输入和输出
+
+- 这里命名为 `ArgmaxParam`
+
+- 在 `paddlelite/lite/operators/op_params.h` 中添加 `ArgmaxParam` 结构体,代码如下:
+ ```c++
+ struct ArgmaxParam {
+ lite::Tensor* X{};
+ lite::Tensor* Out{};
+ int Axis{0};
+ };
+ ```
+## 2. 添加 Argmax Op 并注册
+
+- 在paddlelite/lite/operators/目录下新建argmax_op.h文件,主要代码如下:
+ ```c++
+ class ArgmaxOpLite : public OpLite {
+ public:
+ ArgmaxOpLite() {}
+ explicit ArgmaxOpLite(const std::string &op_type) : OpLite(op_type) {}
+ bool CheckShape() const override;
+ bool InferShape() const override;
+ bool AttachImpl(const cpp::OpDesc &opdesc, lite::Scope *scope) override;
+ void AttachKernel(KernelBase *kernel) override { kernel->SetParam(param_); }
+ std::string DebugString() const override { return "argmax"; }
+ private:
+ mutable ArgmaxParam param_;
+ };
+ ```
+ `ArgmaxOpLite` 继承 `OpLite` ,成员变量包括 `ArgmaxParam` 结构体,需要实现的接口包括 `CheckShape()` 、`InferShape()` 、`AttachImpl()` 、`AttachKernel()` 和 `DebugString()` 函数。`AttachKernel()` 和 `DebugString() `函数较为简单,此处直接实现;
+
+- 在 `paddlelite/lite/operators/` 目录下新建argmax_op.cc文件,需要具体实现`CheckShape()`、`InferShape()`和`AttachImpl()`函数。`CheckShape()`函数检查输入是否符合要求,`InferShape()`函数基于输入推断得到输出的维度,`AttachImpl()`函数绑定Op的输入输出。然后在argmax_op.cc文件中注册argmax,核心代码如下:
+ ```c++
+ bool ArgmaxOpLite::CheckShape() const {
+ CHECK_OR_FALSE(param_.X);
+ CHECK_OR_FALSE(param_.Out);
+ CHECK_OR_FALSE(param_.Axis < (param_.X)->dims().size());
+ return true;
+ }
+
+ bool ArgmaxOpLite::InferShape() const {
+ auto x_dims = param_.X->dims();
+ int x_rank = x_dims.size();
+ int axis = param_.Axis;
+ if (axis < 0) axis += x_rank;
+
+ std::vector<int64_t> out_dims;
+ for (int64_t i = 0; i < axis; i++) {
+ out_dims.push_back(x_dims[i]);
+ }
+ for (int64_t i = axis + 1; i < x_rank; i++) {
+ out_dims.push_back(x_dims[i]);
+ }
+
+ // Set output dims
+ param_.Out->Resize(lite::DDim(out_dims));
+ return true;
+ }
+
+ bool ArgmaxOpLite::AttachImpl(const cpp::OpDesc &op_desc, lite::Scope *scope) {
+ auto x = op_desc.Input("X").front();
+ auto out = op_desc.Output("Out").front();
+
+ param_.X = scope->FindVar(x)->GetMutable<lite::Tensor>();
+ param_.Out = scope->FindVar(out)->GetMutable<lite::Tensor>();
+ param_.Axis = op_desc.GetAttr("Axis");
+
+ return true;
+ }
+ REGISTER_LITE_OP(argmax, paddle::lite::operators::ArgmaxOpLite);
+ ```
+- 在paddlelite/lite/operators/CMakeLists.txt中添加```lite_cc_library(argmax_op SRCS argmax_op.cc DEPS ${op_DEPS})```,并且在set ops lite 中添加argmax_op;
+- 在paddlelite/lite/api/paddle_use_ops.h中添加```USE_LITE_OP(argmax)```。
+
+## 3. 添加Argmax Kernel并绑定
+以下以arm端argmax实现为例说明
+- 在paddlelite/lite/kernels/arm/目录下新建argmax_compute.h文件,声明ArgmaxCompute类,并继承KernelLite,主要代码如下:
+ ```c++
+ class ArgmaxCompute : public KernelLite<TARGET(kARM), PRECISION(kFloat)> {
+ public:
+ using param_t = operators::ArgmaxParam;
+ void Run() override;
+ virtual ~ArgmaxCompute() = default;
+ };
+ ```
+- 在paddlelite/lite/kernels/arm/目录下新建argmax_compute.cc文件,主要实现Run函数。`Run()`函数调用paddlelite/lite/arm/math/argmax.h中的`argmax_func()`函数,根据输入计算输出。最后在argmax_compute.cc文件中,我们绑定argmax的输入输出(为tensor的输入参数都需要绑定),代码如下:
+ ```c++
+ void ArgmaxCompute::Run() {
+ auto& param = Param();
+ lite::Tensor* input = param.X;
+ lite::Tensor* output = param.Out;
+ int axis = param.Axis;
+ lite::arm::math::argmax_func(input, axis, output);
+ return;
+ }
+
+ REGISTER_LITE_KERNEL(
+ argmax, kARM, kFloat, kNCHW, paddle::lite::kernels::arm::ArgmaxCompute, def)
+ .BindInput("X", {LiteType::GetTensorTy(TARGET(kARM))})
+ .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kARM))})
+ .Finalize();
+ ```
+
+- 在paddlelite/lite/kernels/arm/CMakeLists.txt中添加
+ ```cmake
+ lite_cc_library(argmax_compute_arm SRCS argmax_compute.cc DEPS ${lite_kernel_deps} math_arm)
+ ```
+ CMakeLists.txt中set arm_kernels需要添加argmax_compute_arm;
+- 在paddlelite/lite/api/paddle_use_kernels.h中添加```USE_LITE_KERNEL(argmax, kARM, kFloat, kNCHW, def)```。
+
+## 4. 添加Argmax实现
+- 在paddlelite/lite/arm/math/目录下新建argmax.h文件,声明`argmax_func()`函数,代码如下:
+ ```c++
+ void argmax_func(const lite::Tensor* input, const int axis, lite::Tensor* output);
+ ```
+- 在paddlelite/lite/arm/math/目录下新建argmax.cc文件,具体实现`argmax_func()`函数,代码如下:
+ ```c++
+ void argmax_func(const lite::Tensor *input,
+ const int axis,
+ lite::Tensor *output) {
+ auto input_ddim = input->dims();
+ auto output_ddim = output->dims();
+
+ const int size = input_ddim[axis];
+ const int in_channel = input_ddim.count(axis, input_ddim.size());
+ const int out_channel = output_ddim.count(axis, output_ddim.size());
+ const int in_stride = input_ddim.count(axis + 1, input_ddim.size());
+ const int out_stride = input_ddim.count(0, axis);
+
+ for (int n = 0; n < out_stride; n++) {
+ for (int k = 0; k < in_stride; k++) {
+ const float *in_ptr = input->data<float>() + n * in_channel + k;
+ std::vector<std::pair<float, int>> vec;
+ vec.resize(size);
+ for (int i = 0; i < size; i++) {
+ vec[i] = std::make_pair(in_ptr[i * in_stride], i);
+ }
+ // sort
+ std::partial_sort(vec.begin(),
+ vec.begin() + 1,
+ vec.end(),
+ std::greater<std::pair<float, int>>());
+
+ // out
+ float *out_ptr = output->mutable_data<float>() + n * out_channel + k;
+ *out_ptr = vec[0].second;
+ }
+ }
+ }
+ ```
+- 在paddlelite/lite/arm/math/CMakeLists.txt中的```math_arm library```中添加argmax.cc,在paddlelite/lite/arm/math/funcs.h中添加```#include "lite/arm/math/argmax.h"```
+
+## 5. 添加Argmax单测
+- 在paddlelite/lite/tests/kernels目录下新建argmax_compute_test.cc文件,声明并实现ArgmaxComputeTester类;
+- ArgmaxComputeTester类中主要包括PrepareOpDesc、PrepareData和RunBaseline函数。PrepareOpDesc函数设定单测op的类型和输入输出参数,PrepareData函数对输入tensor进行初始化,RunBaseline是基于输入计算得到输出,用于和框架计算的输出进行对比;
+- 使用gtest添加单测,代码如下:
+ ```c++
+ TEST(Argmax, precision) {
+ #ifdef LITE_WITH_ARM
+ LOG(INFO) << "test argmax arm";
+ Place place(TARGET(kARM));
+
+ for (int axis : {0, 1, 2, 3}) {
+ for (int n : {1, 3}) {
+ for (int c : {3, 6}) {
+ for (int h : {9, 18}) {
+ for (int w : {9, 18}) {
+ std::unique_ptr<arena::TestCase> tester(
+ new ArgmaxComputeTester(place, "def", axis, n, c, h, w));
+ arena::Arena arena(std::move(tester), place, 2e-5);
+ arena.TestPrecision();
+ }
+ }
+ }
+ }
+ }
+ #endif
+ }
+ ```
+- 在paddlelite/lite/tests/kernels/CMakeLists.txt中添加
+ ```cmake
+ lite_cc_test(test_kernel_argmax_compute SRCS argmax_compute_test.cc DEPS arena_framework ${x86_kernels} ${arm_kernels} ${lite_ops} ${host_kernels})
+ ```
+# 6. 编译运行
+- 在paddlelite目录中,执行```./lite/tools/ci_build.sh build_test_arm```,该脚本会创建手机模拟器,并编译运行所有单测(花费时间较久)。如果运行无误,则表明添加argmax成功。
diff --git a/architecture-intro.md b/architecture-intro.md
new file mode 100644
index 0000000000000000000000000000000000000000..e7a705677c48f105853dfc3764372ea445d1a0cd
--- /dev/null
+++ b/architecture-intro.md
@@ -0,0 +1,247 @@
+# Paddle-Lite 开发者文档
+
+这篇文档会从开发者角度详细介绍开发 Paddle-Lite 需要的相关信息。
+
+
+
+## 设计及思考
+
+近年来,各种深度学习预估硬件层出不穷,从手机APP到车载设备,再到音箱,均需要部署深度学习预测,且有如下共性需求:
+
+1. 高性能
+2. 硬件支持和扩展容易
+3. 轻量级部署
+
+Paddle-Lite 的架构方面便是定向参考如上需求设计实现的,具体地
+
+- 高性能方面
+ - 通过 MIR(Machine IR) 实现精细复杂的计算图的分析和优化
+ - 执行期 Kernel 的简单设计,几乎没有额外调度开销
+ - 适当的硬件层抽象,框架支持各个硬件后端中做特定的调度实现
+- 轻量级部署方面
+ - 拆分分析和执行两个阶段,执行阶段轻量级实现,可以单独部署
+ - 轻量级 Op 和 Kernel 设计
+- 硬件支持和扩展方面
+ - 通过 MIR 支撑带硬件和执行信息的宏观分析优化
+ - TypeSystem 抽象带硬件的不同计算模式的表示,实现整个计算图的强类型推导,以及执行状态机的静态分析
+
+Paddle-Lite 的架构尝试从强类型推导的角度建模支持多硬件,多种计算模式(不同量化精度、不同的 data layout等)的混合计算,从而实现宏观上的各异硬件和计算模式的混合。
+
+框架部分已经经过 FPGA,GPU,NPU 等异构硬件的打磨,各项能力也在完善中。
+
+## 重要模块介绍
+
+### OpLite
+
+[OpLite](https://github.com/PaddlePaddle/Paddle-Lite/blob/v2.0.0-beta1-prerel/lite/core/op_lite.h#L52) 是 Paddle-Lite 中的 Operator,用户扩展单个硬件时,最多的就是扩展 Op 和 Kernel。
+
+重要方法如下:
+
+```c++
+class OpLite : public Registry {
+ public:
+ // Check the shape.
+ virtual bool CheckShape() const { return true; }
+ // Inference the outputs' shape.
+ virtual bool InferShape() const { return true; }
+ // Link the external execution environ to internal context.
+ bool AttachImpl(const cpp::OpDesc &opdesc, lite::Scope *scope);
+};
+```
+
+其中,分析期执行
+
+- `AttachImpl`
+
+执行期执行
+
+- `CheckShape`
+- `InferShape`
+
+扩展须知:
+
+1. `CheckShape` 只在第一个 batch 执行,所以耗时不敏感
+
+2. `InferShape` 需要在每个 batch 执行,应该严格耗时
+
+ 1. 可以通过添加 member variable 的方式,对其中一部分信息增加 cache,比如
+
+ ```c++
+ class XXOp : public OpLite {
+ void InferShape() {
+ int batch_size = param().input.shape[0];
+ if (!shape_cache_.empty()) {
+ shape_cache_[0] = batch_size;
+ param().output->Resize(shape_cache_);
+ }
+ }
+
+ private:
+ shape_t shape_cache_;
+ }
+ ```
+
+
+
+### OpParam
+
+[OpParam](https://github.com/PaddlePaddle/Paddle-Lite/blob/v2.0.0-beta1-prerel/lite/operators/op_params.h) 用于存储执行期 Kernel 需要的各项参数。 所有字段可以直接存储(比如指针或者 `int`),以避免执行中获取参数的延迟。
+
+因为没有需求,OpParam 暂时没有设置基类。
+
+实际例子:
+
+```c++
+// For Softmax op
+struct SoftmaxParam {
+ lite::Tensor* x{};
+ lite::Tensor* output{};
+ int axis{-1};
+};
+```
+
+OpLite 的 `AttachImpl` 方法就用于构建 `OpParam` ,复制传递给 `Kernel` 用于执行。
+
+OpParam 是执行期的重要模块,需要严格保证性能,相应的扩展要求:
+
+1. 字段的获取必须是低延迟的,可以直接用指针,或者直接复制值
+2. 避免执行无关信息混入,包括 debug 信息
+3. 命名需要与 Paddle OpDesc 中的信息严格一致,以降低功能对齐和理解的难度
+
+### Kernel
+
+```c++
+template <TargetType Target, PrecisionType Precision, DataLayoutType DataLayout>
+class KernelLite : public KernelBase {
+ public:
+ // Run the kernel.
+ virtual void Run() { CHECK(false) << "Not Implemented"; }
+
+ TargetType target() const override { return Target; }
+ PrecisionType precision() const override { return Precision; }
+ DataLayoutType layout() const override { return DataLayout; }
+ Place place() const override { return Place{Target, Precision, DataLayout}; }
+ std::string name() const override;
+};
+```
+
+由于是执行期的重要概念,因此 Kernel 设计地非常简单高效。
+
+其中,执行期的 `Run` 是其唯一重要的接口,其中包含具体的计算逻辑。
+
+模板中的参数主要用于方便多硬件编译,以及自解释:
+
+- Target: 执行硬件
+- Precision: 主要的计算精度
+- DataLayout:主要计算的 data layout
+
+这部分信息用于帮助挑选 kernel,具体的值并不严格。
+
+
+
+Kernel 的注册需要用到 TypeSystem,不光对 Kernel 本身的特性进行描述,对其输入和输出均进行详尽的定义。
+
+例如 FullyConnected 的注册
+
+```c++
+REGISTER_LITE_KERNEL(
+ fc, kARM, kFloat, kNCHW, paddle::lite::kernels::arm::FcCompute, def)
+ .BindInput("Input", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kFloat), LAYOUT(kNCHW))})
+ .BindInput("Bias", {LiteType::GetTensorTy(TARGET(kARM))})
+ .BindInput("W", {LiteType::GetTensorTy(TARGET(kARM))})
+ .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kARM))})
+ .Finalize();
+```
+
+Kernel自身定义是 `kARM` 的,也就是ARM上的kernel,主要的计算精度是 `kFloat`,主要的 Data layout 是 `kNCHW`。
+
+接着会对其所有的输入和输出做详细定义,比如看 `Input` 输入的定义是 `LiteType::GetTensorTy(TARGET(kARM), PRECISION(kFloat), LAYOUT(kNCHW))`,也就是声明其 Target 是 `kARM`, PRECISION 是 `kFloat`,Data Layout 是 `kNCHW`。
+
+这里的设计思想是类似C++中的函数重载,同一个 Kernel(的名字),在重载了其输入输出的类型之后可以是不同的kernel。
+
+#### 扩展须知
+
+1. 模板参数选用计算中主要的来表示
+ 1. 比如,scale kernel,同时能接受 `float` 和 `int` 的输入,但其不算量化 kernel,那应该设置为 `Precision=float`,代表常规的计算精度中使用
+2. Kernel 输入输出的定义需要足够精确,是什么类型就是什么类型;框架会根据其输入输出的定义来动态构建状态机,否则会出现分析期和执行期的状态机不一致,造成未定义行为
+
+### MIR
+
+MIR 类似于 LLVM 里的 IR,只是加上了硬件和执行期的信息参与分析优化。
+
+Pass 是MIR中的模块化策略,其输入和输出都是 SSA Graph.
+
+框架会自动基于模型的Program 构建 SSA Graph,之后按 [Optimizer](https://github.com/PaddlePaddle/Paddle-Lite/blob/v2.0.0-beta1-prerel/lite/core/optimizer.h) 中定义的pass的顺序调用一系列 Pass。
+
+#### Op Fusion
+
+MIR 中的 [PatternMatcher](https://github.com/PaddlePaddle/Paddle-Lite/blob/v2.0.0-beta1-prerel/lite/core/mir/pattern_matcher.h) 实现了简单有效的基于图的模板识别的算法,相关的 op fusion 的图操作可以基于此实现。
+
+实际的例子可以参考 [fc_fuse_pass.h](https://github.com/PaddlePaddle/Paddle-Lite/blob/v2.0.0-beta1-prerel/lite/core/mir/fusion/fc_fuse_pass.h)。
+
+### TypeSystem
+
+TypeSystem 是 Paddle-Lite 中构建复杂计算图的基础模块,核心思想是协助 SSA Graph 构建一个状态机,表示其中不同的状态。
+
+这里的 Type 主要包含下面四组信息,更多的信息可以按需扩展:
+
+- TargetType
+- Precision
+- DataLayout
+- device id,用于表示卡号
+
+
+
+状态机的表示:
+
+```python
+Tensor0(kARM, kFloat, kNCHW) --pass--> Tensor1(kOpenCL, kFloat, kNCHW)
+```
+
+MIR 会识别出,Tensor0 和 Tensor1 的硬件位置不同,因此触发相应的 Pass 插入对应的 cast op 来进行 type cast,比如
+
+```
+Tensor0(kARM, kFloat, kNCHW) --pass-> IoCopyOp(kARM, kOpenCL) --pass-> Tensor1(kOpenCL, kFloat, kNCHW)
+```
+
+### KernelContext
+
+KernelContext 是硬件支持的核心封装,主要用于为 Kernel 提供执行期的硬件上下文。
+
+KernelContext 的设计类似于 OpParam,两者均没有基类;对于 KernelContext,其假定是,不同的硬件间的接口和逻辑可能完全不同,比如 kARM 和 kCUDA,因此不设定基类,也不需要提供统一的接口来封装不同硬件行为。
+
+不同硬件的 KernelContext 直接与该硬件对应的 Kernel 对接。
+
+KernelContext 的行为可以被 MIR 在分析期确定和调度。
+
+注意事项:
+
+1. 由于是执行期概念,KernelContext 也需要注意性能和轻量化
+2. 移动端部署时只会部署执行期,因此 MIR 和 KernelContext 会拆开,因此 KernelContext 相应的设置需要能够序列化到 ProgramDesc 中,以便执行期载入和执行
+
+## 扩展硬件后端
+
+### 扩展现有的硬件后端
+
+主要是扩充 Op 和 Kernel 的工作,如果需要 fuse,则参考 MIR 章节,增加相应的fuse pass便可,具体地,可以参考
+
+- [fc_op](https://github.com/PaddlePaddle/Paddle-Lite/blob/release/v2.0.0-beta1/lite/operators/fc_op.h) 实现类似的 Op
+- [fc_compute](https://github.com/PaddlePaddle/Paddle-Lite/blob/release/v2.0.0-beta1/lite/kernels/arm/fc_compute.h) 实现类似的 Kernel
+- [fc_fuse_pass](https://github.com/PaddlePaddle/Paddle-Lite/blob/release/v2.0.0-beta1/lite/core/mir/fusion/fc_fuse_pass.h) 实现fuse逻辑,并注册到 [optimizer](https://github.com/PaddlePaddle/Paddle-Lite/blob/release/v2.0.0-beta1/lite/core/optimizer.h)
+
+### 扩展全新硬件后端
+
+需要额外扩充如下模块,让框架能够支撑硬件执行:
+
+- TypeSystem,需要扩充其中相关的 type
+ - 相关 [enum](https://github.com/PaddlePaddle/Paddle-Lite/blob/release/v2.0.0-beta1/lite/api/paddle_place.h#L44)
+- MIR,需要扩展其中的 type cast 相关的 pass
+ - [TargetType cast pass](https://github.com/PaddlePaddle/Paddle-Lite/blob/release/v2.0.0-beta1/lite/core/mir/type_target_cast_pass.cc) 用于拷贝不同硬件上的tensor
+ - [Data layout cast pass](https://github.com/PaddlePaddle/Paddle-Lite/blob/release/v2.0.0-beta1/lite/core/mir/type_target_cast_pass.h) 用于转化不同的 data layout
+ - [Precision cast pass](https://github.com/PaddlePaddle/Paddle-Lite/blob/release/v2.0.0-beta1/lite/core/mir/type_precision_cast_pass.h) 用于转化不同 tensor 的量化精度
+- KernelContext,具体地可以参考
+ - [ARM context](https://github.com/PaddlePaddle/Paddle-Lite/blob/release/v2.0.0-beta1/lite/core/context.h#L91)
+ - 需要注意的是,硬件 context 的接口只服务于该硬件的 kernel
+ - context 有分析期和执行期两个阶段,如果分析期没有特殊的优化,则无需考虑;否则,需要注意将分析期的信息整理并序列化到离线模型中,用于执行期直接加载。
\ No newline at end of file
diff --git a/architecture.md b/architecture.md
new file mode 100644
index 0000000000000000000000000000000000000000..fbcd9b05ebce9d393d4e6f532a54ab6372bfda27
--- /dev/null
+++ b/architecture.md
@@ -0,0 +1,94 @@
+# 架构设计
+
+Mobile 在这次升级为 Lite 架构, 侧重多硬件、高性能的支持,其主要设计思想如下
+
+- 引入 Type system,强化多硬件、量化方法、data layout 的混合调度能力
+- 硬件细节隔离,通过不同编译开关,对支持的任何硬件可以自由插拔
+- 引入 MIR(Machine IR) 的概念,强化带执行环境下的优化支持
+- 优化期和执行期严格隔离,保证预测时轻量和高效率
+
+架构图如下
+
+
+
+## 编译期和执行期严格隔离设计
+
+- compile time 优化完毕可以将优化信息存储到模型中;execution time 载入并执行
+- 两套 API 及对应的预测lib,满足不同场景
+ - `CxxPredictor` 打包了 `Compile Time` 和 `Execution Time`,可以 runtime 在具体硬件上做分析和优化,得到最优效果
+ - `MobilePredictor` 只打包 `Execution Time`,保持部署和执行的轻量
+
+## `Execution Time` 轻量级设计和实现
+
+- 每个 batch 实际执行只包含两个步骤执行
+ - `Op.InferShape`
+ - `Kernel.Run`,Kernel 相关参数均使用指针提前确定,后续无查找或传参消耗
+ - 设计目标,执行时,只有 kernel 计算本身消耗
+- 轻量级 `Op` 及 `Kernel` 设计,避免框架额外消耗
+ - `Op` 只有 `CreateKernels` 和 `InferShape` 两个重要职能
+ - `Kernel` 只有 `Run` 职能
+
+## 多硬件后端支持
+
+- 硬件通用行为,使用 `TargetWrapper` 模块做适配器适配,对上层框架提供一致界面
+- 框架上层策略保持硬件无关,如存储优化 (Memory optimize),计算剪枝 (Computation prune) 等,任何硬件接入均可直接复用
+- 框架支持了硬件通用行为,特定硬件细节不做过多约束,各硬件可以自行实现并接入框架
+- 计算模式上目前支持两种主流模式,一种是类似 X86, ARM CPU 等非异构设备;一种是 GPU,或 FPGA 等异构设备(支持 stream, event异步执行模式以及跨设备拷贝)
+
+---
+## 多硬件及算法混合调度支持
+`TensorTy` 用来表示 Tensor 类型
+
+```c++
+struct TensorTy {
+ TargetType target;
+ PrecisionType precision;
+ DataLayout layout;
+ int deviceid;
+};
+```
+
+```c++
+enum class TargetType { kARM, kX86, kCUDA, kOpenCL };
+enum class PrecisionType { kFP32, kFP16, kInt8, kInt16 };
+enum class DataLayout { kNCHW, kNHWC };
+```
+---
+
+注册 Kernel,确定特定 Kernel 的输入输出特征
+
+```c++
+REGISTER_LITE_KERNEL(
+ mul, kARM, kFloat, kNCHW, arm::MulCompute, def)
+ .BindInput("X", {LiteType::GetTensorTy(kARM, kFloat, kNCHW)})
+    .BindInput("Y", {LiteType::GetTensorTy(kARM, kFloat, kNCHW)})
+ .BindOutput("Out", {LiteType::GetTensorTy(kARM, kFloat, kNCHW)})
+ .Finalize();
+```
+
+---
+
+同一个 Op 的不同 Kernel 类似函数重载
+
+用于支持任意的混合调度:
+
+1. 标记模型中所有 tensor 的 Type
+2. 标记 Kernel 的 硬件、执行精度、data layout 等信息
+
+全局做类型推断,当发现 tensor 传递中有类型冲突,采用 type cast 操作,通过插入特定功能 Op 来实现正确的传导
+
+
+
+
+
+---
+
+## MIR 用于图分析优化
+
+基于 Type System 的 SSA,通过 IR Pass 对计算图进行分析和优化:
+
+- 支持对整个 graph 进行类型推断,发现类型冲突并加入 type cast op,来支持通用混合调度
+- 计算剪枝 (Compute prune),比如去掉 scale(1), assign op 等
+- 存储优化 (Memory optimize)
+- 操作融合 (Operator fuse)(已经支持 fc, conv_bn, ele_add+act 等6种 fuse 策略)
+- 支持量化处理(已支持 Int8预测)
\ No newline at end of file
diff --git a/benchmark.md b/benchmark.md
new file mode 100644
index 0000000000000000000000000000000000000000..8125e02218e52ab49cf59be30466e019fefddee1
--- /dev/null
+++ b/benchmark.md
@@ -0,0 +1,162 @@
+# Benchmark
+
+可以参考[benchmark_tools](https://github.com/PaddlePaddle/Paddle-Lite/wiki/benchmark_tools),推荐**一键benchmark**。
+
+## 测试环境
+
+* 测试模型
+ * fp32模型
+ * mobilenet_v1
+ * mobilenet_v2
+ * squeezenet_v1.1
+ * mnasnet
+ * shufflenet_v2
+
+ * int8模型
+ * mobilenet_v1
+ * mobilenet_v2
+ * resnet50
+
+* 测试机器(android ndk ndk-r17c)
+ * 骁龙855
+ * xiaomi mi9, snapdragon 855
+ * 4xA76(1@2.84GHz + 3@2.4GHz) + 4xA55@1.78GHz
+
+
+ * 骁龙845
+ * xiaomi mi8, 845
+ * 2.8GHz(大四核),1.7GHz(小四核)
+
+ * 骁龙835
+ * xiaomi mix2, snapdragon 835
+ * 2.45GHz(大四核),1.9GHz(小四核)
+
+ * 骁龙625
+ * oppo R9s, snapdragon625
+ * A53 x 8, big core@2.0GHz
+
+ * 骁龙653
+ * 360 N5, snapdragon 653
+ * 4 x A73@2.0GHz + 4 x A53@1.4GHz
+
+ * 麒麟970
+ * HUAWEI Mate10
+
+* 测试说明
+ * commit id: 12c129affaacd476e27a0a82b235a9d547d33f0f
+ * warmup=10, repeats=30,统计平均时间,单位是ms
+    * 当线程数为1时,```DeviceInfo::Global().SetRunMode```设置LITE_POWER_HIGH,否则设置LITE_POWER_NO_BIND
+ * 模型的输入图像的维度是{1, 3, 224, 224},输入图像的每一位数值是1
+
+## 测试数据
+
+### fp32 模型测试数据
+
+骁龙855 | armv8 | | |armv7 |||
+---- | ---- | ---- | ---- |---- |----| ----|
+num_threads | 1 | 2 | 4 |1 |2| 4
+ mobilenet_v1 | 31.64 | 18.98 | 10.67 | 33.17 | 19.55 | 11.43
+ mobilenet_v2 | 25.54 | 13.80 | 8.75 | 29.25 | 15.19 | 9.65
+ squeezenet_v1.1 | 26.81 | 14.39 | 8.92 | 28.63 | 15.37 | 9.53
+ mnasnet | 25.39 | 13.89 | 9.63 | 28.97 | 15.54 | 10.10
+ shufflenet_v2 | 13.85 | 7.81 | 5.87 | 14.64 | 8.35 | 6.14
+
+
+ 骁龙845 | armv8 | | |armv7 |||
+---- | ---- | ---- | ---- |---- |----| ----|
+num_threads | 1 | 2 | 4 |1 |2| 4
+ mobilenet_v1 | 62.04 | 33.63 | 18.63 | 66.23 | 35.78 | 20.14
+ mobilenet_v2 | 40.41 | 22.94 | 13.33 | 44.22 | 24.58 | 14.50
+ squeezenet_v1.1 | 49.92 | 23.78 | 13.86 | 52.00 | 24.85 | 15.87
+ mnasnet | 40.14 | 23.36 | 14.46 | 43.77 | 24.78 | 14.76
+ shufflenet_v2 | 22.27 | 13.69 | 8.96 | 26.11 | 14.95 | 9.02
+
+
+ 骁龙835 | armv8 | | |armv7 |||
+---- | ---- | ---- | ---- |---- |----| ----|
+num_threads | 1 | 2 | 4 |1 |2| 4
+ mobilenet_v1 | 89.57 | 50.88 | 27.62 | 96.11 | 53.18 | 31.99
+ mobilenet_v2 | 59.92 | 33.93 | 20.91 | 64.04 | 36.85 | 23.10
+ squeezenet_v1.1 | 65.25 | 37.92 | 23.40 | 74.87 | 40.96 | 23.69
+ mnasnet | 60.97 | 35.04 | 22.40 | 64.88 | 37.90 | 24.53
+ shufflenet_v2 | 30.87 | 19.33 | 12.78 | 31.71 | 19.52 | 13.25
+
+
+ 骁龙625 | armv8 | | |armv7 |||
+---- | ---- | ---- | ---- |---- |----| ----|
+num_threads | 1 | 2 | 4 |1 |2| 4
+ mobilenet_v1 | 180.98 | 92.27 | 51.51 | 216.12 | 110.33 | 61.68
+ mobilenet_v2 | 132.46 | 68.38 | 43.54 | 146.18 | 76.62 | 46.21
+ squeezenet_v1.1 | 124.49 | 66.84 | 41.53 | 153.28 | 82.42 | 47.14
+ mnasnet | 122.50 | 67.46 | 43.04 | 146.20 | 79.64 | 48.56
+ shufflenet_v2 | 68.70 | 40.77 | 26.53 | 75.38 | 42.40 | 28.36
+
+
+ 骁龙653 | armv8 | | |armv7 |||
+---- | ---- | ---- | ---- |---- |----| ----|
+num_threads | 1 | 2 | 4 |1 |2| 4
+ mobilenet_v1 | 121.27 | 59.36 | 34.06 | 126.55 | 64.96 | 39.23
+ mobilenet_v2 | 79.48 | 46.17 | 27.81 | 87.93 | 48.28 | 31.87
+ squeezenet_v1.1 | 81.10 | 42.66 | 42.07 | 82.29 | 45.88 | 28.84
+ mnasnet | 75.60 | 44.22 | 30.16 | 82.99 | 49.07 | 32.34
+ shufflenet_v2 | 39.18 | 23.54 | 16.73 | 40.12 | 24.76 | 17.68
+
+
+
+ 麒麟970 | armv8 | | |armv7 |||
+---- | ---- | ---- | ---- |---- |----| ----|
+num_threads | 1 | 2 | 4 |1 |2| 4
+ mobilenet_v1 | 99.58 | 56.91 | 29.02 | 102.42 | 57.81 | 35.36
+ mobilenet_v2 | 69.22 | 42.41 | 23.55 | 69.49 | 43.38 | 25.26
+ squeezenet_v1.1 | 67.48 | 41.06 | 24.47 | 75.03 | 43.57 | 26.35
+ mnasnet | 74.55 | 43.06 | 24.22 | 75.48 | 44.43 | 26.69
+ shufflenet_v2 | 39.20 | 24.54 | 16.34 | 37.40 | 24.32 | 16.66
+
+### int8 模型测试数据
+
+骁龙855 | armv8 | | |armv7 |||
+---- | ---- | ---- | ---- |---- |----| ----|
+num_threads | 1 | 2 | 4 |1 |2| 4
+ mobilenet_v1_int8 | 16.77 | 8.38 | 4.59 | 43.42 | 20.80 | 10.89
+ mobilenet_v2_int8 | 22.81 | 13.71 | 10.43 | 29.65 | 20.09 | 13.99
+ resnet50_int8 | 258.83 | 157.22 | 85.83 | 424.99 | 209.37 | 112.32
+
+ 骁龙845 | armv8 | | |armv7 |||
+---- | ---- | ---- | ---- |---- |----| ----|
+num_threads | 1 | 2 | 4 |1 |2| 4
+ mobilenet_v1_int8 | 44.08 | 23.75 | 12.52 | 49.19 | 26.77 | 13.82
+ mobilenet_v2_int8 | 36.61 | 22.70 | 15.29 | 40.51 | 25.84 | 17.89
+ resnet50_int8 | 399.64 | 217.74 | 112.86 | 408.80 | 224.72 | 122.15
+
+ 骁龙835 | armv8 | | |armv7 |||
+---- | ---- | ---- | ---- |---- |----| ----|
+num_threads | 1 | 2 | 4 |1 |2| 4
+ mobilenet_v1_int8 | 59.99 | 31.59 | 16.55 | 62.92 | 33.33 | 17.38
+ mobilenet_v2_int8 | 50.68 | 31.25 | 21.62 | 52.56 | 33.88 | 24.31
+ resnet50_int8 | 498.85 | 267.65 | 146.03 | 510.54 | 278.77 | 155.05
+
+ 骁龙625 | armv8 | | |armv7 |||
+---- | ---- | ---- | ---- |---- |----| ----|
+num_threads | 1 | 2 | 4 |1 |2| 4
+ mobilenet_v1_int8 | 122.86 | 63.52 | 33.91 | 125.77 | 64.78 | 34.25
+ mobilenet_v2_int8 | 110.71 | 67.76 | 49.85 | 114.63 | 71.74 | 51.73
+ resnet50_int8 | 954.67 | 505.78 | 286.64 | 1016.64 | 532.84 | 305.20
+
+ 骁龙653 | armv8 | | |armv7 |||
+---- | ---- | ---- | ---- |---- |----| ----|
+num_threads | 1 | 2 | 4 |1 |2| 4
+ mobilenet_v1_int8 | 81.46 | 42.99 | 31.69 | 81.20 | 42.46 | 23.47
+ mobilenet_v2_int8 | 68.39 | 43.47 | 32.03 | 69.40 | 44.47 | 33.46
+ resnet50_int8 | 687.59 | 369.70 | 208.99 | 684.55 | 369.04 | 208.42
+
+ 麒麟970 | armv8 | | |armv7 |||
+---- | ---- | ---- | ---- |---- |----| ----|
+num_threads | 1 | 2 | 4 |1 |2| 4
+ mobilenet_v1_int8 | 64.27 | 35.48 | 18.76 | 64.63 | 37.67 | 20.70
+ mobilenet_v2_int8 | 64.54 | 36.76 | 22.17 | 68.80 | 38.85 | 24.30
+ resnet50_int8 | 509.94 | 268.95 | 276.13 | 520.57 | 281.92 | 157.82
+
diff --git a/benchmark_tools.md b/benchmark_tools.md
new file mode 100644
index 0000000000000000000000000000000000000000..8148f712f0ec24d71277a2cb637dd84adb38c0c7
--- /dev/null
+++ b/benchmark_tools.md
@@ -0,0 +1,196 @@
+
+ * [Benchmark](#Benchmark)
+ * [环境准备](#环境准备)
+ * [1. 一键Benchmark](#一-一键benchmark)
+ * [2. 逐步Benchmark](#二-逐步Benchmark)
+ * [1. 获取benchmark可执行文件](#1-获取benchmark可执行文件)
+ * [2. 下载模型](#2-下载模型)
+ * [3. benchmark.sh脚本](#3-benchmark-sh脚本)
+ * [4. 测试](#4-测试)
+
+
+# Benchmark
+
+本文将会介绍,在**Ubuntu:16.04交叉编译环境**下,用安卓手机在终端测试Paddle-Lite的性能,并介绍两种Benchmark方法:
+
+1. **一键Benchmark**:适用于想快速获得常见模型性能的用户,下载预编译好的benchmark可执行文件;
+2. **逐步Benchmark**:将**一键Benchmark**流程拆解讲解。
+
+# 环境准备
+
+1. 准备[adb](https://developer.android.com/studio/command-line/adb)等必备软件:
+```shell
+sudo apt update
+sudo apt install -y wget adb
+```
+2. 检查手机与电脑连接。安卓手机USB连上电脑,打开设置 -> 开启开发者模式 -> 开启USB调试 -> 允许(授权)当前电脑调试手机;
+3. 在电脑终端输入`adb devices`命令,查看当前连接到的设备:
+```shell
+adb devices
+```
+命令成功执行,显示结果类似下面(序列码略有不同):
+```shell
+List of devices attached
+712QSDSEMMS7C device
+```
+
+## 一. 一键Benchmark
+
+执行以下命令,完成Benchmark:
+
+```shell
+wget -c https://paddle-inference-dist.bj.bcebos.com/PaddleLite/run_benchmark.sh
+sh run_benchmark.sh
+```
+
+该`run_benchmark.sh`脚本会:
+
+1. 下载模型,并上传手机:包含mobilenetv1/v2、shufflenetv2、squeezenetv1.1、mnasnet;
+2. 下载pre-built android-armv7和android-armv8的可执行文件,并上传手机:`benchmark_bin_v7`和`benchmark_bin_v8`;
+3. 自动执行另一个脚本`benchmark.sh`(多台手机连接USB,请在`benchmark.sh`脚本中对`adb`命令后加上测试手机的`serial number`);
+4. 从手机下载benchmark结果`result_armv7.txt`和`result_armv8.txt`,到当前目录,并显示Benchmark结果。
+
+## 二. 逐步Benchmark
+
+### 1. 获取benchmark可执行文件
+
+benchmark_bin文件可以测试PaddleLite的性能,有下面两种方式获得。
+
+#### 方式一:下载benchmark_bin可执行文件
+
+```shell
+# Download benchmark_bin for android-armv7
+wget -c https://paddle-inference-dist.bj.bcebos.com/PaddleLite/benchmark_bin_v7
+
+# Download benchmark_bin for android-armv8
+wget -c https://paddle-inference-dist.bj.bcebos.com/PaddleLite/benchmark_bin_v8
+```
+
+#### 方式二:由源码编译benchmark_bin文件
+
+根据[源码编译](./source_compile)准备编译环境,拉取PaddleLite最新release发布版代码,并在仓库根目录下,执行:
+
+```shell
+###########################################
+# Build benchmark_bin for android-armv7 #
+###########################################
+./lite/tools/ci_build.sh \
+ --arm_os="android" \
+ --arm_abi="armv7" \
+      --arm_lang="gcc" \
+ build_arm
+
+# build result see: /build.lite.android.armv7.gcc/lite/api/benchmark_bin
+
+###########################################
+# Build benchmark_bin for android-armv8 #
+###########################################
+./lite/tools/ci_build.sh \
+ --arm_os="android" \
+ --arm_abi="armv8" \
+      --arm_lang="gcc" \
+ build_arm
+
+# build result see: /build.lite.android.armv8.gcc/lite/api/benchmark_bin
+```
+
+> **注意**:为了避免在docker内部访问不到手机的问题,建议编译得到benchmark_bin后退出到docker外面,并且将benchmark_bin文件拷贝到一个临时目录。然后在该临时目录下,按照下面步骤下载模型、拷贝脚本、测试。
+
+### 2. 下载模型
+
+PaddleLite为Benchmark准备好了[常见Benchmark模型](https://paddle-inference-dist.bj.bcebos.com/PaddleLite/benchmark_models.tar.gz)。
+
+执行以下命令,下载常见Benchmark模型并解压:
+
+```shell
+wget -c https://paddle-inference-dist.bj.bcebos.com/PaddleLite/benchmark_models.tar.gz
+tar zxvf benchmark_models.tar.gz
+```
+
+| 模型 | 下载地址 |
+| --------------- | ------------------------------------------------------------ |
+| MobilenetV1 | [下载](https://paddle-inference-dist.bj.bcebos.com/PaddleLite/mobilenet_v1.tar.gz) |
+| MobilenetV2 | [下载](https://paddle-inference-dist.bj.bcebos.com/PaddleLite/mobilenet_v2.tar.gz) |
+| ShufflenetV2 | [下载](https://paddle-inference-dist.bj.bcebos.com/PaddleLite/shufflenet_v2.tar.gz) |
+| Squeezenet_V1.1 | [下载](https://paddle-inference-dist.bj.bcebos.com/PaddleLite/squeezenet_v11.tar.gz) |
+| Mnasnet | [下载](https://paddle-inference-dist.bj.bcebos.com/PaddleLite/mnasnet.tar.gz) |
+
+> 注:若要使用测试脚本,**对单个模型测试**,请把单个模型放入 `benchmark_models` 文件夹,并确保测试脚本、`benchmark_models`文件夹在同一级的目录。
+
+注:上述模型都已经使用`model_optimize_tool`进行转化,而且Lite移动端只支持加载转化后的模型。如果需要测试其他模型,请先参考[模型转化方法](./model_optimize_tool)。
+
+
+### 3. benchmark.sh脚本
+
+benchmark测试的执行脚本`benchmark.sh` 位于源码中的`/PaddleLite/lite/tools/benchmark.sh`位置,测试时需要将`benchmark.sh`、 `benchmark_bin` 、 `benchmark_models` 文件复制到同一目录下。
+
+### 4. 测试
+
+从终端进入benchmark.sh、可执行文件(benchmark_bin_v7、benchmark_bin_v8)和模型文件(benchmark_models)所在文件夹。
+
+运行 benchmark.sh 脚本执行测试
+
+```shell
+# Benchmark for android-armv7
+sh benchmark.sh ./benchmark_bin_v7 ./benchmark_models result_armv7.txt
+
+# Benchmark for android-armv8
+sh benchmark.sh ./benchmark_bin_v8 ./benchmark_models result_armv8.txt
+```
+测试结束后,armv7和armv8的结果,分别保存在当前目录下的`result_armv7.txt`和`result_armv8.txt`文件中。
+
+**查看测试结果**
+
+在当前目录的`result_armv7.txt`和`result_armv8.txt`文件,查看测试结果。
+
+```shell
+run benchmark armv7
+--------------------------------------
+PaddleLite Benchmark
+Threads=1 Warmup=10 Repeats=30
+-- mnasnet avg = 159.8427 ms
+-- mobilenet_v1 avg = 235.0072 ms
+-- mobilenet_v2 avg = 173.0387 ms
+-- shufflenet_v2 avg = 76.0040 ms
+-- squeezenet_v11 avg = 164.2957 ms
+
+Threads=2 Warmup=10 Repeats=30
+-- mnasnet avg = 83.1287 ms
+-- mobilenet_v1 avg = 121.6029 ms
+-- mobilenet_v2 avg = 86.6175 ms
+-- shufflenet_v2 avg = 41.5761 ms
+-- squeezenet_v11 avg = 87.8678 ms
+
+Threads=4 Warmup=10 Repeats=30
+-- mnasnet avg = 73.3880 ms
+-- mobilenet_v1 avg = 119.0739 ms
+-- mobilenet_v2 avg = 85.3050 ms
+-- shufflenet_v2 avg = 38.0762 ms
+-- squeezenet_v11 avg = 64.2201 ms
+--------------------------------------
+
+run benchmark armv8
+--------------------------------------
+PaddleLite Benchmark
+Threads=1 Warmup=10 Repeats=30
+-- mnasnet avg = 165.3073 ms
+-- mobilenet_v1 avg = 306.0188 ms
+-- mobilenet_v2 avg = 195.1884 ms
+-- shufflenet_v2 avg = 99.3692 ms
+-- squeezenet_v11 avg = 156.6971 ms
+
+Threads=2 Warmup=10 Repeats=30
+-- mnasnet avg = 90.2290 ms
+-- mobilenet_v1 avg = 157.0007 ms
+-- mobilenet_v2 avg = 118.1607 ms
+-- shufflenet_v2 avg = 68.6804 ms
+-- squeezenet_v11 avg = 91.3090 ms
+
+Threads=4 Warmup=10 Repeats=30
+-- mnasnet avg = 179.9730 ms
+-- mobilenet_v1 avg = 204.0684 ms
+-- mobilenet_v2 avg = 181.6486 ms
+-- shufflenet_v2 avg = 123.2728 ms
+-- squeezenet_v11 avg = 412.9046 ms
+--------------------------------------
+```
\ No newline at end of file
diff --git a/benchmark_tools.md.toc.2019-08-25_233116 b/benchmark_tools.md.toc.2019-08-25_233116
new file mode 100644
index 0000000000000000000000000000000000000000..6fbec144e8cce694daae03bffee89c70f47f9992
--- /dev/null
+++ b/benchmark_tools.md.toc.2019-08-25_233116
@@ -0,0 +1,11 @@
+ * [Benchmark 测试方法](#benchmark-测试方法)
+ * [1. 一键Benchmark](#1-一键benchmark)
+ * [2. 逐步测试说明](#2-逐步测试说明)
+ * [1. benchmark可执行文件](#1-benchmark可执行文件)
+ * [2. 下载模型](#2-下载模型)
+ * [3. benchmark.sh 脚本](#3-benchmarksh-脚本)
+ * [4. 测试](#4-测试)
+ * [3. 完整实例](#3-完整实例)
+
+
+
diff --git a/benchmark_tools.md.toc.2019-08-25_233528 b/benchmark_tools.md.toc.2019-08-25_233528
new file mode 100644
index 0000000000000000000000000000000000000000..238a7cb053ffec84a6e4b831b6a4bfd64ba4ec8a
--- /dev/null
+++ b/benchmark_tools.md.toc.2019-08-25_233528
@@ -0,0 +1,11 @@
+ * [Benchmark 测试方法](#benchmark-测试方法)
+ * [1. 一键Benchmark](#1-一键benchmark)
+ * [2. 逐步测试说明](#2-逐步测试说明)
+ * [1. benchmark可执行文件](#1-benchmark可执行文件)
+ * [2. 下载模型](#2-下载模型)
+ * [3. benchmark.sh 脚本](#3-benchmarksh-脚本)
+ * [4. 测试](#4-测试)
+ * [3. 完整实例](#3-完整实例)
+
+
+
diff --git a/cmake/FindGflags.cmake b/cmake/FindGflags.cmake
deleted file mode 100644
index 6587089ba382dc09195298b486da6630f2df236b..0000000000000000000000000000000000000000
--- a/cmake/FindGflags.cmake
+++ /dev/null
@@ -1,582 +0,0 @@
-# Ceres Solver - A fast non-linear least squares minimizer
-# Copyright 2015 Google Inc. All rights reserved.
-# http://ceres-solver.org/
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice,
-# this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-# * Neither the name of Google Inc. nor the names of its contributors may be
-# used to endorse or promote products derived from this software without
-# specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
-#
-# Author: alexs.mac@gmail.com (Alex Stewart)
-#
-
-# FindGflags.cmake - Find Google gflags logging library.
-#
-# This module will attempt to find gflags, either via an exported CMake
-# configuration (generated by gflags >= 2.1 which are built with CMake), or
-# by performing a standard search for all gflags components. The order of
-# precedence for these two methods of finding gflags is controlled by:
-# GFLAGS_PREFER_EXPORTED_GFLAGS_CMAKE_CONFIGURATION.
-#
-# This module defines the following variables:
-#
-# GFLAGS_FOUND: TRUE iff gflags is found.
-# GFLAGS_INCLUDE_DIRS: Include directories for gflags.
-# GFLAGS_LIBRARIES: Libraries required to link gflags.
-# GFLAGS_NAMESPACE: The namespace in which gflags is defined. In versions of
-# gflags < 2.1, this was google, for versions >= 2.1 it is
-# by default gflags, although can be configured when building
-# gflags to be something else (i.e. google for legacy
-# compatibility).
-#
-# The following variables control the behaviour of this module when an exported
-# gflags CMake configuration is not found.
-#
-# GFLAGS_PREFER_EXPORTED_GFLAGS_CMAKE_CONFIGURATION: TRUE/FALSE, iff TRUE then
-# then prefer using an exported CMake configuration
-# generated by gflags >= 2.1 over searching for the
-# gflags components manually. Otherwise (FALSE)
-# ignore any exported gflags CMake configurations and
-# always perform a manual search for the components.
-# Default: TRUE iff user does not define this variable
-# before we are called, and does NOT specify either
-# GFLAGS_INCLUDE_DIR_HINTS or GFLAGS_LIBRARY_DIR_HINTS
-# otherwise FALSE.
-# GFLAGS_INCLUDE_DIR_HINTS: List of additional directories in which to
-# search for gflags includes, e.g: /timbuktu/include.
-# GFLAGS_LIBRARY_DIR_HINTS: List of additional directories in which to
-# search for gflags libraries, e.g: /timbuktu/lib.
-#
-# The following variables are also defined by this module, but in line with
-# CMake recommended FindPackage() module style should NOT be referenced directly
-# by callers (use the plural variables detailed above instead). These variables
-# do however affect the behaviour of the module via FIND_[PATH/LIBRARY]() which
-# are NOT re-called (i.e. search for library is not repeated) if these variables
-# are set with valid values _in the CMake cache_. This means that if these
-# variables are set directly in the cache, either by the user in the CMake GUI,
-# or by the user passing -DVAR=VALUE directives to CMake when called (which
-# explicitly defines a cache variable), then they will be used verbatim,
-# bypassing the HINTS variables and other hard-coded search locations.
-#
-# GFLAGS_INCLUDE_DIR: Include directory for gflags, not including the
-# include directory of any dependencies.
-# GFLAGS_LIBRARY: gflags library, not including the libraries of any
-# dependencies.
-
-# Reset CALLERS_CMAKE_FIND_LIBRARY_PREFIXES to its value when FindGflags was
-# invoked, necessary for MSVC.
-macro(GFLAGS_RESET_FIND_LIBRARY_PREFIX)
- if (MSVC)
- set(CMAKE_FIND_LIBRARY_PREFIXES "${CALLERS_CMAKE_FIND_LIBRARY_PREFIXES}")
- endif (MSVC)
-endmacro(GFLAGS_RESET_FIND_LIBRARY_PREFIX)
-
-# Called if we failed to find gflags or any of it's required dependencies,
-# unsets all public (designed to be used externally) variables and reports
-# error message at priority depending upon [REQUIRED/QUIET/] argument.
-macro(GFLAGS_REPORT_NOT_FOUND REASON_MSG)
- unset(GFLAGS_FOUND)
- unset(GFLAGS_INCLUDE_DIRS)
- unset(GFLAGS_LIBRARIES)
- # Do not use unset, as we want to keep GFLAGS_NAMESPACE in the cache,
- # but simply clear its value.
- set(GFLAGS_NAMESPACE "" CACHE STRING
- "gflags namespace (google or gflags)" FORCE)
-
- # Make results of search visible in the CMake GUI if gflags has not
- # been found so that user does not have to toggle to advanced view.
- mark_as_advanced(CLEAR GFLAGS_INCLUDE_DIR
- GFLAGS_LIBRARY
- GFLAGS_NAMESPACE)
-
- gflags_reset_find_library_prefix()
-
- # Note _FIND_[REQUIRED/QUIETLY] variables defined by FindPackage()
- # use the camelcase library name, not uppercase.
- if (Gflags_FIND_QUIETLY)
- message(STATUS "Failed to find gflags - " ${REASON_MSG} ${ARGN})
- elseif (Gflags_FIND_REQUIRED)
- message(FATAL_ERROR "Failed to find gflags - " ${REASON_MSG} ${ARGN})
- else()
- # Neither QUIETLY nor REQUIRED, use no priority which emits a message
- # but continues configuration and allows generation.
- message("-- Failed to find gflags - " ${REASON_MSG} ${ARGN})
- endif ()
- return()
-endmacro(GFLAGS_REPORT_NOT_FOUND)
-
-# Verify that all variable names passed as arguments are defined (can be empty
-# but must be defined) or raise a fatal error.
-macro(GFLAGS_CHECK_VARS_DEFINED)
- foreach(CHECK_VAR ${ARGN})
- if (NOT DEFINED ${CHECK_VAR})
- message(FATAL_ERROR "Ceres Bug: ${CHECK_VAR} is not defined.")
- endif()
- endforeach()
-endmacro(GFLAGS_CHECK_VARS_DEFINED)
-
-# Use check_cxx_source_compiles() to compile trivial test programs to determine
-# the gflags namespace. This works on all OSs except Windows. If using Visual
-# Studio, it fails because msbuild forces check_cxx_source_compiles() to use
-# CMAKE_BUILD_TYPE=Debug for the test project, which usually breaks detection
-# because MSVC requires that the test project use the same build type as gflags,
-# which would normally be built in Release.
-#
-# Defines: GFLAGS_NAMESPACE in the caller's scope with the detected namespace,
-# which is blank (empty string, will test FALSE is CMake conditionals)
-# if detection failed.
-function(GFLAGS_CHECK_GFLAGS_NAMESPACE_USING_TRY_COMPILE)
- # Verify that all required variables are defined.
- gflags_check_vars_defined(
- GFLAGS_INCLUDE_DIR GFLAGS_LIBRARY)
- # Ensure that GFLAGS_NAMESPACE is always unset on completion unless
- # we explicitly set if after having the correct namespace.
- set(GFLAGS_NAMESPACE "" PARENT_SCOPE)
-
- include(CheckCXXSourceCompiles)
- # Setup include path & link library for gflags for CHECK_CXX_SOURCE_COMPILES.
- set(CMAKE_REQUIRED_INCLUDES ${GFLAGS_INCLUDE_DIR})
- set(CMAKE_REQUIRED_LIBRARIES ${GFLAGS_LIBRARY} ${GFLAGS_LINK_LIBRARIES})
- # First try the (older) google namespace. Note that the output variable
- # MUST be unique to the build type as otherwise the test is not repeated as
- # it is assumed to have already been performed.
- check_cxx_source_compiles(
- "#include
- int main(int argc, char * argv[]) {
- google::ParseCommandLineFlags(&argc, &argv, true);
- return 0;
- }"
- GFLAGS_IN_GOOGLE_NAMESPACE)
- if (GFLAGS_IN_GOOGLE_NAMESPACE)
- set(GFLAGS_NAMESPACE google PARENT_SCOPE)
- return()
- endif()
-
- # Try (newer) gflags namespace instead. Note that the output variable
- # MUST be unique to the build type as otherwise the test is not repeated as
- # it is assumed to have already been performed.
- set(CMAKE_REQUIRED_INCLUDES ${GFLAGS_INCLUDE_DIR})
- set(CMAKE_REQUIRED_LIBRARIES ${GFLAGS_LIBRARY} ${GFLAGS_LINK_LIBRARIES})
- check_cxx_source_compiles(
- "#include
- int main(int argc, char * argv[]) {
- gflags::ParseCommandLineFlags(&argc, &argv, true);
- return 0;
- }"
- GFLAGS_IN_GFLAGS_NAMESPACE)
- if (GFLAGS_IN_GFLAGS_NAMESPACE)
- set(GFLAGS_NAMESPACE gflags PARENT_SCOPE)
- return()
- endif (GFLAGS_IN_GFLAGS_NAMESPACE)
-endfunction(GFLAGS_CHECK_GFLAGS_NAMESPACE_USING_TRY_COMPILE)
-
-# Use regex on the gflags headers to attempt to determine the gflags namespace.
-# Checks both gflags.h (contained namespace on versions < 2.1.2) and
-# gflags_declare.h, which contains the namespace on versions >= 2.1.2.
-# In general, this method should only be used when
-# GFLAGS_CHECK_GFLAGS_NAMESPACE_USING_TRY_COMPILE() cannot be used, or has
-# failed.
-#
-# Defines: GFLAGS_NAMESPACE in the caller's scope with the detected namespace,
-# which is blank (empty string, will test FALSE is CMake conditionals)
-# if detection failed.
-function(GFLAGS_CHECK_GFLAGS_NAMESPACE_USING_REGEX)
- # Verify that all required variables are defined.
- gflags_check_vars_defined(GFLAGS_INCLUDE_DIR)
- # Ensure that GFLAGS_NAMESPACE is always undefined on completion unless
- # we explicitly set if after having the correct namespace.
- set(GFLAGS_NAMESPACE "" PARENT_SCOPE)
-
- # Scan gflags.h to identify what namespace gflags was built with. On
- # versions of gflags < 2.1.2, gflags.h was configured with the namespace
- # directly, on >= 2.1.2, gflags.h uses the GFLAGS_NAMESPACE #define which
- # is defined in gflags_declare.h, we try each location in turn.
- set(GFLAGS_HEADER_FILE ${GFLAGS_INCLUDE_DIR}/gflags/gflags.h)
- if (NOT EXISTS ${GFLAGS_HEADER_FILE})
- gflags_report_not_found(
- "Could not find file: ${GFLAGS_HEADER_FILE} "
- "containing namespace information in gflags install located at: "
- "${GFLAGS_INCLUDE_DIR}.")
- endif()
- file(READ ${GFLAGS_HEADER_FILE} GFLAGS_HEADER_FILE_CONTENTS)
-
- string(REGEX MATCH "namespace [A-Za-z]+"
- GFLAGS_NAMESPACE "${GFLAGS_HEADER_FILE_CONTENTS}")
- string(REGEX REPLACE "namespace ([A-Za-z]+)" "\\1"
- GFLAGS_NAMESPACE "${GFLAGS_NAMESPACE}")
-
- if (NOT GFLAGS_NAMESPACE)
- gflags_report_not_found(
- "Failed to extract gflags namespace from header file: "
- "${GFLAGS_HEADER_FILE}.")
- endif (NOT GFLAGS_NAMESPACE)
-
- if (GFLAGS_NAMESPACE STREQUAL "google" OR
- GFLAGS_NAMESPACE STREQUAL "gflags")
- # Found valid gflags namespace from gflags.h.
- set(GFLAGS_NAMESPACE "${GFLAGS_NAMESPACE}" PARENT_SCOPE)
- return()
- endif()
-
- # Failed to find gflags namespace from gflags.h, gflags is likely a new
- # version, check gflags_declare.h, which in newer versions (>= 2.1.2) contains
- # the GFLAGS_NAMESPACE #define, which is then referenced in gflags.h.
- set(GFLAGS_DECLARE_FILE ${GFLAGS_INCLUDE_DIR}/gflags/gflags_declare.h)
- if (NOT EXISTS ${GFLAGS_DECLARE_FILE})
- gflags_report_not_found(
- "Could not find file: ${GFLAGS_DECLARE_FILE} "
- "containing namespace information in gflags install located at: "
- "${GFLAGS_INCLUDE_DIR}.")
- endif()
- file(READ ${GFLAGS_DECLARE_FILE} GFLAGS_DECLARE_FILE_CONTENTS)
-
- string(REGEX MATCH "#define GFLAGS_NAMESPACE [A-Za-z]+"
- GFLAGS_NAMESPACE "${GFLAGS_DECLARE_FILE_CONTENTS}")
- string(REGEX REPLACE "#define GFLAGS_NAMESPACE ([A-Za-z]+)" "\\1"
- GFLAGS_NAMESPACE "${GFLAGS_NAMESPACE}")
-
- if (NOT GFLAGS_NAMESPACE)
- gflags_report_not_found(
- "Failed to extract gflags namespace from declare file: "
- "${GFLAGS_DECLARE_FILE}.")
- endif (NOT GFLAGS_NAMESPACE)
-
- if (GFLAGS_NAMESPACE STREQUAL "google" OR
- GFLAGS_NAMESPACE STREQUAL "gflags")
- # Found valid gflags namespace from gflags.h.
- set(GFLAGS_NAMESPACE "${GFLAGS_NAMESPACE}" PARENT_SCOPE)
- return()
- endif()
-endfunction(GFLAGS_CHECK_GFLAGS_NAMESPACE_USING_REGEX)
-
-# -----------------------------------------------------------------
-# By default, if the user has expressed no preference for using an exported
-# gflags CMake configuration over performing a search for the installed
-# components, and has not specified any hints for the search locations, then
-# prefer a gflags exported configuration if available.
-if (NOT DEFINED GFLAGS_PREFER_EXPORTED_GFLAGS_CMAKE_CONFIGURATION
- AND NOT GFLAGS_INCLUDE_DIR_HINTS
- AND NOT GFLAGS_LIBRARY_DIR_HINTS)
- message(STATUS "No preference for use of exported gflags CMake configuration "
- "set, and no hints for include/library directories provided. "
- "Defaulting to preferring an installed/exported gflags CMake configuration "
- "if available.")
- set(GFLAGS_PREFER_EXPORTED_GFLAGS_CMAKE_CONFIGURATION TRUE)
-endif()
-
-if (GFLAGS_PREFER_EXPORTED_GFLAGS_CMAKE_CONFIGURATION)
- # Try to find an exported CMake configuration for gflags, as generated by
- # gflags versions >= 2.1.
- #
- # We search twice, s/t we can invert the ordering of precedence used by
- # find_package() for exported package build directories, and installed
- # packages (found via CMAKE_SYSTEM_PREFIX_PATH), listed as items 6) and 7)
- # respectively in [1].
- #
- # By default, exported build directories are (in theory) detected first, and
- # this is usually the case on Windows. However, on OS X & Linux, the install
- # path (/usr/local) is typically present in the PATH environment variable
- # which is checked in item 4) in [1] (i.e. before both of the above, unless
- # NO_SYSTEM_ENVIRONMENT_PATH is passed). As such on those OSs installed
- # packages are usually detected in preference to exported package build
- # directories.
- #
- # To ensure a more consistent response across all OSs, and as users usually
- # want to prefer an installed version of a package over a locally built one
- # where both exist (esp. as the exported build directory might be removed
- # after installation), we first search with NO_CMAKE_PACKAGE_REGISTRY which
- # means any build directories exported by the user are ignored, and thus
- # installed directories are preferred. If this fails to find the package
- # we then research again, but without NO_CMAKE_PACKAGE_REGISTRY, so any
- # exported build directories will now be detected.
- #
- # To prevent confusion on Windows, we also pass NO_CMAKE_BUILDS_PATH (which
- # is item 5) in [1]), to not preferentially use projects that were built
- # recently with the CMake GUI to ensure that we always prefer an installed
- # version if available.
- #
- # [1] http://www.cmake.org/cmake/help/v2.8.11/cmake.html#command:find_package
- find_package(gflags QUIET
- NO_MODULE
- NO_CMAKE_PACKAGE_REGISTRY
- NO_CMAKE_BUILDS_PATH)
- if (gflags_FOUND)
- message(STATUS "Found installed version of gflags: ${gflags_DIR}")
- else(gflags_FOUND)
- # Failed to find an installed version of gflags, repeat search allowing
- # exported build directories.
- message(STATUS "Failed to find installed gflags CMake configuration, "
- "searching for gflags build directories exported with CMake.")
- # Again pass NO_CMAKE_BUILDS_PATH, as we know that gflags is exported and
- # do not want to treat projects built with the CMake GUI preferentially.
- find_package(gflags QUIET
- NO_MODULE
- NO_CMAKE_BUILDS_PATH)
- if (gflags_FOUND)
- message(STATUS "Found exported gflags build directory: ${gflags_DIR}")
- endif(gflags_FOUND)
- endif(gflags_FOUND)
-
- set(FOUND_INSTALLED_GFLAGS_CMAKE_CONFIGURATION ${gflags_FOUND})
-
- # gflags v2.1 - 2.1.2 shipped with a bug in their gflags-config.cmake [1]
- # whereby gflags_LIBRARIES = "gflags", but there was no imported target
- # called "gflags", they were called: gflags[_nothreads]-[static/shared].
- # As this causes linker errors when gflags is not installed in a location
- # on the current library paths, detect if this problem is present and
- # fix it.
- #
- # [1] https://github.com/gflags/gflags/issues/110
- if (gflags_FOUND)
- # NOTE: This is not written as additional conditions in the outer
- # if (gflags_FOUND) as the NOT TARGET "${gflags_LIBRARIES}"
- # condition causes problems if gflags is not found.
- if (${gflags_VERSION} VERSION_LESS 2.1.3 AND
- NOT TARGET "${gflags_LIBRARIES}")
- message(STATUS "Detected broken gflags install in: ${gflags_DIR}, "
- "version: ${gflags_VERSION} <= 2.1.2 which defines gflags_LIBRARIES = "
- "${gflags_LIBRARIES} which is not an imported CMake target, see: "
- "https://github.com/gflags/gflags/issues/110. Attempting to fix by "
- "detecting correct gflags target.")
- # Ordering here expresses preference for detection, specifically we do not
- # want to use the _nothreads variants if the full library is available.
- list(APPEND CHECK_GFLAGS_IMPORTED_TARGET_NAMES
- gflags-shared gflags-static
- gflags_nothreads-shared gflags_nothreads-static)
- foreach(CHECK_GFLAGS_TARGET ${CHECK_GFLAGS_IMPORTED_TARGET_NAMES})
- if (TARGET ${CHECK_GFLAGS_TARGET})
- message(STATUS "Found valid gflags target: ${CHECK_GFLAGS_TARGET}, "
- "updating gflags_LIBRARIES.")
- set(gflags_LIBRARIES ${CHECK_GFLAGS_TARGET})
- break()
- endif()
- endforeach()
- if (NOT TARGET ${gflags_LIBRARIES})
- message(STATUS "Failed to fix detected broken gflags install in: "
- "${gflags_DIR}, version: ${gflags_VERSION} <= 2.1.2, none of the "
- "imported targets for gflags: ${CHECK_GFLAGS_IMPORTED_TARGET_NAMES} "
- "are defined. Will continue with a manual search for gflags "
- "components. We recommend you build/install a version of gflags > "
- "2.1.2 (or master).")
- set(FOUND_INSTALLED_GFLAGS_CMAKE_CONFIGURATION FALSE)
- endif()
- endif()
- endif()
-
- if (FOUND_INSTALLED_GFLAGS_CMAKE_CONFIGURATION)
- message(STATUS "Detected gflags version: ${gflags_VERSION}")
- set(GFLAGS_FOUND ${gflags_FOUND})
- set(GFLAGS_INCLUDE_DIR ${gflags_INCLUDE_DIR})
- set(GFLAGS_LIBRARY ${gflags_LIBRARIES})
-
- # gflags does not export the namespace in their CMake configuration, so
- # use our function to determine what it should be, as it can be either
- # gflags or google dependent upon version & configuration.
- #
- # NOTE: We use the regex method to determine the namespace here, as
- # check_cxx_source_compiles() will not use imported targets, which
- # is what gflags will be in this case.
- gflags_check_gflags_namespace_using_regex()
-
- if (NOT GFLAGS_NAMESPACE)
- gflags_report_not_found(
- "Failed to determine gflags namespace using regex for gflags "
- "version: ${gflags_VERSION} exported here: ${gflags_DIR} using CMake.")
- endif (NOT GFLAGS_NAMESPACE)
- else (FOUND_INSTALLED_GFLAGS_CMAKE_CONFIGURATION)
- message(STATUS "Failed to find an installed/exported CMake configuration "
- "for gflags, will perform search for installed gflags components.")
- endif (FOUND_INSTALLED_GFLAGS_CMAKE_CONFIGURATION)
-endif(GFLAGS_PREFER_EXPORTED_GFLAGS_CMAKE_CONFIGURATION)
-
-if (NOT GFLAGS_FOUND)
- # Either failed to find an exported gflags CMake configuration, or user
- # told us not to use one. Perform a manual search for all gflags components.
-
- # Handle possible presence of lib prefix for libraries on MSVC, see
- # also GFLAGS_RESET_FIND_LIBRARY_PREFIX().
- if (MSVC)
- # Preserve the caller's original values for CMAKE_FIND_LIBRARY_PREFIXES
- # s/t we can set it back before returning.
- set(CALLERS_CMAKE_FIND_LIBRARY_PREFIXES "${CMAKE_FIND_LIBRARY_PREFIXES}")
- # The empty string in this list is important, it represents the case when
- # the libraries have no prefix (shared libraries / DLLs).
- set(CMAKE_FIND_LIBRARY_PREFIXES "lib" "" "${CMAKE_FIND_LIBRARY_PREFIXES}")
- endif (MSVC)
-
- # Search user-installed locations first, so that we prefer user installs
- # to system installs where both exist.
- list(APPEND GFLAGS_CHECK_INCLUDE_DIRS
- /usr/local/include
- /usr/local/homebrew/include # Mac OS X
- /opt/local/var/macports/software # Mac OS X.
- /opt/local/include
- /usr/include)
- list(APPEND GFLAGS_CHECK_PATH_SUFFIXES
- gflags/include # Windows (for C:/Program Files prefix).
- gflags/Include ) # Windows (for C:/Program Files prefix).
-
- list(APPEND GFLAGS_CHECK_LIBRARY_DIRS
- /usr/local/lib
- /usr/local/homebrew/lib # Mac OS X.
- /opt/local/lib
- /usr/lib)
- list(APPEND GFLAGS_CHECK_LIBRARY_SUFFIXES
- gflags/lib # Windows (for C:/Program Files prefix).
- gflags/Lib ) # Windows (for C:/Program Files prefix).
-
- # Search supplied hint directories first if supplied.
- find_path(GFLAGS_INCLUDE_DIR
- NAMES gflags/gflags.h
- PATHS ${GFLAGS_INCLUDE_DIR_HINTS}
- ${GFLAGS_CHECK_INCLUDE_DIRS}
- PATH_SUFFIXES ${GFLAGS_CHECK_PATH_SUFFIXES})
- if (NOT GFLAGS_INCLUDE_DIR OR
- NOT EXISTS ${GFLAGS_INCLUDE_DIR})
- gflags_report_not_found(
- "Could not find gflags include directory, set GFLAGS_INCLUDE_DIR "
- "to directory containing gflags/gflags.h")
- endif (NOT GFLAGS_INCLUDE_DIR OR
- NOT EXISTS ${GFLAGS_INCLUDE_DIR})
-
- find_library(GFLAGS_LIBRARY NAMES gflags
- PATHS ${GFLAGS_LIBRARY_DIR_HINTS}
- ${GFLAGS_CHECK_LIBRARY_DIRS}
- PATH_SUFFIXES ${GFLAGS_CHECK_LIBRARY_SUFFIXES})
- if (NOT GFLAGS_LIBRARY OR
- NOT EXISTS ${GFLAGS_LIBRARY})
- gflags_report_not_found(
- "Could not find gflags library, set GFLAGS_LIBRARY "
- "to full path to libgflags.")
- endif (NOT GFLAGS_LIBRARY OR
- NOT EXISTS ${GFLAGS_LIBRARY})
-
- # gflags typically requires a threading library (which is OS dependent), note
- # that this defines the CMAKE_THREAD_LIBS_INIT variable. If we are able to
- # detect threads, we assume that gflags requires it.
- find_package(Threads QUIET)
- set(GFLAGS_LINK_LIBRARIES ${CMAKE_THREAD_LIBS_INIT})
- # On Windows (including MinGW), the Shlwapi library is used by gflags if
- # available.
- if (WIN32)
- include(CheckIncludeFileCXX)
- check_include_file_cxx("shlwapi.h" HAVE_SHLWAPI)
- if (HAVE_SHLWAPI)
- list(APPEND GFLAGS_LINK_LIBRARIES shlwapi.lib)
- endif(HAVE_SHLWAPI)
- endif (WIN32)
-
- # Mark internally as found, then verify. GFLAGS_REPORT_NOT_FOUND() unsets
- # if called.
- set(GFLAGS_FOUND TRUE)
-
- # Identify what namespace gflags was built with.
- if (GFLAGS_INCLUDE_DIR AND NOT GFLAGS_NAMESPACE)
- # To handle Windows peculiarities / CMake bugs on MSVC we try two approaches
- # to detect the gflags namespace:
- #
- # 1) Try to use check_cxx_source_compiles() to compile a trivial program
- # with the two choices for the gflags namespace.
- #
- # 2) [In the event 1) fails] Use regex on the gflags headers to try to
- # determine the gflags namespace. Whilst this is less robust than 1),
- # it does avoid any interaction with msbuild.
- gflags_check_gflags_namespace_using_try_compile()
-
- if (NOT GFLAGS_NAMESPACE)
- # Failed to determine gflags namespace using check_cxx_source_compiles()
- # method, try and obtain it using regex on the gflags headers instead.
- message(STATUS "Failed to find gflags namespace using using "
- "check_cxx_source_compiles(), trying namespace regex instead, "
- "this is expected on Windows.")
- gflags_check_gflags_namespace_using_regex()
-
- if (NOT GFLAGS_NAMESPACE)
- gflags_report_not_found(
- "Failed to determine gflags namespace either by "
- "check_cxx_source_compiles(), or namespace regex.")
- endif (NOT GFLAGS_NAMESPACE)
- endif (NOT GFLAGS_NAMESPACE)
- endif (GFLAGS_INCLUDE_DIR AND NOT GFLAGS_NAMESPACE)
-
- # Make the GFLAGS_NAMESPACE a cache variable s/t the user can view it, and could
- # overwrite it in the CMake GUI.
- set(GFLAGS_NAMESPACE "${GFLAGS_NAMESPACE}" CACHE STRING
- "gflags namespace (google or gflags)" FORCE)
-
- # gflags does not seem to provide any record of the version in its
- # source tree, thus cannot extract version.
-
- # Catch case when caller has set GFLAGS_NAMESPACE in the cache / GUI
- # with an invalid value.
- if (GFLAGS_NAMESPACE AND
- NOT GFLAGS_NAMESPACE STREQUAL "google" AND
- NOT GFLAGS_NAMESPACE STREQUAL "gflags")
- gflags_report_not_found(
- "Caller defined GFLAGS_NAMESPACE:"
- " ${GFLAGS_NAMESPACE} is not valid, not google or gflags.")
- endif ()
- # Catch case when caller has set GFLAGS_INCLUDE_DIR in the cache / GUI and
- # thus FIND_[PATH/LIBRARY] are not called, but specified locations are
- # invalid, otherwise we would report the library as found.
- if (GFLAGS_INCLUDE_DIR AND
- NOT EXISTS ${GFLAGS_INCLUDE_DIR}/gflags/gflags.h)
- gflags_report_not_found(
- "Caller defined GFLAGS_INCLUDE_DIR:"
- " ${GFLAGS_INCLUDE_DIR} does not contain gflags/gflags.h header.")
- endif (GFLAGS_INCLUDE_DIR AND
- NOT EXISTS ${GFLAGS_INCLUDE_DIR}/gflags/gflags.h)
- # TODO: This regex for gflags library is pretty primitive, we use lowercase
- # for comparison to handle Windows using CamelCase library names, could
- # this check be better?
- string(TOLOWER "${GFLAGS_LIBRARY}" LOWERCASE_GFLAGS_LIBRARY)
- if (GFLAGS_LIBRARY AND
- NOT "${LOWERCASE_GFLAGS_LIBRARY}" MATCHES ".*gflags[^/]*")
- gflags_report_not_found(
- "Caller defined GFLAGS_LIBRARY: "
- "${GFLAGS_LIBRARY} does not match gflags.")
- endif (GFLAGS_LIBRARY AND
- NOT "${LOWERCASE_GFLAGS_LIBRARY}" MATCHES ".*gflags[^/]*")
-
- gflags_reset_find_library_prefix()
-
-endif(NOT GFLAGS_FOUND)
-
-# Set standard CMake FindPackage variables if found.
-if (GFLAGS_FOUND)
- set(GFLAGS_INCLUDE_DIRS ${GFLAGS_INCLUDE_DIR})
- set(GFLAGS_LIBRARIES ${GFLAGS_LIBRARY} ${GFLAGS_LINK_LIBRARIES})
-endif (GFLAGS_FOUND)
-
-# Handle REQUIRED / QUIET optional arguments.
-include(FindPackageHandleStandardArgs)
-find_package_handle_standard_args(Gflags DEFAULT_MSG
- GFLAGS_INCLUDE_DIRS GFLAGS_LIBRARIES GFLAGS_NAMESPACE)
-
-# Only mark internal variables as advanced if we found gflags, otherwise
-# leave them visible in the standard GUI for the user to set manually.
-if (GFLAGS_FOUND)
- mark_as_advanced(FORCE GFLAGS_INCLUDE_DIR
- GFLAGS_LIBRARY
- GFLAGS_NAMESPACE
- gflags_DIR) # Autogenerated by find_package(gflags)
-endif (GFLAGS_FOUND)
diff --git a/cmake/FindGlog.cmake b/cmake/FindGlog.cmake
deleted file mode 100644
index 142e2ca96ba76d46a4cd59518e384258e826dcca..0000000000000000000000000000000000000000
--- a/cmake/FindGlog.cmake
+++ /dev/null
@@ -1,24 +0,0 @@
-#
-# Find libglog
-#
-# LIBGLOG_INCLUDE_DIR - where to find glog/logging.h, etc.
-# LIBGLOG_LIBRARY - List of libraries when using libglog.
-# LIBGLOG_FOUND - True if libglog found.
-#
-# from https://github.com/facebook/hhvm/blob/master/CMake/FindGlog.cmake
-
-IF (LIBGLOG_INCLUDE_DIR)
- # Already in cache, be silent
- SET(LIBGLOG_FIND_QUIETLY TRUE)
-ENDIF ()
-
-FIND_PATH(LIBGLOG_INCLUDE_DIR glog/logging.h)
-
-FIND_LIBRARY(LIBGLOG_LIBRARY glog)
-
-# handle the QUIETLY and REQUIRED arguments and set LIBGLOG_FOUND to TRUE if
-# all listed variables are TRUE
-INCLUDE(FindPackageHandleStandardArgs)
-FIND_PACKAGE_HANDLE_STANDARD_ARGS(LIBGLOG DEFAULT_MSG LIBGLOG_LIBRARY LIBGLOG_INCLUDE_DIR)
-
-MARK_AS_ADVANCED(LIBGLOG_LIBRARY LIBGLOG_INCLUDE_DIR)
\ No newline at end of file
diff --git a/cmake/FindGperftools.cmake b/cmake/FindGperftools.cmake
deleted file mode 100644
index 928f573a4fb82391859e334d50e6c8ed0e26aae2..0000000000000000000000000000000000000000
--- a/cmake/FindGperftools.cmake
+++ /dev/null
@@ -1,63 +0,0 @@
-# Tries to find Gperftools.
-#
-# Usage of this module as follows:
-#
-# find_package(Gperftools)
-#
-# Variables used by this module, they can change the default behaviour and need
-# to be set before calling find_package:
-#
-# Gperftools_ROOT_DIR Set this variable to the root installation of
-# Gperftools if the module has problems finding
-# the proper installation path.
-#
-# Variables defined by this module:
-#
-# GPERFTOOLS_FOUND System has Gperftools libs/headers
-# GPERFTOOLS_LIBRARIES The Gperftools libraries (tcmalloc & profiler)
-# GPERFTOOLS_INCLUDE_DIR The location of Gperftools headers
-
-find_library(GPERFTOOLS_TCMALLOC
- NAMES tcmalloc
- HINTS ${Gperftools_ROOT_DIR}/lib)
-
-find_library(GPERFTOOLS_PROFILER
- NAMES profiler
- HINTS ${Gperftools_ROOT_DIR}/lib)
-
-find_library(GPERFTOOLS_TCMALLOC_AND_PROFILER
- NAMES tcmalloc_and_profiler
- HINTS ${Gperftools_ROOT_DIR}/lib)
-
-find_path(GPERFTOOLS_INCLUDE_DIR
- NAMES gperftools/heap-profiler.h
- HINTS ${Gperftools_ROOT_DIR}/include)
-
-set(GPERFTOOLS_LIBRARIES ${GPERFTOOLS_TCMALLOC_AND_PROFILER})
-
-include(FindPackageHandleStandardArgs)
-find_package_handle_standard_args(
- Gperftools
- DEFAULT_MSG
- GPERFTOOLS_LIBRARIES
- GPERFTOOLS_INCLUDE_DIR)
-
-mark_as_advanced(
- Gperftools_ROOT_DIR
- GPERFTOOLS_TCMALLOC
- GPERFTOOLS_PROFILER
- GPERFTOOLS_TCMALLOC_AND_PROFILER
- GPERFTOOLS_LIBRARIES
- GPERFTOOLS_INCLUDE_DIR)
-
-# create IMPORTED targets
-if (Gperftools_FOUND AND NOT TARGET gperftools::tcmalloc)
- add_library(gperftools::tcmalloc UNKNOWN IMPORTED)
- set_target_properties(gperftools::tcmalloc PROPERTIES
- IMPORTED_LOCATION ${GPERFTOOLS_TCMALLOC}
- INTERFACE_INCLUDE_DIRECTORIES "${GPERFTOOLS_INCLUDE_DIR}")
- add_library(gperftools::profiler UNKNOWN IMPORTED)
- set_target_properties(gperftools::profiler PROPERTIES
- IMPORTED_LOCATION ${GPERFTOOLS_PROFILER}
- INTERFACE_INCLUDE_DIRECTORIES "${GPERFTOOLS_INCLUDE_DIR}")
-endif()
diff --git a/cmake/FindJeMalloc.cmake b/cmake/FindJeMalloc.cmake
deleted file mode 100644
index b95287160ba610b2dfa93ba15e7c7c8214d80ac1..0000000000000000000000000000000000000000
--- a/cmake/FindJeMalloc.cmake
+++ /dev/null
@@ -1,28 +0,0 @@
-# - Find JeMalloc library
-# Find the native JeMalloc includes and library
-#
-# JEMALLOC_INCLUDE_DIR - where to find jemalloc.h, etc.
-# JEMALLOC_LIBRARIES - List of libraries when using jemalloc.
-# JEMALLOC_FOUND - True if jemalloc found.
-
-find_path(JEMALLOC_INCLUDE_DIR
- NAMES jemalloc/jemalloc.h
- HINTS ${JEMALLOC_ROOT_DIR}/include)
-
-find_library(JEMALLOC_LIBRARIES
- NAMES jemalloc
- HINTS ${JEMALLOC_ROOT_DIR}/lib)
-
-include(FindPackageHandleStandardArgs)
-find_package_handle_standard_args(jemalloc DEFAULT_MSG JEMALLOC_LIBRARIES JEMALLOC_INCLUDE_DIR)
-
-mark_as_advanced(
- JEMALLOC_LIBRARIES
- JEMALLOC_INCLUDE_DIR)
-
-if (JEMALLOC_FOUND)
- add_library(jemalloc::jemalloc UNKNOWN IMPORTED)
- set_target_properties(jemalloc::jemalloc PROPERTIES
- IMPORTED_LOCATION ${JEMALLOC_LIBRARIES}
- INTERFACE_INCLUDE_DIRECTORIES "${JEMALLOC_INCLUDE_DIR}")
-endif()
diff --git a/cmake/FindNumPy.cmake b/cmake/FindNumPy.cmake
deleted file mode 100644
index 8cdd642ac01315949f7fee3a981a17d67d1e4198..0000000000000000000000000000000000000000
--- a/cmake/FindNumPy.cmake
+++ /dev/null
@@ -1,38 +0,0 @@
-# Find the Python NumPy package
-# PYTHON_NUMPY_INCLUDE_DIR
-# NUMPY_FOUND
-# will be set by this script
-
-cmake_minimum_required(VERSION 2.6)
-
-if(NOT PYTHON_EXECUTABLE)
- if(NumPy_FIND_QUIETLY)
- find_package(PythonInterp QUIET)
- else()
- find_package(PythonInterp)
- set(_numpy_out 1)
- endif()
-endif()
-
-if (PYTHON_EXECUTABLE)
- # write a python script that finds the numpy path
- file(WRITE ${PROJECT_BINARY_DIR}/FindNumpyPath.py
- "try: import numpy; print(numpy.get_include())\nexcept:pass\n")
-
- # execute the find script
- exec_program("${PYTHON_EXECUTABLE}" ${PROJECT_BINARY_DIR}
- ARGS "FindNumpyPath.py"
- OUTPUT_VARIABLE NUMPY_PATH)
-elseif(_numpy_out)
- message(STATUS "Python executable not found.")
-endif(PYTHON_EXECUTABLE)
-
-find_path(PYTHON_NUMPY_INCLUDE_DIR numpy/arrayobject.h
- HINTS "${NUMPY_PATH}" "${PYTHON_INCLUDE_PATH}")
-
-if(PYTHON_NUMPY_INCLUDE_DIR)
- set(PYTHON_NUMPY_FOUND 1 CACHE INTERNAL "Python numpy found")
-endif(PYTHON_NUMPY_INCLUDE_DIR)
-
-include(FindPackageHandleStandardArgs)
-find_package_handle_standard_args(NumPy DEFAULT_MSG PYTHON_NUMPY_INCLUDE_DIR)
diff --git a/cmake/cblas.cmake b/cmake/cblas.cmake
deleted file mode 100644
index 52ac31d1d125afb89fb0ae783fba94ab9a0c5a1a..0000000000000000000000000000000000000000
--- a/cmake/cblas.cmake
+++ /dev/null
@@ -1,94 +0,0 @@
-# Find the CBlas and lapack libraries
-#
-# It will search MKLML, atlas, OpenBlas, reference-cblas in order.
-#
-# If any cblas implementation found, the following variable will be set.
-# CBLAS_PROVIDER # one of MKLML, OPENBLAS, REFERENCE
-# CBLAS_INC_DIR # the include directory for cblas.
-# CBLAS_LIBS # a list of libraries should be linked by paddle.
-# # Each library should be full path to object file.
-
-set(CBLAS_FOUND OFF)
-
-## Find MKLML First.
-if(WITH_MKLML AND MKLML_INC_DIR AND MKLML_LIB)
- set(CBLAS_FOUND ON)
- set(CBLAS_PROVIDER MKLML)
- set(CBLAS_INC_DIR ${MKLML_INC_DIR})
- set(CBLAS_LIBRARIES ${MKLML_LIB})
-
- add_definitions(-DPADDLE_WITH_MKLML)
- add_definitions(-DLAPACK_FOUND)
-
- message(STATUS "Found cblas and lapack in MKLML "
- "(include: ${CBLAS_INC_DIR}, library: ${CBLAS_LIBRARIES})")
- return()
-endif()
-
-## Then find openblas.
-set(OPENBLAS_ROOT $ENV{OPENBLAS_ROOT} CACHE PATH "Folder contains Openblas")
-set(OPENBLAS_INCLUDE_SEARCH_PATHS
- ${OPENBLAS_ROOT}/include
- /usr/include
- /usr/include/openblas
- /usr/local/opt/openblas/include)
-set(OPENBLAS_LIB_SEARCH_PATHS
- ${OPENBLAS_ROOT}/lib
- /usr/lib
- /usr/lib/blas/openblas
- /usr/lib/openblas
- /usr/local/opt/openblas/lib)
-
-find_path(OPENBLAS_INC_DIR NAMES cblas.h
- PATHS ${OPENBLAS_INCLUDE_SEARCH_PATHS} NO_DEFAULT_PATH)
-find_path(OPENBLAS_LAPACKE_INC_DIR NAMES lapacke.h
- PATHS ${OPENBLAS_INCLUDE_SEARCH_PATHS})
-find_library(OPENBLAS_LIB NAMES openblas
- PATHS ${OPENBLAS_LIB_SEARCH_PATHS})
-
-if(OPENBLAS_LAPACKE_INC_DIR AND OPENBLAS_INC_DIR AND OPENBLAS_LIB)
- set(CBLAS_FOUND ON)
- set(CBLAS_PROVIDER OPENBLAS)
- set(CBLAS_INC_DIR ${OPENBLAS_INC_DIR} ${OPENBLAS_LAPACKE_INC_DIR})
- set(CBLAS_LIBRARIES ${OPENBLAS_LIB})
-
- add_definitions(-DPADDLE_USE_OPENBLAS)
- add_definitions(-DLAPACK_FOUND)
-
- message(STATUS "Found OpenBLAS (include: ${OPENBLAS_INC_DIR}, library: ${CBLAS_LIBRARIES})")
- message(STATUS "Found lapack in OpenBLAS (include: ${OPENBLAS_LAPACKE_INC_DIR})")
- return()
-endif()
-
-
-## Then find the reference-cblas. www.netlib.org/blas/
-set(REFERENCE_CBLAS_ROOT $ENV{REFERENCE_CBLAS_ROOT} CACHE PATH
- "Folder contains reference-cblas")
-set(REFERENCE_CBLAS_INCLUDE_SEARCH_PATHS
- ${REFERENCE_CBLAS_ROOT}/include
- /usr/include
- /usr/include/cblas
-)
-
-set(REFERENCE_CBLAS_LIB_SEARCH_PATHS
- ${REFERENCE_CBLAS_ROOT}/lib
- /usr/lib
- /usr/lib/blas/reference/
- /usr/lib/reference/
-)
-
-if(WITH_SYSTEM_BLAS)
- find_path(REFERENCE_CBLAS_INCLUDE_DIR NAMES cblas.h PATHS
- ${REFERENCE_CBLAS_INCLUDE_SEARCH_PATHS})
- find_library(REFERENCE_CBLAS_LIBRARY NAMES cblas PATHS
- ${REFERENCE_CBLAS_LIB_SEARCH_PATHS})
-
- if(REFERENCE_CBLAS_INCLUDE_DIR AND REFERENCE_CBLAS_LIBRARY)
- set(CBLAS_FOUND ON)
- set(CBLAS_PROVIDER REFERENCE)
- set(CBLAS_INC_DIR ${REFERENCE_CBLAS_INCLUDE_DIR})
- set(CBLAS_LIBRARIES ${REFERENCE_CBLAS_LIBRARY})
- add_definitions(-DPADDLE_USE_REFERENCE_CBLAS)
- message(STATUS "Found reference-cblas (include: ${CBLAS_INC_DIR}, library: ${CBLAS_LIBRARIES})")
- endif()
-endif()
diff --git a/cmake/ccache.cmake b/cmake/ccache.cmake
deleted file mode 100644
index 900f59d4cb83bc9ce1893b2d3bd95f5a08b164bb..0000000000000000000000000000000000000000
--- a/cmake/ccache.cmake
+++ /dev/null
@@ -1,9 +0,0 @@
-# Use ccache if found ccache program
-
-find_program(CCACHE_PATH ccache)
-
-if(CCACHE_PATH)
- message(STATUS "Ccache is founded, use ccache to speed up compile.")
- set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ${CCACHE_PATH})
- set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK ${CCACHE_PATH})
-endif(CCACHE_PATH)
diff --git a/cmake/configure.cmake b/cmake/configure.cmake
deleted file mode 100644
index 67830fe2e0ec3c35064acb4c00ec152989ddb655..0000000000000000000000000000000000000000
--- a/cmake/configure.cmake
+++ /dev/null
@@ -1,160 +0,0 @@
-# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-if(NOT WITH_PYTHON)
- add_definitions(-DPADDLE_NO_PYTHON)
-endif(NOT WITH_PYTHON)
-
-if(WITH_DSO)
- add_definitions(-DPADDLE_USE_DSO)
-endif(WITH_DSO)
-
-if(WITH_TESTING)
- add_definitions(-DPADDLE_WITH_TESTING)
-endif(WITH_TESTING)
-
-if(NOT WITH_PROFILER)
- add_definitions(-DPADDLE_DISABLE_PROFILER)
-endif(NOT WITH_PROFILER)
-
-if(WITH_AVX AND AVX_FOUND)
- set(SIMD_FLAG ${AVX_FLAG})
-elseif(SSE3_FOUND)
- set(SIMD_FLAG ${SSE3_FLAG})
-endif()
-
-if(LITE_WITH_CUDA)
- add_definitions(-DLITE_WITH_CUDA)
- add_definitions(-DEIGEN_USE_GPU)
-
- FIND_PACKAGE(CUDA REQUIRED)
-
- if(${CUDA_VERSION_MAJOR} VERSION_LESS 7)
- message(FATAL_ERROR "Paddle needs CUDA >= 7.0 to compile")
- endif()
-
- if(NOT CUDNN_FOUND)
- message(FATAL_ERROR "Paddle needs cudnn to compile")
- endif()
- if(CUPTI_FOUND)
- include_directories(${CUPTI_INCLUDE_DIR})
- add_definitions(-DPADDLE_WITH_CUPTI)
- else()
- message(STATUS "Cannot find CUPTI, GPU Profiling is incorrect.")
- endif()
- set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS} "-Xcompiler ${SIMD_FLAG}")
-
- # Include cuda and cudnn
- include_directories(${CUDNN_INCLUDE_DIR})
- include_directories(${CUDA_TOOLKIT_INCLUDE})
-
-elseif(WITH_AMD_GPU)
- add_definitions(-DPADDLE_WITH_HIP)
- set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D__HIP_PLATFORM_HCC__")
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D__HIP_PLATFORM_HCC__")
-else()
- add_definitions(-DHPPL_STUB_FUNC)
- list(APPEND CMAKE_CXX_SOURCE_FILE_EXTENSIONS cu)
-endif()
-
-if (WITH_MKLML AND MKLML_IOMP_LIB)
- message(STATUS "Enable Intel OpenMP with ${MKLML_IOMP_LIB}")
- if(WIN32)
- # openmp not support well for now on windows
- set(OPENMP_FLAGS "")
- else(WIN32)
- set(OPENMP_FLAGS "-fopenmp")
- endif(WIN32)
- set(CMAKE_C_CREATE_SHARED_LIBRARY_FORBIDDEN_FLAGS ${OPENMP_FLAGS})
- set(CMAKE_CXX_CREATE_SHARED_LIBRARY_FORBIDDEN_FLAGS ${OPENMP_FLAGS})
- set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OPENMP_FLAGS}")
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OPENMP_FLAGS}")
-endif()
-
-set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SIMD_FLAG}")
-set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SIMD_FLAG}")
-
-if(WITH_DISTRIBUTE)
- add_definitions(-DPADDLE_WITH_DISTRIBUTE)
-endif()
-
-if(WITH_GRPC)
- add_definitions(-DPADDLE_WITH_GRPC)
-endif(WITH_GRPC)
-
-if(WITH_BRPC_RDMA)
- add_definitions(-DPADDLE_WITH_BRPC_RDMA)
-endif(WITH_BRPC_RDMA)
-
-if(ON_INFER)
- add_definitions(-DPADDLE_ON_INFERENCE)
-endif(ON_INFER)
-
-if(WITH_WBAES)
- add_definitions(-DPADDLE_WITH_WBAES)
-endif(WITH_WBAES)
-
-if (REPLACE_ENFORCE_GLOG)
- add_definitions("-DREPLACE_ENFORCE_GLOG")
-endif()
-
-# for lite
-# TODO(Superjomn) not work fine with the option
-if (LITE_WITH_X86)
- add_definitions("-DLITE_WITH_X86")
-endif()
-
-if (LITE_WITH_ARM)
- add_definitions("-DLITE_WITH_ARM")
-endif()
-
-if (WITH_ARM_DOTPROD)
- add_definitions("-DWITH_ARM_DOTPROD")
-endif()
-
-if (LITE_WITH_NPU)
- add_definitions("-DLITE_WITH_NPU")
-endif()
-
-if (LITE_WITH_OPENCL)
- add_definitions("-DLITE_WITH_OPENCL")
-endif()
-
-if (LITE_WITH_FPGA)
-add_definitions("-DLITE_WITH_FPGA")
-endif()
-
-if (LITE_WITH_PROFILE)
- add_definitions("-DLITE_WITH_PROFILE")
- if (LITE_WITH_PRECISION_PROFILE)
- add_definitions("-DLITE_WITH_PRECISION_PROFILE")
- endif()
-endif()
-
-if (LITE_WITH_LIGHT_WEIGHT_FRAMEWORK)
- add_definitions("-DLITE_WITH_LIGHT_WEIGHT_FRAMEWORK")
-endif()
-
-if (LITE_SHUTDOWN_LOG)
- add_definitions("-DLITE_SHUTDOWN_LOG")
-endif()
-
-if (LITE_ON_TINY_PUBLISH)
- add_definitions("-DLITE_ON_TINY_PUBLISH")
-endif()
-
-if (LITE_ON_MODEL_OPTIMIZE_TOOL)
- add_definitions("-DLITE_ON_MODEL_OPTIMIZE_TOOL")
-endif(LITE_ON_MODEL_OPTIMIZE_TOOL)
-
diff --git a/cmake/coveralls.cmake b/cmake/coveralls.cmake
deleted file mode 100644
index ca1471cabb57c0795ee193493d2e60bb5bd9e1cc..0000000000000000000000000000000000000000
--- a/cmake/coveralls.cmake
+++ /dev/null
@@ -1,103 +0,0 @@
-# CMake script for code coverage.
-# If _COVERALLS_UPLOAD is ON, it will upload json files to overalls.io automatically.
-
-# Param _COVERAGE_SRCS A list of coverage source files.
-# Param _COVERALLS_UPLOAD Upload the result to coveralls.
-# Param _CMAKE_SCRIPT_PATH CMake script path.
-function(code_coverage _COVERAGE_SRCS _COVERALLS_UPLOAD _CMAKE_SCRIPT_PATH)
- # clean previous gcov data.
- file(REMOVE_RECURSE ${PROJECT_BINARY_DIR}/*.gcda)
-
- # find curl for upload JSON soon.
- if (_COVERALLS_UPLOAD)
- find_program(CURL_EXECUTABLE curl)
- if (NOT CURL_EXECUTABLE)
- message(FATAL_ERROR "Coveralls: curl not found!")
- endif()
- endif()
-
- # When passing a CMake list to an external process, the list
- # will be converted from the format "1;2;3" to "1 2 3".
- set(COVERAGE_SRCS "")
- foreach (SINGLE_SRC ${_COVERAGE_SRCS})
- set(COVERAGE_SRCS "${COVERAGE_SRCS}*${SINGLE_SRC}")
- endforeach()
-
- # query number of logical cores
- cmake_host_system_information(RESULT core_size QUERY NUMBER_OF_LOGICAL_CORES)
- # coveralls json file.
- set(COVERALLS_FILE ${PROJECT_BINARY_DIR}/coveralls.json)
- add_custom_target(coveralls_generate
- # Run regress tests.
- COMMAND ${CMAKE_CTEST_COMMAND}
- -j ${core_size}
- --output-on-failure
- # Generate Gcov and translate it into coveralls JSON.
- COMMAND ${CMAKE_COMMAND}
- -DCOVERAGE_SRCS="${COVERAGE_SRCS}"
- -DCOVERALLS_OUTPUT_FILE="${COVERALLS_FILE}"
- -DCOV_PATH="${PROJECT_BINARY_DIR}"
- -DPROJECT_ROOT="${PROJECT_SOURCE_DIR}"
- -P "${_CMAKE_SCRIPT_PATH}/coverallsGcovJsons.cmake"
- WORKING_DIRECTORY ${PROJECT_BINARY_DIR}
- COMMENT "Coveralls: generating coveralls output..."
- )
-
- if (_COVERALLS_UPLOAD)
- message("COVERALLS UPLOAD: ON")
- # Upload the JSON to coveralls.
- add_custom_target(coveralls_upload
- COMMAND ${CURL_EXECUTABLE}
- -S -F json_file=@${COVERALLS_FILE}
- https://coveralls.io/api/v1/jobs
- DEPENDS coveralls_generate
- WORKING_DIRECTORY ${PROJECT_BINARY_DIR}
- COMMENT "Coveralls: uploading coveralls output...")
-
- add_custom_target(coveralls DEPENDS coveralls_upload)
- else()
- message("COVERALLS UPLOAD: OFF")
- add_custom_target(coveralls DEPENDS coveralls_generate)
- endif()
-endfunction()
-
-if(WITH_COVERAGE)
- set(CMAKE_BUILD_TYPE "Debug")
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g -O0 -fprofile-arcs -ftest-coverage")
- set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -O0 -fprofile-arcs -ftest-coverage")
-
- set(EXCLUDE_DIRS
- "demo/"
- "build/"
- "tests/"
- ".test_env/"
- )
-
- if(WITH_GPU)
- file(GLOB_RECURSE PADDLE_SOURCES RELATIVE "${PROJECT_SOURCE_DIR}" "*.cpp" "*.cc" ".c" "*.cu")
- else()
- file(GLOB_RECURSE PADDLE_SOURCES RELATIVE "${PROJECT_SOURCE_DIR}" "*.cpp" "*.cc" "*.c")
- endif()
-
- # exclude trivial files in PADDLE_SOURCES
- foreach(EXCLUDE_DIR ${EXCLUDE_DIRS})
- foreach(TMP_PATH ${PADDLE_SOURCES})
- string(FIND ${TMP_PATH} ${EXCLUDE_DIR} EXCLUDE_DIR_FOUND)
- if(NOT ${EXCLUDE_DIR_FOUND} EQUAL -1)
- list(REMOVE_ITEM PADDLE_SOURCES ${TMP_PATH})
- endif()
- endforeach(TMP_PATH)
- endforeach()
-
- # convert to absolute path
- set(PADDLE_SRCS "")
- foreach(PADDLE_SRC ${PADDLE_SOURCES})
- set(PADDLE_SRCS "${PADDLE_SRCS};${PROJECT_SOURCE_DIR}/${PADDLE_SRC}")
- endforeach()
-
- code_coverage(
- "${PADDLE_SRCS}"
- ${COVERALLS_UPLOAD}
- "${PROJECT_SOURCE_DIR}/cmake"
- )
-endif()
diff --git a/cmake/coverallsGcovJsons.cmake b/cmake/coverallsGcovJsons.cmake
deleted file mode 100644
index 4641184fcf5273b884524d9b9444209ffb65e000..0000000000000000000000000000000000000000
--- a/cmake/coverallsGcovJsons.cmake
+++ /dev/null
@@ -1,401 +0,0 @@
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in all
-# copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-# SOFTWARE.
-#
-# Copyright (C) 2014 Joakim Söderberg
-#
-# This is intended to be run by a custom target in a CMake project like this.
-# 0. Compile program with coverage support.
-# 1. Clear coverage data. (Recursively delete *.gcda in build dir)
-# 2. Run the unit tests.
-# 3. Run this script specifying which source files the coverage should be performed on.
-#
-# This script will then use gcov to generate .gcov files in the directory specified
-# via the COV_PATH var. This should probably be the same as your cmake build dir.
-#
-# It then parses the .gcov files to convert them into the Coveralls JSON format:
-# https://coveralls.io/docs/api
-#
-
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
-
-# Since it's not possible to pass a CMake list properly in the
-# "1;2;3" format to an external process, we have replaced the
-# ";" with "*", so reverse that here so we get it back into the
-# CMake list format.
-string(REGEX REPLACE "\\*" ";" COVERAGE_SRCS ${COVERAGE_SRCS})
-
-find_program(GCOV_EXECUTABLE gcov)
-if (NOT GCOV_EXECUTABLE)
- message(FATAL_ERROR "gcov not found! Aborting...")
-endif()
-
-find_package(Git)
-
-# TODO: Add these git things to the coveralls json.
-if (GIT_FOUND)
- # Branch.
- execute_process(
- COMMAND ${GIT_EXECUTABLE} rev-parse --abbrev-ref HEAD
- WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
- OUTPUT_VARIABLE GIT_BRANCH
- OUTPUT_STRIP_TRAILING_WHITESPACE
- )
-
- macro (git_log_format FORMAT_CHARS VAR_NAME)
- execute_process(
- COMMAND ${GIT_EXECUTABLE} log -1 --pretty=format:%${FORMAT_CHARS}
- WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
- OUTPUT_VARIABLE ${VAR_NAME}
- OUTPUT_STRIP_TRAILING_WHITESPACE
- )
- endmacro()
-
- git_log_format(an GIT_AUTHOR_EMAIL)
- git_log_format(ae GIT_AUTHOR_EMAIL)
- git_log_format(cn GIT_COMMITTER_NAME)
- git_log_format(ce GIT_COMMITTER_EMAIL)
- git_log_format(B GIT_COMMIT_MESSAGE)
-
- message("Git exe: ${GIT_EXECUTABLE}")
- message("Git branch: ${GIT_BRANCH}")
- message("Git author: ${GIT_AUTHOR_NAME}")
- message("Git e-mail: ${GIT_AUTHOR_EMAIL}")
- message("Git commiter name: ${GIT_COMMITTER_NAME}")
- message("Git commiter e-mail: ${GIT_COMMITTER_EMAIL}")
- message("Git commit message: ${GIT_COMMIT_MESSAGE}")
-
-endif()
-
-############################# Macros #########################################
-
-#
-# This macro converts from the full path format gcov outputs:
-#
-# /path/to/project/root/build/#path#to#project#root#subdir#the_file.c.gcov
-#
-# to the original source file path the .gcov is for:
-#
-# /path/to/project/root/subdir/the_file.c
-#
-macro(get_source_path_from_gcov_filename _SRC_FILENAME _GCOV_FILENAME)
-
- # /path/to/project/root/build/#path#to#project#root#subdir#the_file.c.gcov
- # ->
- # #path#to#project#root#subdir#the_file.c.gcov
- get_filename_component(_GCOV_FILENAME_WEXT ${_GCOV_FILENAME} NAME)
-
- # #path#to#project#root#subdir#the_file.c.gcov -> /path/to/project/root/subdir/the_file.c
- string(REGEX REPLACE "\\.gcov$" "" SRC_FILENAME_TMP ${_GCOV_FILENAME_WEXT})
- string(REGEX REPLACE "\#" "/" SRC_FILENAME_TMP ${SRC_FILENAME_TMP})
- set(${_SRC_FILENAME} "${SRC_FILENAME_TMP}")
-endmacro()
-
-##############################################################################
-
-# Get the coverage data.
-file(GLOB_RECURSE GCDA_FILES "${COV_PATH}" "*.gcda")
-message("Process GCDA files:")
-message("===============================")
-
-# Get a list of all the object directories needed by gcov
-# (The directories the .gcda files and .o files are found in)
-# and run gcov on those.
-foreach(GCDA ${GCDA_FILES})
- get_filename_component(GCDA_DIR ${GCDA} PATH)
-
- #
- # The -p below refers to "Preserve path components",
- # This means that the generated gcov filename of a source file will
- # keep the original files entire filepath, but / is replaced with #.
- # Example:
- #
- # /path/to/project/root/build/CMakeFiles/the_file.dir/subdir/the_file.c.gcda
- # ------------------------------------------------------------------------------
- # File '/path/to/project/root/subdir/the_file.c'
- # Lines executed:68.34% of 199
- # /path/to/project/root/subdir/the_file.c:creating '#path#to#project#root#subdir#the_file.c.gcov'
- #
- # If -p is not specified then the file is named only "the_file.c.gcov"
- #
- execute_process(
- COMMAND ${GCOV_EXECUTABLE} -p -o ${GCDA_DIR} ${GCDA} >/dev/null
- WORKING_DIRECTORY ${GCDA_DIR}
- )
-endforeach()
-
-# TODO: Make these be absolute path
-file(GLOB_RECURSE ALL_GCOV_FILES "${COV_PATH}" "*.gcov")
-
-# Get only the filenames to use for filtering.
-#set(COVERAGE_SRCS_NAMES "")
-#foreach (COVSRC ${COVERAGE_SRCS})
-# get_filename_component(COVSRC_NAME ${COVSRC} NAME)
-# message("${COVSRC} -> ${COVSRC_NAME}")
-# list(APPEND COVERAGE_SRCS_NAMES "${COVSRC_NAME}")
-#endforeach()
-
-#
-# Filter out all but the gcov files we want.
-#
-# We do this by comparing the list of COVERAGE_SRCS filepaths that the
-# user wants the coverage data for with the paths of the generated .gcov files,
-# so that we only keep the relevant gcov files.
-#
-# Example:
-# COVERAGE_SRCS =
-# /path/to/project/root/subdir/the_file.c
-#
-# ALL_GCOV_FILES =
-# /path/to/project/root/build/#path#to#project#root#subdir#the_file.c.gcov
-# /path/to/project/root/build/#path#to#project#root#subdir#other_file.c.gcov
-#
-# Result should be:
-# GCOV_FILES =
-# /path/to/project/root/build/#path#to#project#root#subdir#the_file.c.gcov
-#
-set(GCOV_FILES "")
-#message("Look in coverage sources: ${COVERAGE_SRCS}")
-message("\nFilter out unwanted GCOV files:")
-message("===============================")
-
-set(COVERAGE_SRCS_REMAINING ${COVERAGE_SRCS})
-
-foreach (GCOV_FILE ${ALL_GCOV_FILES})
-
- #
- # /path/to/project/root/build/#path#to#project#root#subdir#the_file.c.gcov
- # ->
- # /path/to/project/root/subdir/the_file.c
- get_source_path_from_gcov_filename(GCOV_SRC_PATH ${GCOV_FILE})
-
- # Is this in the list of source files?
- # TODO: We want to match against relative path filenames from the source file root...
- list(FIND COVERAGE_SRCS ${GCOV_SRC_PATH} WAS_FOUND)
-
- if (NOT WAS_FOUND EQUAL -1)
- message("YES: ${GCOV_FILE}")
- list(APPEND GCOV_FILES ${GCOV_FILE})
-
- # We remove it from the list, so we don't bother searching for it again.
- # Also files left in COVERAGE_SRCS_REMAINING after this loop ends should
- # have coverage data generated from them (no lines are covered).
- list(REMOVE_ITEM COVERAGE_SRCS_REMAINING ${GCOV_SRC_PATH})
- else()
- message("NO: ${GCOV_FILE}")
- endif()
-endforeach()
-
-# TODO: Enable setting these
-set(JSON_SERVICE_NAME "travis-ci")
-set(JSON_SERVICE_JOB_ID $ENV{TRAVIS_JOB_ID})
-
-set(JSON_TEMPLATE
-"{
- \"service_name\": \"\@JSON_SERVICE_NAME\@\",
- \"service_job_id\": \"\@JSON_SERVICE_JOB_ID\@\",
- \"source_files\": \@JSON_GCOV_FILES\@
-}"
-)
-
-set(SRC_FILE_TEMPLATE
-"{
- \"name\": \"\@GCOV_SRC_REL_PATH\@\",
- \"source_digest\": \"\@GCOV_CONTENTS_MD5\@\",
- \"coverage\": \@GCOV_FILE_COVERAGE\@
- }"
-)
-
-message("\nGenerate JSON for files:")
-message("=========================")
-
-set(JSON_GCOV_FILES "[")
-
-# Read the GCOV files line by line and get the coverage data.
-foreach (GCOV_FILE ${GCOV_FILES})
-
- get_source_path_from_gcov_filename(GCOV_SRC_PATH ${GCOV_FILE})
- file(RELATIVE_PATH GCOV_SRC_REL_PATH "${PROJECT_ROOT}" "${GCOV_SRC_PATH}")
-
- # The new coveralls API doesn't need the entire source (Yay!)
- # However, still keeping that part for now. Will cleanup in the future.
- file(MD5 "${GCOV_SRC_PATH}" GCOV_CONTENTS_MD5)
- message("MD5: ${GCOV_SRC_PATH} = ${GCOV_CONTENTS_MD5}")
-
- # Loads the gcov file as a list of lines.
- # (We first open the file and replace all occurences of [] with _
- # because CMake will fail to parse a line containing unmatched brackets...
- # also the \ to escaped \n in macros screws up things.)
- # https://public.kitware.com/Bug/view.php?id=15369
- file(READ ${GCOV_FILE} GCOV_CONTENTS)
- string(REPLACE "[" "_" GCOV_CONTENTS "${GCOV_CONTENTS}")
- string(REPLACE "]" "_" GCOV_CONTENTS "${GCOV_CONTENTS}")
- string(REPLACE "\\" "_" GCOV_CONTENTS "${GCOV_CONTENTS}")
- file(WRITE ${GCOV_FILE}_tmp "${GCOV_CONTENTS}")
-
- file(STRINGS ${GCOV_FILE}_tmp GCOV_LINES)
- list(LENGTH GCOV_LINES LINE_COUNT)
-
- # Instead of trying to parse the source from the
- # gcov file, simply read the file contents from the source file.
- # (Parsing it from the gcov is hard because C-code uses ; in many places
- # which also happens to be the same as the CMake list delimeter).
- file(READ ${GCOV_SRC_PATH} GCOV_FILE_SOURCE)
-
- string(REPLACE "\\" "\\\\" GCOV_FILE_SOURCE "${GCOV_FILE_SOURCE}")
- string(REGEX REPLACE "\"" "\\\\\"" GCOV_FILE_SOURCE "${GCOV_FILE_SOURCE}")
- string(REPLACE "\t" "\\\\t" GCOV_FILE_SOURCE "${GCOV_FILE_SOURCE}")
- string(REPLACE "\r" "\\\\r" GCOV_FILE_SOURCE "${GCOV_FILE_SOURCE}")
- string(REPLACE "\n" "\\\\n" GCOV_FILE_SOURCE "${GCOV_FILE_SOURCE}")
- # According to http://json.org/ these should be escaped as well.
- # Don't know how to do that in CMake however...
- #string(REPLACE "\b" "\\\\b" GCOV_FILE_SOURCE "${GCOV_FILE_SOURCE}")
- #string(REPLACE "\f" "\\\\f" GCOV_FILE_SOURCE "${GCOV_FILE_SOURCE}")
- #string(REGEX REPLACE "\u([a-fA-F0-9]{4})" "\\\\u\\1" GCOV_FILE_SOURCE "${GCOV_FILE_SOURCE}")
-
- # We want a json array of coverage data as a single string
- # start building them from the contents of the .gcov
- set(GCOV_FILE_COVERAGE "[")
-
- set(GCOV_LINE_COUNT 1) # Line number for the .gcov.
- set(DO_SKIP 0)
- foreach (GCOV_LINE ${GCOV_LINES})
- #message("${GCOV_LINE}")
- # Example of what we're parsing:
- # Hitcount |Line | Source
- # " 8: 26: if (!allowed || (strlen(allowed) == 0))"
- string(REGEX REPLACE
- "^([^:]*):([^:]*):(.*)$"
- "\\1;\\2;\\3"
- RES
- "${GCOV_LINE}")
-
- # Check if we should exclude lines using the Lcov syntax.
- string(REGEX MATCH "LCOV_EXCL_START" START_SKIP "${GCOV_LINE}")
- string(REGEX MATCH "LCOV_EXCL_END" END_SKIP "${GCOV_LINE}")
- string(REGEX MATCH "LCOV_EXCL_LINE" LINE_SKIP "${GCOV_LINE}")
-
- set(RESET_SKIP 0)
- if (LINE_SKIP AND NOT DO_SKIP)
- set(DO_SKIP 1)
- set(RESET_SKIP 1)
- endif()
-
- if (START_SKIP)
- set(DO_SKIP 1)
- message("${GCOV_LINE_COUNT}: Start skip")
- endif()
-
- if (END_SKIP)
- set(DO_SKIP 0)
- endif()
-
- list(LENGTH RES RES_COUNT)
-
- if (RES_COUNT GREATER 2)
- list(GET RES 0 HITCOUNT)
- list(GET RES 1 LINE)
- list(GET RES 2 SOURCE)
-
- string(STRIP ${HITCOUNT} HITCOUNT)
- string(STRIP ${LINE} LINE)
-
- # Lines with 0 line numbers are metadata and can be ignored.
- if (NOT ${LINE} EQUAL 0)
-
- if (DO_SKIP)
- set(GCOV_FILE_COVERAGE "${GCOV_FILE_COVERAGE}null, ")
- else()
- # Translate the hitcount into valid JSON values.
- if (${HITCOUNT} STREQUAL "#####")
- set(GCOV_FILE_COVERAGE "${GCOV_FILE_COVERAGE}0, ")
- elseif (${HITCOUNT} STREQUAL "-")
- set(GCOV_FILE_COVERAGE "${GCOV_FILE_COVERAGE}null, ")
- else()
- set(GCOV_FILE_COVERAGE "${GCOV_FILE_COVERAGE}${HITCOUNT}, ")
- endif()
- endif()
- endif()
- else()
- message(WARNING "Failed to properly parse line (RES_COUNT = ${RES_COUNT}) ${GCOV_FILE}:${GCOV_LINE_COUNT}\n-->${GCOV_LINE}")
- endif()
-
- if (RESET_SKIP)
- set(DO_SKIP 0)
- endif()
- math(EXPR GCOV_LINE_COUNT "${GCOV_LINE_COUNT}+1")
- endforeach()
-
- message("${GCOV_LINE_COUNT} of ${LINE_COUNT} lines read!")
-
- # Advanced way of removing the trailing comma in the JSON array.
- # "[1, 2, 3, " -> "[1, 2, 3"
- string(REGEX REPLACE ",[ ]*$" "" GCOV_FILE_COVERAGE ${GCOV_FILE_COVERAGE})
-
- # Append the trailing ] to complete the JSON array.
- set(GCOV_FILE_COVERAGE "${GCOV_FILE_COVERAGE}]")
-
- # Generate the final JSON for this file.
- message("Generate JSON for file: ${GCOV_SRC_REL_PATH}...")
- string(CONFIGURE ${SRC_FILE_TEMPLATE} FILE_JSON)
-
- set(JSON_GCOV_FILES "${JSON_GCOV_FILES}${FILE_JSON}, ")
-endforeach()
-
-# Loop through all files we couldn't find any coverage for
-# as well, and generate JSON for those as well with 0% coverage.
-foreach(NOT_COVERED_SRC ${COVERAGE_SRCS_REMAINING})
-
- # Loads the source file as a list of lines.
- file(STRINGS ${NOT_COVERED_SRC} SRC_LINES)
-
- set(GCOV_FILE_COVERAGE "[")
- set(GCOV_FILE_SOURCE "")
-
- foreach (SOURCE ${SRC_LINES})
- set(GCOV_FILE_COVERAGE "${GCOV_FILE_COVERAGE}0, ")
-
- string(REPLACE "\\" "\\\\" SOURCE "${SOURCE}")
- string(REGEX REPLACE "\"" "\\\\\"" SOURCE "${SOURCE}")
- string(REPLACE "\t" "\\\\t" SOURCE "${SOURCE}")
- string(REPLACE "\r" "\\\\r" SOURCE "${SOURCE}")
- set(GCOV_FILE_SOURCE "${GCOV_FILE_SOURCE}${SOURCE}\\n")
- endforeach()
-
- # Remove trailing comma, and complete JSON array with ]
- string(REGEX REPLACE ",[ ]*$" "" GCOV_FILE_COVERAGE ${GCOV_FILE_COVERAGE})
- set(GCOV_FILE_COVERAGE "${GCOV_FILE_COVERAGE}]")
-
- # Generate the final JSON for this file.
- string(CONFIGURE ${SRC_FILE_TEMPLATE} FILE_JSON)
- set(JSON_GCOV_FILES "${JSON_GCOV_FILES}${FILE_JSON}, ")
-endforeach()
-
-# Get rid of trailing comma.
-string(REGEX REPLACE ",[ ]*$" "" JSON_GCOV_FILES ${JSON_GCOV_FILES})
-set(JSON_GCOV_FILES "${JSON_GCOV_FILES}]")
-
-# Generate the final complete JSON!
-message("Generate final JSON...")
-string(CONFIGURE ${JSON_TEMPLATE} JSON)
-
-file(WRITE "${COVERALLS_OUTPUT_FILE}" "${JSON}")
-message("###########################################################################")
-message("Generated coveralls JSON containing coverage data:")
-message("${COVERALLS_OUTPUT_FILE}")
-message("###########################################################################")
diff --git a/cmake/cross_compiling/android.cmake b/cmake/cross_compiling/android.cmake
deleted file mode 100644
index 11a803ff031706a10f282f21024915be68444546..0000000000000000000000000000000000000000
--- a/cmake/cross_compiling/android.cmake
+++ /dev/null
@@ -1,85 +0,0 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-if(NOT ARM_TARGET_OS STREQUAL "android")
- return()
-endif()
-
-set(ANDROID TRUE)
-add_definitions(-DLITE_WITH_LINUX)
-
-if(NOT DEFINED ANDROID_NDK)
- set(ANDROID_NDK $ENV{NDK_ROOT})
- if(NOT ANDROID_NDK)
- message(FATAL_ERROR "Must set ANDROID_NDK or env NDK_ROOT")
- endif()
-endif()
-
-if(ARM_TARGET_LANG STREQUAL "gcc")
- # gcc do not need set lang on android
- set(ARM_TARGET_LANG "")
-endif()
-
-if(NOT DEFINED ANDROID_API_LEVEL)
- set(ANDROID_API_LEVEL "22")
-endif()
-
-# then check input arm abi
-if(ARM_TARGET_ARCH_ABI STREQUAL "armv7hf")
- message(FATAL_ERROR "ANDROID does not support hardfp on v7 use armv7 instead.")
-endif()
-
-set(ANDROID_ARCH_ABI ${ARM_TARGET_ARCH_ABI} CACHE STRING "Choose Android Arch ABI")
-if(ARM_TARGET_ARCH_ABI STREQUAL "armv8")
- set(ANDROID_ARCH_ABI "arm64-v8a")
-endif()
-
-if(ARM_TARGET_ARCH_ABI STREQUAL "armv7")
- set(ANDROID_ARCH_ABI "armeabi-v7a")
-endif()
-
-check_input_var(ANDROID_ARCH_ABI DEFAULT ${ANDROID_ARCH_ABI} LIST "arm64-v8a" "armeabi-v7a"
- "armeabi-v6" "armeabi" "mips" "mips64" "x86" "x86_64")
-check_input_var(ANDROID_STL_TYPE DEFAULT "c++_static" LIST "c++_static" "gnustl_static" "c++_shared")
-
-if(ANDROID_ARCH_ABI STREQUAL "armeabi-v7a")
- message(STATUS "armeabi-v7a use softfp by default.")
- set(CMAKE_ANDROID_ARM_NEON ON)
- message(STATUS "NEON is enabled on arm-v7a with softfp.")
-endif()
-
-set(CMAKE_SYSTEM_NAME Android)
-set(CMAKE_SYSTEM_VERSION ${ANDROID_API_LEVEL})
-set(CMAKE_ANDROID_ARCH_ABI ${ANDROID_ARCH_ABI})
-set(CMAKE_ANDROID_NDK ${ANDROID_NDK})
-set(CMAKE_ANDROID_NDK_TOOLCHAIN_VERSION ${ARM_TARGET_LANG})
-set(CMAKE_ANDROID_STL_TYPE ${ANDROID_STL_TYPE})
-
-if (ARM_TARGET_LANG STREQUAL "clang")
- if(ARM_TARGET_ARCH_ABI STREQUAL "armv8")
- set(triple aarch64-v8a-linux-android)
- elseif(ARM_TARGET_ARCH_ABI STREQUAL "armv7")
- set(triple arm-v7a-linux-android)
- set(LITE_WITH_OPENMP OFF CACHE STRING "Due to libomp's bug(For ARM64, it has been fixed by https://reviews.llvm.org/D19879, but still exists on ARM32), disable OpenMP on armv7 when cross-compiling using Clang" FORCE)
- else()
- message(FATAL_ERROR "Clang do not support this ${ARM_TARGET_ARCH_ABI}, use armv8 or armv7")
- endif()
-
- set(CMAKE_C_COMPILER clang)
- set(CMAKE_C_COMPILER_TARGET ${triple})
- set(CMAKE_CXX_COMPILER clang++)
- set(CMAKE_CXX_COMPILER_TARGET ${triple})
-
- message(STATUS "CMAKE_CXX_COMPILER_TARGET: ${CMAKE_CXX_COMPILER_TARGET}")
-endif()
diff --git a/cmake/cross_compiling/armlinux.cmake b/cmake/cross_compiling/armlinux.cmake
deleted file mode 100644
index 98f23d43005fbccc9bedb30b968d13d0fc807c61..0000000000000000000000000000000000000000
--- a/cmake/cross_compiling/armlinux.cmake
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-if(NOT ARM_TARGET_OS STREQUAL "armlinux")
- return()
-endif()
-
-set(ARMLINUX TRUE)
-add_definitions(-DLITE_WITH_LINUX)
-set(CMAKE_SYSTEM_NAME Linux)
-
-check_input_var(ARMLINUX_ARCH_ABI DEFAULT ${ARM_TARGET_ARCH_ABI} LIST "armv8" "armv7" "armv7hf")
-
-if(ARMLINUX_ARCH_ABI STREQUAL "armv8")
- set(CMAKE_SYSTEM_PROCESSOR aarch64)
- set(CMAKE_C_COMPILER "aarch64-linux-gnu-gcc")
- set(CMAKE_CXX_COMPILER "aarch64-linux-gnu-g++")
-endif()
-
-if(ARMLINUX_ARCH_ABI STREQUAL "armv7")
- set(CMAKE_SYSTEM_PROCESSOR arm)
- set(CMAKE_C_COMPILER "arm-linux-gnueabi-gcc")
- set(CMAKE_CXX_COMPILER "arm-linux-gnueabi-g++")
-endif()
-
-if(ARMLINUX_ARCH_ABI STREQUAL "armv7hf")
- set(CMAKE_SYSTEM_PROCESSOR arm)
- set(CMAKE_C_COMPILER "arm-linux-gnueabihf-gcc")
- set(CMAKE_CXX_COMPILER "arm-linux-gnueabihf-g++")
-endif()
diff --git a/cmake/cross_compiling/findar.cmake b/cmake/cross_compiling/findar.cmake
deleted file mode 100644
index bcb0dc70fd811a5041244dedb4a4bcf5b540dc3a..0000000000000000000000000000000000000000
--- a/cmake/cross_compiling/findar.cmake
+++ /dev/null
@@ -1,33 +0,0 @@
-# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-if(NOT ARM_TARGET_LANG STREQUAL "clang")
- # only clang need find ar tool
- return()
-endif()
-
-if(NOT EXISTS "${CMAKE_CXX_COMPILER}")
- message(ERROR "Can not find CMAKE_CXX_COMPILER ${CMAKE_CXX_COMPILER}")
-endif()
-
-get_filename_component(AR_PATH ${CMAKE_CXX_COMPILER} PATH)
-
-find_file(AR_TOOL NAMES llvm-ar PATHS ${AR_PATH})
-
-if(NOT AR_TOOL)
- message(ERROR "Failed to find AR_TOOL in ${AR_PATH}")
-else()
- set(CMAKE_AR ${AR_TOOL})
- message(STATUS "Found CMAKE_AR : " ${CMAKE_AR})
-endif()
diff --git a/cmake/cross_compiling/host.cmake b/cmake/cross_compiling/host.cmake
deleted file mode 100644
index b76dd60046735b596c457255a5dec5379a253d3b..0000000000000000000000000000000000000000
--- a/cmake/cross_compiling/host.cmake
+++ /dev/null
@@ -1,48 +0,0 @@
-# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set(HOST_C_COMPILER $ENV{CC})
-set(HOST_CXX_COMPILER $ENV{CXX})
-
-if(IOS)
- set(default_cc clang)
- set(default_cxx clang++)
-else()
- set(default_cc gcc)
- set(default_cxx g++)
-endif()
-
-if(NOT HOST_C_COMPILER)
- find_program(HOST_C_COMPILER NAMES ${default_cc} PATH
- /usr/bin
- /usr/local/bin)
-endif()
-
-if(NOT HOST_CXX_COMPILER)
- find_program(HOST_CXX_COMPILER NAMES ${default_cxx} PATH
- /usr/bin
- /usr/local/bin)
-endif()
-
-if(NOT HOST_C_COMPILER OR NOT EXISTS ${HOST_C_COMPILER})
- MESSAGE(FATAL_ERROR "Cannot find host C compiler. export CC=/path/to/cc")
-ENDIF()
-
-if(NOT HOST_CXX_COMPILER OR NOT EXISTS ${HOST_CXX_COMPILER})
- MESSAGE(FATAL_ERROR "Cannot find host C compiler. export CC=/path/to/cc")
-ENDIF()
-
-MESSAGE(STATUS "Found host C compiler: " ${HOST_C_COMPILER})
-MESSAGE(STATUS "Found host CXX compiler: " ${HOST_CXX_COMPILER})
-
diff --git a/cmake/cross_compiling/ios.cmake b/cmake/cross_compiling/ios.cmake
deleted file mode 100644
index 76f62765aff791594123d689341b0876b3d0184d..0000000000000000000000000000000000000000
--- a/cmake/cross_compiling/ios.cmake
+++ /dev/null
@@ -1,692 +0,0 @@
-# This file is part of the ios-cmake project. It was retrieved from
-# https://github.com/cristeab/ios-cmake.git, which is a fork of
-# https://code.google.com/p/ios-cmake/. Which in turn is based off of
-# the Platform/Darwin.cmake and Platform/UnixPaths.cmake files which
-# are included with CMake 2.8.4
-#
-# The ios-cmake project is licensed under the new BSD license.
-#
-# Copyright (c) 2014, Bogdan Cristea and LTE Engineering Software,
-# Kitware, Inc., Insight Software Consortium. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-# 1. Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#
-# 2. Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in the
-# documentation and/or other materials provided with the distribution.
-#
-# 3. Neither the name of the copyright holder nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
-# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
-#
-# This file is based off of the Platform/Darwin.cmake and
-# Platform/UnixPaths.cmake files which are included with CMake 2.8.4
-# It has been altered for iOS development.
-#
-# Updated by Alex Stewart (alexs.mac@gmail.com)
-#
-# *****************************************************************************
-# Now maintained by Alexander Widerberg (widerbergaren [at] gmail.com)
-# under the BSD-3-Clause license
-# https://github.com/leetal/ios-cmake
-# *****************************************************************************
-#
-# INFORMATION / HELP
-#
-# The following arguments control the behaviour of this toolchain:
-#
-# PLATFORM: (default "OS")
-# OS = Build for iPhoneOS.
-# OS64 = Build for arm64 iphoneOS.
-# OS64COMBINED = Build for arm64 x86_64 iphoneOS. Combined into FAT STATIC lib (supported on 3.14+ of CMakewith "-G Xcode" argument ONLY)
-# SIMULATOR = Build for x86 i386 iphoneOS Simulator.
-# SIMULATOR64 = Build for x86_64 iphoneOS Simulator.
-# TVOS = Build for arm64 tvOS.
-# TVOSCOMBINED = Build for arm64 x86_64 tvOS. Combined into FAT STATIC lib (supported on 3.14+ of CMake with "-G Xcode" argument ONLY)
-# SIMULATOR_TVOS = Build for x86_64 tvOS Simulator.
-# WATCHOS = Build for armv7k arm64_32 for watchOS.
-# WATCHOSCOMBINED = Build for armv7k arm64_32 x86_64 watchOS. Combined into FAT STATIC lib (supported on 3.14+ of CMake with "-G Xcode" argument ONLY)
-# SIMULATOR_WATCHOS = Build for x86_64 for watchOS Simulator.
-#
-# CMAKE_OSX_SYSROOT: Path to the SDK to use. By default this is
-# automatically determined from PLATFORM and xcodebuild, but
-# can also be manually specified (although this should not be required).
-#
-# CMAKE_DEVELOPER_ROOT: Path to the Developer directory for the platform
-# being compiled for. By default this is automatically determined from
-# CMAKE_OSX_SYSROOT, but can also be manually specified (although this should
-# not be required).
-#
-# DEPLOYMENT_TARGET: Minimum SDK version to target. Default 2.0 on watchOS and 9.0 on tvOS+iOS
-#
-# ENABLE_BITCODE: (1|0) Enables or disables bitcode support. Default 1 (true)
-#
-# ENABLE_ARC: (1|0) Enables or disables ARC support. Default 1 (true, ARC enabled by default)
-#
-# ENABLE_VISIBILITY: (1|0) Enables or disables symbol visibility support. Default 0 (false, visibility hidden by default)
-#
-# ARCHS: (armv7 armv7s armv7k arm64 arm64_32 i386 x86_64) If specified, will override the default architectures for the given PLATFORM
-# OS = armv7 armv7s arm64 (if applicable)
-# OS64 = arm64 (if applicable)
-# SIMULATOR = i386
-# SIMULATOR64 = x86_64
-# TVOS = arm64
-# SIMULATOR_TVOS = x86_64 (i386 has since long been deprecated)
-# WATCHOS = armv7k arm64_32 (if applicable)
-# SIMULATOR_WATCHOS = x86_64 (i386 has since long been deprecated)
-#
-# This toolchain defines the following variables for use externally:
-#
-# XCODE_VERSION: Version number (not including Build version) of Xcode detected.
-# SDK_VERSION: Version of SDK being used.
-# CMAKE_OSX_ARCHITECTURES: Architectures being compiled for (generated from PLATFORM).
-#
-# This toolchain defines the following macros for use externally:
-#
-# set_xcode_property (TARGET XCODE_PROPERTY XCODE_VALUE XCODE_VARIANT)
-# A convenience macro for setting xcode specific properties on targets.
-# Available variants are: All, Release, RelWithDebInfo, Debug, MinSizeRel
-# example: set_xcode_property (myioslib IPHONEOS_DEPLOYMENT_TARGET "3.1" "all").
-#
-# find_host_package (PROGRAM ARGS)
-# A macro used to find executable programs on the host system, not within the
-# environment. Thanks to the android-cmake project for providing the
-# command.
-#
-# ******************************** DEPRECATIONS *******************************
-#
-# IOS_DEPLOYMENT_TARGET: (Deprecated) Alias to DEPLOYMENT_TARGET
-# CMAKE_IOS_DEVELOPER_ROOT: (Deprecated) Alias to CMAKE_DEVELOPER_ROOT
-# IOS_PLATFORM: (Deprecated) Alias to PLATFORM
-# IOS_ARCH: (Deprecated) Alias to ARCHS
-#
-# *****************************************************************************
-#
-
-## Lite settings
-if (ARM_TARGET_OS STREQUAL "ios")
- set(PLATFORM "OS")
-elseif(ARM_TARGET_OS STREQUAL "ios64")
- set(PLATFORM "OS64")
-else()
- return()
-endif()
-add_definitions(-DTARGET_IOS)
-
-# if do not specify the ARM_TARGET_ARCH_ABI then use default all supported
-if(ARM_TARGET_ARCH_ABI STREQUAL "armv7"
- OR ARM_TARGET_ARCH_ABI STREQUAL "armv7hf"
- OR ARM_TARGET_ARCH_ABI STREQUAL "armeabi-v7a")
- set(ARCHS "armv7")
-elseif(ARM_TARGET_ARCH_ABI STREQUAL "armv8"
- OR ARM_TARGET_ARCH_ABI STREQUAL "arm64-v8a")
- set(ARCHS "arm64")
-# else() all default choice: armv7 armv7s arm64
-endif()
-
-if(PLATFORM STREQUAL "OS64" AND ARCHS STREQUAL "armv7")
- message(FATAL_ERROR "Can not build IOS64 with armv7")
-endif()
-
-# TODO(xxx): enable omp on ios
-set(LITE_WITH_OPENMP OFF CACHE STRING "Disable OpenMP when cross-compiling for Android and iOS" FORCE)
-set(ARM_TARGET_LANG "clang" CACHE STRING "Force use clang on IOS" FORCE)
-
-add_definitions(-DLITE_WITH_IPHONE)
-## End lite settings
-
-# Fix for PThread library not in path
-set(CMAKE_THREAD_LIBS_INIT "-lpthread")
-set(CMAKE_HAVE_THREADS_LIBRARY 1)
-set(CMAKE_USE_WIN32_THREADS_INIT 0)
-set(CMAKE_USE_PTHREADS_INIT 1)
-
-# Cache what generator is used
-set(USED_CMAKE_GENERATOR "${CMAKE_GENERATOR}" CACHE STRING "Expose CMAKE_GENERATOR" FORCE)
-
-if(${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.14")
- set(MODERN_CMAKE YES)
- message(STATUS "Merging integrated CMake 3.14+ iOS,tvOS,watchOS,macOS toolchain(s) with this toolchain!")
-endif()
-
-# Get the Xcode version being used.
-execute_process(COMMAND xcodebuild -version
- OUTPUT_VARIABLE XCODE_VERSION
- ERROR_QUIET
- OUTPUT_STRIP_TRAILING_WHITESPACE)
-string(REGEX MATCH "Xcode [0-9\\.]+" XCODE_VERSION "${XCODE_VERSION}")
-string(REGEX REPLACE "Xcode ([0-9\\.]+)" "\\1" XCODE_VERSION "${XCODE_VERSION}")
-message(STATUS "Building with Xcode version: ${XCODE_VERSION}")
-
-######## ALIASES (DEPRECATION WARNINGS)
-
-if(DEFINED IOS_PLATFORM)
- set(PLATFORM ${IOS_PLATFORM})
- message(DEPRECATION "IOS_PLATFORM argument is DEPRECATED. Consider using the new PLATFORM argument instead.")
-endif()
-
-if(DEFINED IOS_DEPLOYMENT_TARGET)
- set(DEPLOYMENT_TARGET ${IOS_DEPLOYMENT_TARGET})
- message(DEPRECATION "IOS_DEPLOYMENT_TARGET argument is DEPRECATED. Consider using the new DEPLOYMENT_TARGET argument instead.")
-endif()
-
-if(DEFINED CMAKE_IOS_DEVELOPER_ROOT)
- set(CMAKE_DEVELOPER_ROOT ${CMAKE_IOS_DEVELOPER_ROOT})
- message(DEPRECATION "CMAKE_IOS_DEVELOPER_ROOT argument is DEPRECATED. Consider using the new CMAKE_DEVELOPER_ROOT argument instead.")
-endif()
-
-if(DEFINED IOS_ARCH)
- set(ARCHS ${IOS_ARCH})
- message(DEPRECATION "IOS_ARCH argument is DEPRECATED. Consider using the new ARCHS argument instead.")
-endif()
-
-######## END ALIASES
-
-# Unset the FORCE on cache variables if in try_compile()
-set(FORCE_CACHE FORCE)
-get_property(_CMAKE_IN_TRY_COMPILE GLOBAL PROPERTY IN_TRY_COMPILE)
-if(_CMAKE_IN_TRY_COMPILE)
- unset(FORCE_CACHE)
-endif()
-
-# Default to building for iPhoneOS if not specified otherwise, and we cannot
-# determine the platform from the CMAKE_OSX_ARCHITECTURES variable. The use
-# of CMAKE_OSX_ARCHITECTURES is such that try_compile() projects can correctly
-# determine the value of PLATFORM from the root project, as
-# CMAKE_OSX_ARCHITECTURES is propagated to them by CMake.
-if(NOT DEFINED PLATFORM)
- if (CMAKE_OSX_ARCHITECTURES)
- if(CMAKE_OSX_ARCHITECTURES MATCHES ".*arm.*" AND CMAKE_OSX_SYSROOT MATCHES ".*iphoneos.*")
- set(PLATFORM "OS")
- elseif(CMAKE_OSX_ARCHITECTURES MATCHES "i386" AND CMAKE_OSX_SYSROOT MATCHES ".*iphonesimulator.*")
- set(PLATFORM "SIMULATOR")
- elseif(CMAKE_OSX_ARCHITECTURES MATCHES "x86_64" AND CMAKE_OSX_SYSROOT MATCHES ".*iphonesimulator.*")
- set(PLATFORM "SIMULATOR64")
- elseif(CMAKE_OSX_ARCHITECTURES MATCHES "arm64" AND CMAKE_OSX_SYSROOT MATCHES ".*appletvos.*")
- set(PLATFORM "TVOS")
- elseif(CMAKE_OSX_ARCHITECTURES MATCHES "x86_64" AND CMAKE_OSX_SYSROOT MATCHES ".*appletvsimulator.*")
- set(PLATFORM "SIMULATOR_TVOS")
- elseif(CMAKE_OSX_ARCHITECTURES MATCHES ".*armv7k.*" AND CMAKE_OSX_SYSROOT MATCHES ".*watchos.*")
- set(PLATFORM "WATCHOS")
- elseif(CMAKE_OSX_ARCHITECTURES MATCHES "i386" AND CMAKE_OSX_SYSROOT MATCHES ".*watchsimulator.*")
- set(PLATFORM "SIMULATOR_WATCHOS")
- endif()
- endif()
- if (NOT PLATFORM)
- set(PLATFORM "OS")
- endif()
-endif()
-
-set(PLATFORM_INT "${PLATFORM}" CACHE STRING "Type of platform for which the build targets.")
-
-# Handle the case where we are targeting iOS and a version above 10.0 (32-bit support dropped officially)
-if(PLATFORM_INT STREQUAL "OS" AND DEPLOYMENT_TARGET VERSION_GREATER_EQUAL 10.0)
- set(PLATFORM_INT "OS64")
- message(STATUS "Targeting minimum SDK version ${DEPLOYMENT_TARGET}. Dropping 32-bit support.")
-elseif(PLATFORM_INT STREQUAL "SIMULATOR" AND DEPLOYMENT_TARGET VERSION_GREATER_EQUAL 10.0)
- set(PLATFORM_INT "SIMULATOR64")
- message(STATUS "Targeting minimum SDK version ${DEPLOYMENT_TARGET}. Dropping 32-bit support.")
-endif()
-
-# Determine the platform name and architectures for use in xcodebuild commands
-# from the specified PLATFORM name.
-if(PLATFORM_INT STREQUAL "OS")
- set(SDK_NAME iphoneos)
- if(NOT ARCHS)
- set(ARCHS armv7 armv7s arm64)
- endif()
-elseif(PLATFORM_INT STREQUAL "OS64")
- set(SDK_NAME iphoneos)
- if(NOT ARCHS)
- if (XCODE_VERSION VERSION_GREATER 10.0)
- set(ARCHS arm64) # Add arm64e when Apple have fixed the integration issues with it, libarclite_iphoneos.a is currently missung bitcode markers for example
- else()
- set(ARCHS arm64)
- endif()
- endif()
-elseif(PLATFORM_INT STREQUAL "OS64COMBINED")
- set(SDK_NAME iphoneos)
- if(MODERN_CMAKE)
- if(NOT ARCHS)
- if (XCODE_VERSION VERSION_GREATER 10.0)
- set(ARCHS arm64 x86_64) # Add arm64e when Apple have fixed the integration issues with it, libarclite_iphoneos.a is currently missung bitcode markers for example
- else()
- set(ARCHS arm64 x86_64)
- endif()
- endif()
- else()
- message(FATAL_ERROR "Please make sure that you are running CMake 3.14+ to make the OS64COMBINED setting work")
- endif()
-elseif(PLATFORM_INT STREQUAL "SIMULATOR")
- set(SDK_NAME iphonesimulator)
- if(NOT ARCHS)
- set(ARCHS i386)
- endif()
- message(DEPRECATION "SIMULATOR IS DEPRECATED. Consider using SIMULATOR64 instead.")
-elseif(PLATFORM_INT STREQUAL "SIMULATOR64")
- set(SDK_NAME iphonesimulator)
- if(NOT ARCHS)
- set(ARCHS x86_64)
- endif()
-elseif(PLATFORM_INT STREQUAL "TVOS")
- set(SDK_NAME appletvos)
- if(NOT ARCHS)
- set(ARCHS arm64)
- endif()
-elseif (PLATFORM_INT STREQUAL "TVOSCOMBINED")
- set(SDK_NAME appletvos)
- if(MODERN_CMAKE)
- if(NOT ARCHS)
- set(ARCHS arm64 x86_64)
- endif()
- else()
- message(FATAL_ERROR "Please make sure that you are running CMake 3.14+ to make the TVOSCOMBINED setting work")
- endif()
-elseif(PLATFORM_INT STREQUAL "SIMULATOR_TVOS")
- set(SDK_NAME appletvsimulator)
- if(NOT ARCHS)
- set(ARCHS x86_64)
- endif()
-elseif(PLATFORM_INT STREQUAL "WATCHOS")
- set(SDK_NAME watchos)
- if(NOT ARCHS)
- if (XCODE_VERSION VERSION_GREATER 10.0)
- set(ARCHS armv7k arm64_32)
- else()
- set(ARCHS armv7k)
- endif()
- endif()
-elseif(PLATFORM_INT STREQUAL "WATCHOSCOMBINED")
- set(SDK_NAME watchos)
- if(MODERN_CMAKE)
- if(NOT ARCHS)
- if (XCODE_VERSION VERSION_GREATER 10.0)
- set(ARCHS armv7k arm64_32 i386)
- else()
- set(ARCHS armv7k i386)
- endif()
- endif()
- else()
- message(FATAL_ERROR "Please make sure that you are running CMake 3.14+ to make the WATCHOSCOMBINED setting work")
- endif()
-elseif(PLATFORM_INT STREQUAL "SIMULATOR_WATCHOS")
- set(SDK_NAME watchsimulator)
- if(NOT ARCHS)
- set(ARCHS i386)
- endif()
-else()
- message(FATAL_ERROR "Invalid PLATFORM: ${PLATFORM_INT}")
-endif()
-message(STATUS "Configuring ${SDK_NAME} build for platform: ${PLATFORM_INT}, architecture(s): ${ARCHS}")
-
-if(MODERN_CMAKE AND PLATFORM_INT MATCHES ".*COMBINED" AND NOT USED_CMAKE_GENERATOR MATCHES "Xcode")
- message(FATAL_ERROR "The COMBINED options only work with Xcode generator, -G Xcode")
-endif()
-
-# If user did not specify the SDK root to use, then query xcodebuild for it.
-execute_process(COMMAND xcodebuild -version -sdk ${SDK_NAME} Path
- OUTPUT_VARIABLE CMAKE_OSX_SYSROOT_INT
- ERROR_QUIET
- OUTPUT_STRIP_TRAILING_WHITESPACE)
-if (NOT DEFINED CMAKE_OSX_SYSROOT_INT AND NOT DEFINED CMAKE_OSX_SYSROOT)
- message(SEND_ERROR "Please make sure that Xcode is installed and that the toolchain"
- "is pointing to the correct path. Please run:"
- "sudo xcode-select -s /Applications/Xcode.app/Contents/Developer"
- "and see if that fixes the problem for you.")
- message(FATAL_ERROR "Invalid CMAKE_OSX_SYSROOT: ${CMAKE_OSX_SYSROOT} "
- "does not exist.")
-elseif(DEFINED CMAKE_OSX_SYSROOT)
- message(STATUS "Using SDK: ${CMAKE_OSX_SYSROOT} for platform: ${PLATFORM_INT} when checking compatibility")
-elseif(DEFINED CMAKE_OSX_SYSROOT_INT)
- message(STATUS "Using SDK: ${CMAKE_OSX_SYSROOT_INT} for platform: ${PLATFORM_INT}")
- set(CMAKE_OSX_SYSROOT "${CMAKE_OSX_SYSROOT_INT}" CACHE INTERNAL "")
-endif()
-
-# Set Xcode property for SDKROOT as well if Xcode generator is used
-if(USED_CMAKE_GENERATOR MATCHES "Xcode")
- set(CMAKE_OSX_SYSROOT "${SDK_NAME}" CACHE INTERNAL "")
- if(NOT DEFINED CMAKE_XCODE_ATTRIBUTE_DEVELOPMENT_TEAM)
- set(CMAKE_XCODE_ATTRIBUTE_DEVELOPMENT_TEAM 123456789A CACHE INTERNAL "")
- endif()
-endif()
-
-# Specify minimum version of deployment target.
-if(NOT DEFINED DEPLOYMENT_TARGET)
- if (PLATFORM_INT STREQUAL "WATCHOS" OR PLATFORM_INT STREQUAL "SIMULATOR_WATCHOS")
- # Unless specified, SDK version 2.0 is used by default as minimum target version (watchOS).
- set(DEPLOYMENT_TARGET "2.0"
- CACHE STRING "Minimum SDK version to build for." )
- else()
- # Unless specified, SDK version 9.0 is used by default as minimum target version (iOS, tvOS).
- set(DEPLOYMENT_TARGET "9.0"
- CACHE STRING "Minimum SDK version to build for." )
- endif()
- message(STATUS "Using the default min-version since DEPLOYMENT_TARGET not provided!")
-endif()
-# Use bitcode or not
-if(NOT DEFINED ENABLE_BITCODE AND NOT ARCHS MATCHES "((^|, )(i386|x86_64))+")
- # Unless specified, enable bitcode support by default
- message(STATUS "Enabling bitcode support by default. ENABLE_BITCODE not provided!")
- set(ENABLE_BITCODE TRUE)
-elseif(NOT DEFINED ENABLE_BITCODE)
- message(STATUS "Disabling bitcode support by default on simulators. ENABLE_BITCODE not provided for override!")
- set(ENABLE_BITCODE FALSE)
-endif()
-set(ENABLE_BITCODE_INT ${ENABLE_BITCODE} CACHE BOOL "Whether or not to enable bitcode" ${FORCE_CACHE})
-# Use ARC or not
-if(NOT DEFINED ENABLE_ARC)
- # Unless specified, enable ARC support by default
- set(ENABLE_ARC TRUE)
- message(STATUS "Enabling ARC support by default. ENABLE_ARC not provided!")
-endif()
-set(ENABLE_ARC_INT ${ENABLE_ARC} CACHE BOOL "Whether or not to enable ARC" ${FORCE_CACHE})
-# Use hidden visibility or not
-if(NOT DEFINED ENABLE_VISIBILITY)
- # Unless specified, disable symbols visibility by default
- set(ENABLE_VISIBILITY FALSE)
- message(STATUS "Hiding symbols visibility by default. ENABLE_VISIBILITY not provided!")
-endif()
-set(ENABLE_VISIBILITY_INT ${ENABLE_VISIBILITY} CACHE BOOL "Whether or not to hide symbols (-fvisibility=hidden)" ${FORCE_CACHE})
-# Get the SDK version information.
-execute_process(COMMAND xcodebuild -sdk ${CMAKE_OSX_SYSROOT} -version SDKVersion
- OUTPUT_VARIABLE SDK_VERSION
- ERROR_QUIET
- OUTPUT_STRIP_TRAILING_WHITESPACE)
-
-# Find the Developer root for the specific iOS platform being compiled for
-# from CMAKE_OSX_SYSROOT. Should be ../../ from SDK specified in
-# CMAKE_OSX_SYSROOT. There does not appear to be a direct way to obtain
-# this information from xcrun or xcodebuild.
-if (NOT DEFINED CMAKE_DEVELOPER_ROOT AND NOT USED_CMAKE_GENERATOR MATCHES "Xcode")
- get_filename_component(PLATFORM_SDK_DIR ${CMAKE_OSX_SYSROOT} PATH)
- get_filename_component(CMAKE_DEVELOPER_ROOT ${PLATFORM_SDK_DIR} PATH)
-
- if (NOT DEFINED CMAKE_DEVELOPER_ROOT)
- message(FATAL_ERROR "Invalid CMAKE_DEVELOPER_ROOT: "
- "${CMAKE_DEVELOPER_ROOT} does not exist.")
- endif()
-endif()
-# Find the C & C++ compilers for the specified SDK.
-if(NOT CMAKE_C_COMPILER)
- execute_process(COMMAND xcrun -sdk ${CMAKE_OSX_SYSROOT} -find clang
- OUTPUT_VARIABLE CMAKE_C_COMPILER
- ERROR_QUIET
- OUTPUT_STRIP_TRAILING_WHITESPACE)
- message(STATUS "Using C compiler: ${CMAKE_C_COMPILER}")
-endif()
-if(NOT CMAKE_CXX_COMPILER)
- execute_process(COMMAND xcrun -sdk ${CMAKE_OSX_SYSROOT} -find clang++
- OUTPUT_VARIABLE CMAKE_CXX_COMPILER
- ERROR_QUIET
- OUTPUT_STRIP_TRAILING_WHITESPACE)
- message(STATUS "Using CXX compiler: ${CMAKE_CXX_COMPILER}")
-endif()
-# Find (Apple's) libtool.
-execute_process(COMMAND xcrun -sdk ${CMAKE_OSX_SYSROOT} -find libtool
- OUTPUT_VARIABLE BUILD_LIBTOOL
- ERROR_QUIET
- OUTPUT_STRIP_TRAILING_WHITESPACE)
-message(STATUS "Using libtool: ${BUILD_LIBTOOL}")
-# Configure libtool to be used instead of ar + ranlib to build static libraries.
-# This is required on Xcode 7+, but should also work on previous versions of
-# Xcode.
-set(CMAKE_C_CREATE_STATIC_LIBRARY
- "${BUILD_LIBTOOL} -static -o ")
-set(CMAKE_CXX_CREATE_STATIC_LIBRARY
- "${BUILD_LIBTOOL} -static -o ")
-# Get the version of Darwin (OS X) of the host.
-execute_process(COMMAND uname -r
- OUTPUT_VARIABLE CMAKE_HOST_SYSTEM_VERSION
- ERROR_QUIET
- OUTPUT_STRIP_TRAILING_WHITESPACE)
-# CMake 3.14+ support building for iOS, watchOS and tvOS out of the box.
-if(MODERN_CMAKE)
- if(SDK_NAME MATCHES "iphone")
- set(CMAKE_SYSTEM_NAME iOS CACHE INTERNAL "" ${FORCE_CACHE})
- elseif(SDK_NAME MATCHES "appletv")
- set(CMAKE_SYSTEM_NAME tvOS CACHE INTERNAL "" ${FORCE_CACHE})
- elseif(SDK_NAME MATCHES "watch")
- set(CMAKE_SYSTEM_NAME watchOS CACHE INTERNAL "" ${FORCE_CACHE})
- endif()
-
- # Provide flags for a combined FAT library build on newer CMake versions
- if(PLATFORM_INT MATCHES ".*COMBINED")
- set(CMAKE_XCODE_ATTRIBUTE_ONLY_ACTIVE_ARCH NO CACHE INTERNAL "")
- set(CMAKE_IOS_INSTALL_COMBINED YES CACHE INTERNAL "")
- message(STATUS "Will combine built (static) artifacts into FAT lib...")
- endif()
-else()
- # Legacy code path prior to CMake 3.14
- set(CMAKE_SYSTEM_NAME Darwin CACHE INTERNAL "" ${FORCE_CACHE})
-endif()
-# Standard settings.
-set(CMAKE_SYSTEM_VERSION ${SDK_VERSION} CACHE INTERNAL "")
-set(UNIX TRUE CACHE BOOL "")
-set(APPLE TRUE CACHE BOOL "")
-set(IOS TRUE CACHE BOOL "")
-set(CMAKE_AR ar CACHE FILEPATH "" FORCE)
-set(CMAKE_RANLIB ranlib CACHE FILEPATH "" FORCE)
-set(CMAKE_STRIP strip CACHE FILEPATH "" FORCE)
-# Set the architectures for which to build.
-set(CMAKE_OSX_ARCHITECTURES ${ARCHS} CACHE STRING "Build architecture for iOS")
-# Change the type of target generated for try_compile() so it'll work when cross-compiling
-set(CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY)
-# All iOS/Darwin specific settings - some may be redundant.
-set(CMAKE_SHARED_LIBRARY_PREFIX "lib")
-set(CMAKE_SHARED_LIBRARY_SUFFIX ".dylib")
-set(CMAKE_SHARED_MODULE_PREFIX "lib")
-set(CMAKE_SHARED_MODULE_SUFFIX ".so")
-set(CMAKE_C_COMPILER_ABI ELF)
-set(CMAKE_CXX_COMPILER_ABI ELF)
-set(CMAKE_C_HAS_ISYSROOT 1)
-set(CMAKE_CXX_HAS_ISYSROOT 1)
-set(CMAKE_MODULE_EXISTS 1)
-set(CMAKE_DL_LIBS "")
-set(CMAKE_C_OSX_COMPATIBILITY_VERSION_FLAG "-compatibility_version ")
-set(CMAKE_C_OSX_CURRENT_VERSION_FLAG "-current_version ")
-set(CMAKE_CXX_OSX_COMPATIBILITY_VERSION_FLAG "${CMAKE_C_OSX_COMPATIBILITY_VERSION_FLAG}")
-set(CMAKE_CXX_OSX_CURRENT_VERSION_FLAG "${CMAKE_C_OSX_CURRENT_VERSION_FLAG}")
-
-if(ARCHS MATCHES "((^|, )(arm64|arm64e|x86_64))+")
- set(CMAKE_C_SIZEOF_DATA_PTR 8)
- set(CMAKE_CXX_SIZEOF_DATA_PTR 8)
- if(ARCHS MATCHES "((^|, )(arm64|arm64e))+")
- set(CMAKE_SYSTEM_PROCESSOR "arm64")
- else()
- set(CMAKE_SYSTEM_PROCESSOR "x86_64")
- endif()
- message(STATUS "Using a data_ptr size of 8")
-else()
- set(CMAKE_C_SIZEOF_DATA_PTR 4)
- set(CMAKE_CXX_SIZEOF_DATA_PTR 4)
- set(CMAKE_SYSTEM_PROCESSOR "arm")
- message(STATUS "Using a data_ptr size of 4")
-endif()
-
-message(STATUS "Building for minimum ${SDK_NAME} version: ${DEPLOYMENT_TARGET}"
- " (SDK version: ${SDK_VERSION})")
-# Note that only Xcode 7+ supports the newer more specific:
-# -m${SDK_NAME}-version-min flags, older versions of Xcode use:
-# -m(ios/ios-simulator)-version-min instead.
-if(PLATFORM_INT STREQUAL "OS" OR PLATFORM_INT STREQUAL "OS64")
- if(XCODE_VERSION VERSION_LESS 7.0)
- set(SDK_NAME_VERSION_FLAGS
- "-mios-version-min=${DEPLOYMENT_TARGET}")
- else()
- # Xcode 7.0+ uses flags we can build directly from SDK_NAME.
- set(SDK_NAME_VERSION_FLAGS
- "-m${SDK_NAME}-version-min=${DEPLOYMENT_TARGET}")
- endif()
-elseif(PLATFORM_INT STREQUAL "TVOS")
- set(SDK_NAME_VERSION_FLAGS
- "-mtvos-version-min=${DEPLOYMENT_TARGET}")
-elseif(PLATFORM_INT STREQUAL "SIMULATOR_TVOS")
- set(SDK_NAME_VERSION_FLAGS
- "-mtvos-simulator-version-min=${DEPLOYMENT_TARGET}")
-elseif(PLATFORM_INT STREQUAL "WATCHOS")
- set(SDK_NAME_VERSION_FLAGS
- "-mwatchos-version-min=${DEPLOYMENT_TARGET}")
-elseif(PLATFORM_INT STREQUAL "SIMULATOR_WATCHOS")
- set(SDK_NAME_VERSION_FLAGS
- "-mwatchos-simulator-version-min=${DEPLOYMENT_TARGET}")
-else()
- # SIMULATOR or SIMULATOR64 both use -mios-simulator-version-min.
- set(SDK_NAME_VERSION_FLAGS
- "-mios-simulator-version-min=${DEPLOYMENT_TARGET}")
-endif()
-message(STATUS "Version flags set to: ${SDK_NAME_VERSION_FLAGS}")
-set(CMAKE_OSX_DEPLOYMENT_TARGET ${DEPLOYMENT_TARGET} CACHE STRING
- "Set CMake deployment target" ${FORCE_CACHE})
-
-if(ENABLE_BITCODE_INT)
- set(BITCODE "-fembed-bitcode")
- set(CMAKE_XCODE_ATTRIBUTE_BITCODE_GENERATION_MODE bitcode CACHE INTERNAL "")
- message(STATUS "Enabling bitcode support.")
-else()
- set(BITCODE "")
- set(CMAKE_XCODE_ATTRIBUTE_ENABLE_BITCODE NO CACHE INTERNAL "")
- message(STATUS "Disabling bitcode support.")
-endif()
-
-if(ENABLE_ARC_INT)
- set(FOBJC_ARC "-fobjc-arc")
- set(CMAKE_XCODE_ATTRIBUTE_CLANG_ENABLE_OBJC_ARC YES CACHE INTERNAL "")
- message(STATUS "Enabling ARC support.")
-else()
- set(FOBJC_ARC "-fno-objc-arc")
- set(CMAKE_XCODE_ATTRIBUTE_CLANG_ENABLE_OBJC_ARC NO CACHE INTERNAL "")
- message(STATUS "Disabling ARC support.")
-endif()
-
-if(NOT ENABLE_VISIBILITY_INT)
- set(VISIBILITY "-fvisibility=hidden")
- set(CMAKE_XCODE_ATTRIBUTE_GCC_SYMBOLS_PRIVATE_EXTERN YES CACHE INTERNAL "")
- message(STATUS "Hiding symbols (-fvisibility=hidden).")
-else()
- set(VISIBILITY "")
- set(CMAKE_XCODE_ATTRIBUTE_GCC_SYMBOLS_PRIVATE_EXTERN NO CACHE INTERNAL "")
-endif()
-
-#Check if Xcode generator is used, since that will handle these flags automagically
-if(USED_CMAKE_GENERATOR MATCHES "Xcode")
- message(STATUS "Not setting any manual command-line buildflags, since Xcode is selected as generator.")
-else()
- set(CMAKE_C_FLAGS
- "${SDK_NAME_VERSION_FLAGS} ${BITCODE} -fobjc-abi-version=2 ${FOBJC_ARC} ${CMAKE_C_FLAGS}")
- # Hidden visibilty is required for C++ on iOS.
- set(CMAKE_CXX_FLAGS
- "${SDK_NAME_VERSION_FLAGS} ${BITCODE} ${VISIBILITY} -fvisibility-inlines-hidden -fobjc-abi-version=2 ${FOBJC_ARC} ${CMAKE_CXX_FLAGS}")
- set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS} -O0 -g ${CMAKE_CXX_FLAGS_DEBUG}")
- set(CMAKE_CXX_FLAGS_MINSIZEREL "${CMAKE_CXX_FLAGS} -DNDEBUG -Os -ffast-math ${CMAKE_CXX_FLAGS_MINSIZEREL}")
- set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS} -DNDEBUG -O2 -g -ffast-math ${CMAKE_CXX_FLAGS_RELWITHDEBINFO}")
- set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS} -DNDEBUG -O3 -ffast-math ${CMAKE_CXX_FLAGS_RELEASE}")
- set(CMAKE_C_LINK_FLAGS "${SDK_NAME_VERSION_FLAGS} -Wl,-search_paths_first ${CMAKE_C_LINK_FLAGS}")
- set(CMAKE_CXX_LINK_FLAGS "${SDK_NAME_VERSION_FLAGS} -Wl,-search_paths_first ${CMAKE_CXX_LINK_FLAGS}")
-
- # In order to ensure that the updated compiler flags are used in try_compile()
- # tests, we have to forcibly set them in the CMake cache, not merely set them
- # in the local scope.
- list(APPEND VARS_TO_FORCE_IN_CACHE
- CMAKE_C_FLAGS
- CMAKE_CXX_FLAGS
- CMAKE_CXX_FLAGS_DEBUG
- CMAKE_CXX_FLAGS_RELWITHDEBINFO
- CMAKE_CXX_FLAGS_MINSIZEREL
- CMAKE_CXX_FLAGS_RELEASE
- CMAKE_C_LINK_FLAGS
- CMAKE_CXX_LINK_FLAGS)
- foreach(VAR_TO_FORCE ${VARS_TO_FORCE_IN_CACHE})
- set(${VAR_TO_FORCE} "${${VAR_TO_FORCE}}" CACHE STRING "")
- endforeach()
-endif()
-
-set(CMAKE_PLATFORM_HAS_INSTALLNAME 1)
-set(CMAKE_SHARED_LINKER_FLAGS "-rpath @executable_path/Frameworks -rpath @loader_path/Frameworks")
-set(CMAKE_SHARED_LIBRARY_CREATE_C_FLAGS "-dynamiclib -Wl,-headerpad_max_install_names")
-set(CMAKE_SHARED_MODULE_CREATE_C_FLAGS "-bundle -Wl,-headerpad_max_install_names")
-set(CMAKE_SHARED_MODULE_LOADER_C_FLAG "-Wl,-bundle_loader,")
-set(CMAKE_SHARED_MODULE_LOADER_CXX_FLAG "-Wl,-bundle_loader,")
-set(CMAKE_FIND_LIBRARY_SUFFIXES ".tbd" ".dylib" ".so" ".a")
-set(CMAKE_SHARED_LIBRARY_SONAME_C_FLAG "-install_name")
-
-# Hack: if a new cmake (which uses CMAKE_INSTALL_NAME_TOOL) runs on an old
-# build tree (where install_name_tool was hardcoded) and where
-# CMAKE_INSTALL_NAME_TOOL isn't in the cache and still cmake didn't fail in
-# CMakeFindBinUtils.cmake (because it isn't rerun) hardcode
-# CMAKE_INSTALL_NAME_TOOL here to install_name_tool, so it behaves as it did
-# before, Alex.
-if(NOT DEFINED CMAKE_INSTALL_NAME_TOOL)
- find_program(CMAKE_INSTALL_NAME_TOOL install_name_tool)
-endif(NOT DEFINED CMAKE_INSTALL_NAME_TOOL)
-
-# Set the find root to the iOS developer roots and to user defined paths.
-set(CMAKE_FIND_ROOT_PATH ${CMAKE_DEVELOPER_ROOT} ${CMAKE_OSX_SYSROOT_INT}
- ${CMAKE_PREFIX_PATH} CACHE STRING "Root path that will be prepended to all search paths")
-# Default to searching for frameworks first.
-set(CMAKE_FIND_FRAMEWORK FIRST)
-# Set up the default search directories for frameworks.
-set(CMAKE_FRAMEWORK_PATH
- ${CMAKE_DEVELOPER_ROOT}/Library/Frameworks
- ${CMAKE_DEVELOPER_ROOT}/Library/PrivateFrameworks
- ${CMAKE_OSX_SYSROOT_INT}/System/Library/Frameworks
- ${CMAKE_FRAMEWORK_PATH} CACHE STRING "Frameworks search paths")
-
-# By default, search both the specified iOS SDK and the remainder of the host filesystem.
-if(NOT CMAKE_FIND_ROOT_PATH_MODE_PROGRAM)
- set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM BOTH CACHE STRING "" ${FORCE_CACHE})
-endif()
-if(NOT CMAKE_FIND_ROOT_PATH_MODE_LIBRARY)
- set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY BOTH CACHE STRING "" ${FORCE_CACHE})
-endif()
-if(NOT CMAKE_FIND_ROOT_PATH_MODE_INCLUDE)
- set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE BOTH CACHE STRING "" ${FORCE_CACHE})
-endif()
-if(NOT CMAKE_FIND_ROOT_PATH_MODE_PACKAGE)
- set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE BOTH CACHE STRING "" ${FORCE_CACHE})
-endif()
-
-#
-# Some helper-macros below to simplify and beautify the CMakeFile
-#
-
-# This little macro lets you set any XCode specific property.
-macro(set_xcode_property TARGET XCODE_PROPERTY XCODE_VALUE XCODE_RELVERSION)
- set(XCODE_RELVERSION_I "${XCODE_RELVERSION}")
- if(XCODE_RELVERSION_I STREQUAL "All")
- set_property(TARGET ${TARGET} PROPERTY
- XCODE_ATTRIBUTE_${XCODE_PROPERTY} "${XCODE_VALUE}")
- else()
- set_property(TARGET ${TARGET} PROPERTY
- XCODE_ATTRIBUTE_${XCODE_PROPERTY}[variant=${XCODE_RELVERSION_I}] "${XCODE_VALUE}")
- endif()
-endmacro(set_xcode_property)
-# This macro lets you find executable programs on the host system.
-macro(find_host_package)
- set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
- set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY NEVER)
- set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE NEVER)
- set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE NEVER)
- set(IOS FALSE)
- find_package(${ARGN})
- set(IOS TRUE)
- set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM BOTH)
- set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY BOTH)
- set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE BOTH)
- set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE BOTH)
-endmacro(find_host_package)
diff --git a/cmake/cross_compiling/npu.cmake b/cmake/cross_compiling/npu.cmake
deleted file mode 100644
index 863200986c93ea09d3fa3049fe684b32c2fb52dd..0000000000000000000000000000000000000000
--- a/cmake/cross_compiling/npu.cmake
+++ /dev/null
@@ -1,90 +0,0 @@
-# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-if(NOT LITE_WITH_NPU)
- return()
-endif()
-
-if(NOT DEFINED NPU_DDK_ROOT)
- set(NPU_DDK_ROOT $ENV{NPU_DDK_ROOT})
- if(NOT NPU_DDK_ROOT)
- message(FATAL_ERROR "Must set NPU_DDK_ROOT or env NPU_DDK_ROOT when LITE_WITH_NPU=ON")
- endif()
-endif()
-
-message(STATUS "NPU_DDK_ROOT: ${NPU_DDK_ROOT}")
-find_path(NPU_DDK_INC NAMES HiAiModelManagerService.h
- PATHS ${NPU_DDK_ROOT}/include NO_DEFAULT_PATH)
-if(NOT NPU_DDK_INC)
- message(FATAL_ERROR "Can not find HiAiModelManagerService.h in ${NPU_DDK_ROOT}/include")
-endif()
-
-include_directories("${NPU_DDK_ROOT}")
-
-set(NPU_SUB_LIB_PATH "lib64")
-if(ARM_TARGET_ARCH_ABI STREQUAL "armv8")
- set(NPU_SUB_LIB_PATH "lib64")
-endif()
-
-if(ARM_TARGET_ARCH_ABI STREQUAL "armv7")
- set(NPU_SUB_LIB_PATH "lib")
-endif()
-
-find_library(NPU_DDK_HIAI_FILE NAMES hiai
- PATHS ${NPU_DDK_ROOT}/${NPU_SUB_LIB_PATH})
-
-find_library(NPU_DDK_IR_FILE NAMES hiai_ir
- PATHS ${NPU_DDK_ROOT}/${NPU_SUB_LIB_PATH})
-
-find_library(NPU_DDK_IR_BUILD_FILE NAMES hiai_ir_build
- PATHS ${NPU_DDK_ROOT}/${NPU_SUB_LIB_PATH})
-
-find_library(NPU_DDK_PROTO_FILE NAMES protobuf-lite
- PATHS ${NPU_DDK_ROOT}/${NPU_SUB_LIB_PATH})
-
-if(NOT NPU_DDK_HIAI_FILE)
- message(FATAL_ERROR "Can not find NPU_DDK_HIAI_FILE in ${NPU_DDK_ROOT}")
-else()
- message(STATUS "Found NPU_DDK HIAI Library: ${NPU_DDK_HIAI_FILE}")
- add_library(npu_ddk_hiai SHARED IMPORTED GLOBAL)
- set_property(TARGET npu_ddk_hiai PROPERTY IMPORTED_LOCATION ${NPU_DDK_HIAI_FILE})
-endif()
-
-if(NOT NPU_DDK_IR_FILE)
- message(FATAL_ERROR "Can not find NPU_DDK_IR_FILE in ${NPU_DDK_ROOT}")
-else()
- message(STATUS "Found NPU_DDK IR Library: ${NPU_DDK_IR_FILE}")
- add_library(npu_ddk_ir SHARED IMPORTED GLOBAL)
- set_property(TARGET npu_ddk_ir PROPERTY IMPORTED_LOCATION ${NPU_DDK_IR_FILE})
-endif()
-
-if(NOT NPU_DDK_IR_BUILD_FILE)
- message(FATAL_ERROR "Can not find NPU_DDK_IR_BUILD_FILE in ${NPU_DDK_ROOT}")
-else()
- message(STATUS "Found NPU_DDK IR_BUILD Library: ${NPU_DDK_IR_BUILD_FILE}")
- add_library(npu_ddk_ir_build SHARED IMPORTED GLOBAL)
- set_property(TARGET npu_ddk_ir_build PROPERTY IMPORTED_LOCATION ${NPU_DDK_IR_BUILD_FILE})
-endif()
-
-if(NOT NPU_DDK_PROTO_FILE)
- message(FATAL_ERROR "Can not find NPU_DDK_PROTO_FILE in ${NPU_DDK_ROOT}")
-else()
- message(STATUS "Found NPU_DDK Protobuf Library: ${NPU_DDK_PROTO_FILE}")
- add_library(npu_ddk_proto SHARED IMPORTED GLOBAL)
- set_property(TARGET npu_ddk_proto PROPERTY IMPORTED_LOCATION ${NPU_DDK_PROTO_FILE})
-endif()
-
-set(npu_ddk_libs npu_ddk_hiai npu_ddk_ir npu_ddk_ir_build npu_ddk_proto CACHE INTERNAL "npu ddk libs")
-
-
diff --git a/cmake/cross_compiling/postproject.cmake b/cmake/cross_compiling/postproject.cmake
deleted file mode 100644
index 33254df03c43c2648fb33effe491e5956edf60a9..0000000000000000000000000000000000000000
--- a/cmake/cross_compiling/postproject.cmake
+++ /dev/null
@@ -1,99 +0,0 @@
-# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-if(NOT LITE_WITH_LIGHT_WEIGHT_FRAMEWORK)
- return()
-endif()
-
-include(CheckCXXCompilerFlag)
-
-if(ANDROID)
- include(cross_compiling/findar)
-
- set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -llog -fPIC")
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -llog -fPIC")
-endif()
-
-if(ARMLINUX)
- if(ARMLINUX_ARCH_ABI STREQUAL "armv8")
- set(CMAKE_CXX_FLAGS "-march=armv8-a ${CMAKE_CXX_FLAGS}")
- set(CMAKE_C_FLAGS "-march=armv8-a ${CMAKE_C_FLAGS}")
- message(STATUS "NEON is enabled on arm64-v8a")
- endif()
-
- if(ARMLINUX_ARCH_ABI STREQUAL "armv7")
- set(CMAKE_CXX_FLAGS "-march=armv7-a -mfloat-abi=softfp -mfpu=neon-vfpv4 ${CMAKE_CXX_FLAGS}")
- set(CMAKE_C_FLAGS "-march=armv7-a -mfloat-abi=softfp -mfpu=neon-vfpv4 ${CMAKE_C_FLAGS}")
- message(STATUS "NEON is enabled on arm-v7a with softfp")
- endif()
-
- if(ARMLINUX_ARCH_ABI STREQUAL "armv7hf")
- set(CMAKE_CXX_FLAGS "-march=armv7-a -mfloat-abi=hard -mfpu=neon-vfpv4 ${CMAKE_CXX_FLAGS}")
- set(CMAKE_C_FLAGS "-march=armv7-a -mfloat-abi=hard -mfpu=neon-vfpv4 ${CMAKE_C_FLAGS}" )
- message(STATUS "NEON is enabled on arm-v7a with hard float")
- endif()
-endif()
-
-function(check_linker_flag)
- foreach(flag ${ARGN})
- set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${flag}")
- check_cxx_compiler_flag("" out_var)
- if(${out_var})
- set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${flag}")
- endif()
- endforeach()
- set(CMAKE_SHARED_LINKER_FLAGS ${CMAKE_SHARED_LINKER_FLAGS} PARENT_SCOPE)
-endfunction()
-set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
-if (LITE_ON_TINY_PUBLISH)
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -ffast-math -Ofast -Os -fno-exceptions -fomit-frame-pointer -fno-asynchronous-unwind-tables -fno-unwind-tables")
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -flto -fvisibility=hidden -fvisibility-inlines-hidden -fdata-sections -ffunction-sections")
- check_linker_flag(-Wl,--gc-sections)
-endif()
-
-if(LITE_WITH_OPENMP)
- find_package(OpenMP REQUIRED)
- if(OPENMP_FOUND OR OpenMP_CXX_FOUND)
- add_definitions(-DARM_WITH_OMP)
- set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}")
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}")
- message(STATUS "Found OpenMP ${OpenMP_VERSION} ${OpenMP_CXX_VERSION}")
- message(STATUS "OpenMP C flags: ${OpenMP_C_FLAGS}")
- message(STATUS "OpenMP CXX flags: ${OpenMP_CXX_FLAGS}")
- message(STATUS "OpenMP OpenMP_CXX_LIB_NAMES: ${OpenMP_CXX_LIB_NAMES}")
- message(STATUS "OpenMP OpenMP_CXX_LIBRARIES: ${OpenMP_CXX_LIBRARIES}")
- else()
- message(FATAL_ERROR "Could not found OpenMP!")
- endif()
-endif()
-
-# third party cmake args
-set(CROSS_COMPILE_CMAKE_ARGS
- "-DCMAKE_SYSTEM_NAME=${CMAKE_SYSTEM_NAME}"
- "-DCMAKE_SYSTEM_VERSION=${CMAKE_SYSTEM_VERSION}")
-
-if(ANDROID)
- set(CROSS_COMPILE_CMAKE_ARGS ${CROSS_COMPILE_CMAKE_ARGS}
- "-DCMAKE_ANDROID_ARCH_ABI=${CMAKE_ANDROID_ARCH_ABI}"
- "-DCMAKE_ANDROID_NDK=${CMAKE_ANDROID_NDK}"
- "-DCMAKE_ANDROID_STL_TYPE=${CMAKE_ANDROID_STL_TYPE}"
- "-DCMAKE_ANDROID_NDK_TOOLCHAIN_VERSION=${CMAKE_ANDROID_NDK_TOOLCHAIN_VERSION}")
-endif()
-
-if(IOS)
- set(CROSS_COMPILE_CMAKE_ARGS ${CROSS_COMPILE_CMAKE_ARGS}
- "-DCMAKE_OSX_ARCHITECTURES=${CMAKE_OSX_ARCHITECTURES}"
- "-DCMAKE_SYSTEM_PROCESSOR=${CMAKE_SYSTEM_PROCESSOR}"
- "-DCMAKE_OSX_SYSROOT=${CMAKE_OSX_SYSROOT}")
-endif()
diff --git a/cmake/cross_compiling/preproject.cmake b/cmake/cross_compiling/preproject.cmake
deleted file mode 100644
index 813d1910fcf8816434bb9ca2976a7357a3998e2e..0000000000000000000000000000000000000000
--- a/cmake/cross_compiling/preproject.cmake
+++ /dev/null
@@ -1,59 +0,0 @@
-# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-if(NOT LITE_WITH_LIGHT_WEIGHT_FRAMEWORK)
- return()
-endif()
-
-cmake_minimum_required(VERSION 3.10)
-
-# define check function
-function(check_input_var VAR_NAME)
- set(options "")
- set(oneValueArgs "")
- set(multiValueArgs DEFAULT LIST)
- cmake_parse_arguments(check_input_var "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
-
- set(var_out "")
- if(NOT DEFINED ${VAR_NAME})
- set(var_out ${check_input_var_DEFAULT})
- else()
- set(var_out ${${VAR_NAME}})
- endif()
-
- if(NOT var_out IN_LIST check_input_var_LIST)
- message(FATAL_ERROR "${VAR_NAME}:${var_out} must be in one of ${check_input_var_LIST}")
- endif()
- set(${VAR_NAME} ${var_out} PARENT_SCOPE)
-endfunction(check_input_var)
-
-check_input_var(ARM_TARGET_OS DEFAULT "android" LIST "android" "armlinux" "ios" "ios64")
-check_input_var(ARM_TARGET_ARCH_ABI DEFAULT "armv8" LIST "armv8" "armv7" "armv7hf" "arm64-v8a" "armeabi-v7a")
-check_input_var(ARM_TARGET_LANG DEFAULT "gcc" LIST "gcc" "clang")
-check_input_var(ARM_TARGET_LIB_TYPE DEFAULT "static" LIST "static" "shared")
-
-include(cross_compiling/armlinux)
-include(cross_compiling/android)
-include(cross_compiling/ios)
-include(cross_compiling/host)
-
-if(NOT CMAKE_BUILD_TYPE)
- set(CMAKE_BUILD_TYPE "Release" CACHE STRING "Default use Release in android" FORCE)
-endif()
-
-if(NOT THIRD_PARTY_BUILD_TYPE)
- set(THIRD_PARTY_BUILD_TYPE "MinSizeRel" CACHE STRING "Default use MinSizeRel in android" FORCE)
-endif()
-
-message(STATUS "Lite ARM Compile ${ARM_TARGET_OS} with ${ARM_TARGET_ARCH_ABI} ${ARM_TARGET_LANG}")
diff --git a/cmake/cuda.cmake b/cmake/cuda.cmake
deleted file mode 100644
index 1e6f34a62129e2ca0a717ceb489d98b56b78d47a..0000000000000000000000000000000000000000
--- a/cmake/cuda.cmake
+++ /dev/null
@@ -1,228 +0,0 @@
-if(NOT LITE_WITH_CUDA)
- return()
-endif()
-
-set(paddle_known_gpu_archs "30 35 50 52 60 61 70")
-set(paddle_known_gpu_archs7 "30 35 50 52")
-set(paddle_known_gpu_archs8 "30 35 50 52 60 61")
-set(paddle_known_gpu_archs9 "30 35 50 52 60 61 70")
-set(paddle_known_gpu_archs10 "30 35 50 52 60 61 70 75")
-
-######################################################################################
-# A function for automatic detection of GPUs installed (if autodetection is enabled)
-# Usage:
-# detect_installed_gpus(out_variable)
-function(detect_installed_gpus out_variable)
- if(NOT CUDA_gpu_detect_output)
- set(cufile ${PROJECT_BINARY_DIR}/detect_cuda_archs.cu)
-
- file(WRITE ${cufile} ""
- "#include \n"
- "int main() {\n"
- " int count = 0;\n"
- " if (cudaSuccess != cudaGetDeviceCount(&count)) return -1;\n"
- " if (count == 0) return -1;\n"
- " for (int device = 0; device < count; ++device) {\n"
- " cudaDeviceProp prop;\n"
- " if (cudaSuccess == cudaGetDeviceProperties(&prop, device))\n"
- " std::printf(\"%d.%d \", prop.major, prop.minor);\n"
- " }\n"
- " return 0;\n"
- "}\n")
-
- execute_process(COMMAND "${CUDA_NVCC_EXECUTABLE}" "-ccbin=${CUDA_HOST_COMPILER}"
- "--run" "${cufile}"
- WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/CMakeFiles/"
- RESULT_VARIABLE nvcc_res OUTPUT_VARIABLE nvcc_out
- ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)
-
- if(nvcc_res EQUAL 0)
- # only keep the last line of nvcc_out
- STRING(REGEX REPLACE ";" "\\\\;" nvcc_out "${nvcc_out}")
- STRING(REGEX REPLACE "\n" ";" nvcc_out "${nvcc_out}")
- list(GET nvcc_out -1 nvcc_out)
- string(REPLACE "2.1" "2.1(2.0)" nvcc_out "${nvcc_out}")
- set(CUDA_gpu_detect_output ${nvcc_out} CACHE INTERNAL "Returned GPU architetures from detect_installed_gpus tool" FORCE)
- endif()
- endif()
-
- if(NOT CUDA_gpu_detect_output)
- message(STATUS "Automatic GPU detection failed. Building for all known architectures.")
- set(${out_variable} ${paddle_known_gpu_archs} PARENT_SCOPE)
- else()
- set(${out_variable} ${CUDA_gpu_detect_output} PARENT_SCOPE)
- endif()
-endfunction()
-
-
-########################################################################
-# Function for selecting GPU arch flags for nvcc based on CUDA_ARCH_NAME
-# Usage:
-# select_nvcc_arch_flags(out_variable)
-function(select_nvcc_arch_flags out_variable)
- # List of arch names
- set(archs_names "Kepler" "Maxwell" "Pascal" "Volta" "Turing" "All" "Manual")
- set(archs_name_default "All")
- list(APPEND archs_names "Auto")
-
- # set CUDA_ARCH_NAME strings (so it will be seen as dropbox in CMake-Gui)
- set(CUDA_ARCH_NAME ${archs_name_default} CACHE STRING "Select target NVIDIA GPU achitecture.")
- set_property( CACHE CUDA_ARCH_NAME PROPERTY STRINGS "" ${archs_names} )
- mark_as_advanced(CUDA_ARCH_NAME)
-
- # verify CUDA_ARCH_NAME value
- if(NOT ";${archs_names};" MATCHES ";${CUDA_ARCH_NAME};")
- string(REPLACE ";" ", " archs_names "${archs_names}")
- message(FATAL_ERROR "Only ${archs_names} architeture names are supported.")
- endif()
-
- if(${CUDA_ARCH_NAME} STREQUAL "Manual")
- set(CUDA_ARCH_BIN ${paddle_known_gpu_archs} CACHE STRING "Specify 'real' GPU architectures to build binaries for, BIN(PTX) format is supported")
- set(CUDA_ARCH_PTX "50" CACHE STRING "Specify 'virtual' PTX architectures to build PTX intermediate code for")
- mark_as_advanced(CUDA_ARCH_BIN CUDA_ARCH_PTX)
- else()
- unset(CUDA_ARCH_BIN CACHE)
- unset(CUDA_ARCH_PTX CACHE)
- endif()
-
- if(${CUDA_ARCH_NAME} STREQUAL "Kepler")
- set(cuda_arch_bin "30 35")
- elseif(${CUDA_ARCH_NAME} STREQUAL "Maxwell")
- set(cuda_arch_bin "50")
- elseif(${CUDA_ARCH_NAME} STREQUAL "Pascal")
- set(cuda_arch_bin "60 61")
- elseif(${CUDA_ARCH_NAME} STREQUAL "Volta")
- set(cuda_arch_bin "70")
- elseif(${CUDA_ARCH_NAME} STREQUAL "Turing")
- set(cuda_arch_bin "75")
- elseif(${CUDA_ARCH_NAME} STREQUAL "All")
- set(cuda_arch_bin ${paddle_known_gpu_archs})
- elseif(${CUDA_ARCH_NAME} STREQUAL "Auto")
- detect_installed_gpus(cuda_arch_bin)
- else() # (${CUDA_ARCH_NAME} STREQUAL "Manual")
- set(cuda_arch_bin ${CUDA_ARCH_BIN})
- endif()
-
- # remove dots and convert to lists
- string(REGEX REPLACE "\\." "" cuda_arch_bin "${cuda_arch_bin}")
- string(REGEX REPLACE "\\." "" cuda_arch_ptx "${CUDA_ARCH_PTX}")
- string(REGEX MATCHALL "[0-9()]+" cuda_arch_bin "${cuda_arch_bin}")
- string(REGEX MATCHALL "[0-9]+" cuda_arch_ptx "${cuda_arch_ptx}")
- list(REMOVE_DUPLICATES cuda_arch_bin)
- list(REMOVE_DUPLICATES cuda_arch_ptx)
-
- set(nvcc_flags "")
- set(nvcc_archs_readable "")
-
- # Tell NVCC to add binaries for the specified GPUs
- foreach(arch ${cuda_arch_bin})
- if(arch MATCHES "([0-9]+)\\(([0-9]+)\\)")
- # User explicitly specified PTX for the concrete BIN
- list(APPEND nvcc_flags -gencode arch=compute_${CMAKE_MATCH_2},code=sm_${CMAKE_MATCH_1})
- list(APPEND nvcc_archs_readable sm_${CMAKE_MATCH_1})
- else()
- # User didn't explicitly specify PTX for the concrete BIN, we assume PTX=BIN
- list(APPEND nvcc_flags -gencode arch=compute_${arch},code=sm_${arch})
- list(APPEND nvcc_archs_readable sm_${arch})
- endif()
- endforeach()
-
- # Tell NVCC to add PTX intermediate code for the specified architectures
- foreach(arch ${cuda_arch_ptx})
- list(APPEND nvcc_flags -gencode arch=compute_${arch},code=compute_${arch})
- list(APPEND nvcc_archs_readable compute_${arch})
- endforeach()
-
- string(REPLACE ";" " " nvcc_archs_readable "${nvcc_archs_readable}")
- set(${out_variable} ${nvcc_flags} PARENT_SCOPE)
- set(${out_variable}_readable ${nvcc_archs_readable} PARENT_SCOPE)
-endfunction()
-
-message(STATUS "CUDA detected: " ${CUDA_VERSION})
-if (${CUDA_VERSION} LESS 7.0)
- set(paddle_known_gpu_archs ${paddle_known_gpu_archs})
- add_definitions("-DPADDLE_CUDA_BINVER=\"60\"")
-elseif (${CUDA_VERSION} LESS 8.0) # CUDA 7.x
- set(paddle_known_gpu_archs ${paddle_known_gpu_archs7})
- list(APPEND CUDA_NVCC_FLAGS "-D_MWAITXINTRIN_H_INCLUDED")
- list(APPEND CUDA_NVCC_FLAGS "-D__STRICT_ANSI__")
- add_definitions("-DPADDLE_CUDA_BINVER=\"70\"")
-elseif (${CUDA_VERSION} LESS 9.0) # CUDA 8.x
- set(paddle_known_gpu_archs ${paddle_known_gpu_archs8})
- list(APPEND CUDA_NVCC_FLAGS "-D_MWAITXINTRIN_H_INCLUDED")
- list(APPEND CUDA_NVCC_FLAGS "-D__STRICT_ANSI__")
- # CUDA 8 may complain that sm_20 is no longer supported. Suppress the
- # warning for now.
- list(APPEND CUDA_NVCC_FLAGS "-Wno-deprecated-gpu-targets")
- add_definitions("-DPADDLE_CUDA_BINVER=\"80\"")
-elseif (${CUDA_VERSION} LESS 10.0) # CUDA 9.x
- set(paddle_known_gpu_archs ${paddle_known_gpu_archs9})
- list(APPEND CUDA_NVCC_FLAGS "-D_MWAITXINTRIN_H_INCLUDED")
- list(APPEND CUDA_NVCC_FLAGS "-D__STRICT_ANSI__")
- add_definitions("-DPADDLE_CUDA_BINVER=\"90\"")
-elseif (${CUDA_VERSION} LESS 11.0) # CUDA 10.x
- set(paddle_known_gpu_archs ${paddle_known_gpu_archs10})
- list(APPEND CUDA_NVCC_FLAGS "-D_MWAITXINTRIN_H_INCLUDED")
- list(APPEND CUDA_NVCC_FLAGS "-D__STRICT_ANSI__")
- add_definitions("-DPADDLE_CUDA_BINVER=\"100\"")
-endif()
-
-include_directories(${CUDA_INCLUDE_DIRS})
-if(NOT WITH_DSO)
- if(WIN32)
- set_property(GLOBAL PROPERTY CUDA_MODULES ${CUDNN_LIBRARY} ${CUDA_CUBLAS_LIBRARIES} ${CUDA_curand_LIBRARY})
- endif(WIN32)
-endif(NOT WITH_DSO)
-
-# setting nvcc arch flags
-select_nvcc_arch_flags(NVCC_FLAGS_EXTRA)
-list(APPEND CUDA_NVCC_FLAGS ${NVCC_FLAGS_EXTRA})
-message(STATUS "Added CUDA NVCC flags for: ${NVCC_FLAGS_EXTRA_readable}")
-
-# Set C++11 support
-set(CUDA_PROPAGATE_HOST_FLAGS OFF)
-
-# Release/Debug flags set by cmake. Such as -O3 -g -DNDEBUG etc.
-# So, don't set these flags here.
-if (NOT WIN32) # windows msvc2015 support c++11 natively.
-# -std=c++11 -fPIC not recoginize by msvc, -Xcompiler will be added by cmake.
-list(APPEND CUDA_NVCC_FLAGS "-std=c++11")
-list(APPEND CUDA_NVCC_FLAGS "-Xcompiler -fPIC")
-endif(NOT WIN32)
-
-if(WITH_FAST_MATH)
- # Make use of fast math library. https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html
- list(APPEND CUDA_NVCC_FLAGS "--use_fast_math")
-endif()
-# in cuda9, suppress cuda warning on eigen
-list(APPEND CUDA_NVCC_FLAGS "-w")
-# Set :expt-relaxed-constexpr to suppress Eigen warnings
-list(APPEND CUDA_NVCC_FLAGS "--expt-relaxed-constexpr")
-
-if (NOT WIN32)
- if(CMAKE_BUILD_TYPE STREQUAL "Debug")
- list(APPEND CUDA_NVCC_FLAGS ${CMAKE_CXX_FLAGS_DEBUG})
- elseif(CMAKE_BUILD_TYPE STREQUAL "Release")
- list(APPEND CUDA_NVCC_FLAGS ${CMAKE_CXX_FLAGS_RELEASE})
- elseif(CMAKE_BUILD_TYPE STREQUAL "RelWithDebInfo")
- list(APPEND CUDA_NVCC_FLAGS ${CMAKE_CXX_FLAGS_RELWITHDEBINFO})
- elseif(CMAKE_BUILD_TYPE STREQUAL "MinSizeRel")
- # nvcc 9 does not support -Os. Use Release flags instead
- list(APPEND CUDA_NVCC_FLAGS ${CMAKE_CXX_FLAGS_RELEASE})
- endif()
-else(NOT WIN32)
- list(APPEND CUDA_NVCC_FLAGS "-Xcompiler \"/wd 4244 /wd 4267 /wd 4819\"")
- list(APPEND CUDA_NVCC_FLAGS "--compiler-options;/bigobj")
- if(CMAKE_BUILD_TYPE STREQUAL "Debug")
- list(APPEND CUDA_NVCC_FLAGS "-g -G")
- # match the cl's _ITERATOR_DEBUG_LEVEL
- list(APPEND CUDA_NVCC_FLAGS "-D_DEBUG")
- elseif(CMAKE_BUILD_TYPE STREQUAL "Release")
- list(APPEND CUDA_NVCC_FLAGS "-O3 -DNDEBUG")
- else()
- message(FATAL "Windows only support Release or Debug build now. Please set visual studio build type to Release/Debug, x64 build.")
-endif()
-endif(NOT WIN32)
-
-mark_as_advanced(CUDA_BUILD_CUBIN CUDA_BUILD_EMULATION CUDA_VERBOSE_BUILD)
-mark_as_advanced(CUDA_SDK_ROOT_DIR CUDA_SEPARABLE_COMPILATION)
diff --git a/cmake/cudnn.cmake b/cmake/cudnn.cmake
deleted file mode 100644
index 3775d6cc2bdaa617f225b4cff9a03092bd9a19cc..0000000000000000000000000000000000000000
--- a/cmake/cudnn.cmake
+++ /dev/null
@@ -1,99 +0,0 @@
-if(NOT LITE_WITH_CUDA)
- return()
-endif()
-
-if(WIN32)
- set(CUDNN_ROOT ${CUDA_TOOLKIT_ROOT_DIR})
-else(WIN32)
- set(CUDNN_ROOT "/usr" CACHE PATH "CUDNN ROOT")
-endif(WIN32)
-
-find_path(CUDNN_INCLUDE_DIR cudnn.h
- PATHS ${CUDNN_ROOT} ${CUDNN_ROOT}/include
- $ENV{CUDNN_ROOT} $ENV{CUDNN_ROOT}/include ${CUDA_TOOLKIT_INCLUDE}
- NO_DEFAULT_PATH
-)
-
-get_filename_component(__libpath_hist ${CUDA_CUDART_LIBRARY} PATH)
-
-set(TARGET_ARCH "x86_64")
-if(NOT ${CMAKE_SYSTEM_PROCESSOR})
- set(TARGET_ARCH ${CMAKE_SYSTEM_PROCESSOR})
-endif()
-
-list(APPEND CUDNN_CHECK_LIBRARY_DIRS
- ${CUDNN_ROOT}
- ${CUDNN_ROOT}/lib64
- ${CUDNN_ROOT}/lib
- ${CUDNN_ROOT}/lib/${TARGET_ARCH}-linux-gnu
- ${CUDNN_ROOT}/local/cuda-${CUDA_VERSION}/targets/${TARGET_ARCH}-linux/lib/
- $ENV{CUDNN_ROOT}
- $ENV{CUDNN_ROOT}/lib64
- $ENV{CUDNN_ROOT}/lib
- /usr/lib
- ${CUDA_TOOLKIT_ROOT_DIR}
- ${CUDA_TOOLKIT_ROOT_DIR}/lib/x64
- )
-set(CUDNN_LIB_NAME "libcudnn.so")
-
-if(WIN32)
-# only support cudnn7
-set(CUDNN_LIB_NAME "cudnn.lib" "cudnn64_7.dll")
-endif(WIN32)
-
-if(APPLE)
-set(CUDNN_LIB_NAME "libcudnn.dylib" "libcudnn.so")
-endif(APPLE)
-
-find_library(CUDNN_LIBRARY NAMES ${CUDNN_LIB_NAME} # libcudnn_static.a
- PATHS ${CUDNN_CHECK_LIBRARY_DIRS} ${CUDNN_INCLUDE_DIR} ${__libpath_hist}
- NO_DEFAULT_PATH
- DOC "Path to cuDNN library.")
-
-
-if(CUDNN_INCLUDE_DIR AND CUDNN_LIBRARY)
- set(CUDNN_FOUND ON)
-else()
- set(CUDNN_FOUND OFF)
-endif()
-
-if(CUDNN_FOUND)
- file(READ ${CUDNN_INCLUDE_DIR}/cudnn.h CUDNN_VERSION_FILE_CONTENTS)
-
- get_filename_component(CUDNN_LIB_PATH ${CUDNN_LIBRARY} DIRECTORY)
-
- string(REGEX MATCH "define CUDNN_VERSION +([0-9]+)"
- CUDNN_VERSION "${CUDNN_VERSION_FILE_CONTENTS}")
- string(REGEX REPLACE "define CUDNN_VERSION +([0-9]+)" "\\1"
- CUDNN_VERSION "${CUDNN_VERSION}")
-
- if("${CUDNN_VERSION}" STREQUAL "2000")
- message(STATUS "Current cuDNN version is v2. ")
- else()
- string(REGEX MATCH "define CUDNN_MAJOR +([0-9]+)" CUDNN_MAJOR_VERSION
- "${CUDNN_VERSION_FILE_CONTENTS}")
- string(REGEX REPLACE "define CUDNN_MAJOR +([0-9]+)" "\\1"
- CUDNN_MAJOR_VERSION "${CUDNN_MAJOR_VERSION}")
- string(REGEX MATCH "define CUDNN_MINOR +([0-9]+)" CUDNN_MINOR_VERSION
- "${CUDNN_VERSION_FILE_CONTENTS}")
- string(REGEX REPLACE "define CUDNN_MINOR +([0-9]+)" "\\1"
- CUDNN_MINOR_VERSION "${CUDNN_MINOR_VERSION}")
- string(REGEX MATCH "define CUDNN_PATCHLEVEL +([0-9]+)"
- CUDNN_PATCHLEVEL_VERSION "${CUDNN_VERSION_FILE_CONTENTS}")
- string(REGEX REPLACE "define CUDNN_PATCHLEVEL +([0-9]+)" "\\1"
- CUDNN_PATCHLEVEL_VERSION "${CUDNN_PATCHLEVEL_VERSION}")
-
- if(NOT CUDNN_MAJOR_VERSION)
- set(CUDNN_VERSION "???")
- else()
- add_definitions("-DPADDLE_CUDNN_BINVER=\"${CUDNN_MAJOR_VERSION}\"")
- math(EXPR CUDNN_VERSION
- "${CUDNN_MAJOR_VERSION} * 1000 +
- ${CUDNN_MINOR_VERSION} * 100 + ${CUDNN_PATCHLEVEL_VERSION}")
- endif()
-
- message(STATUS "Current cuDNN header is ${CUDNN_INCLUDE_DIR}/cudnn.h. "
- "Current cuDNN version is v${CUDNN_MAJOR_VERSION}. ")
-
- endif()
-endif()
diff --git a/cmake/cupti.cmake b/cmake/cupti.cmake
deleted file mode 100644
index 72ed0f1e5858d6d836743ceb038c7f4ad8f194cf..0000000000000000000000000000000000000000
--- a/cmake/cupti.cmake
+++ /dev/null
@@ -1,41 +0,0 @@
-if(NOT WITH_GPU)
- return()
-endif()
-
-
-set(CUPTI_ROOT "/usr" CACHE PATH "CUPTI ROOT")
-find_path(CUPTI_INCLUDE_DIR cupti.h
- PATHS ${CUPTI_ROOT} ${CUPTI_ROOT}/include
- $ENV{CUPTI_ROOT} $ENV{CUPTI_ROOT}/include
- ${CUDA_TOOLKIT_ROOT_DIR}/extras/CUPTI/include
- NO_DEFAULT_PATH
- )
-
-get_filename_component(__libpath_hist ${CUDA_CUDART_LIBRARY} PATH)
-
-set(TARGET_ARCH "x86_64")
-if(NOT ${CMAKE_SYSTEM_PROCESSOR})
- set(TARGET_ARCH ${CMAKE_SYSTEM_PROCESSOR})
-endif()
-
-list(APPEND CUPTI_CHECK_LIBRARY_DIRS
- ${CUPTI_ROOT}
- ${CUPTI_ROOT}/lib64
- ${CUPTI_ROOT}/lib
- ${CUPTI_ROOT}/lib/${TARGET_ARCH}-linux-gnu
- $ENV{CUPTI_ROOT}
- $ENV{CUPTI_ROOT}/lib64
- $ENV{CUPTI_ROOT}/lib
- /usr/lib
- ${CUDA_TOOLKIT_ROOT_DIR}/extras/CUPTI/lib64)
-find_library(CUPTI_LIBRARY NAMES libcupti.so libcupti.dylib # libcupti_static.a
- PATHS ${CUPTI_CHECK_LIBRARY_DIRS} ${CUPTI_INCLUDE_DIR} ${__libpath_hist}
- NO_DEFAULT_PATH
- DOC "Path to cuPTI library.")
-
-get_filename_component(CUPTI_LIBRARY_PATH ${CUPTI_LIBRARY} DIRECTORY)
-if(CUPTI_INCLUDE_DIR AND CUPTI_LIBRARY)
- set(CUPTI_FOUND ON)
-else()
- set(CUPTI_FOUND OFF)
-endif()
diff --git a/cmake/external/eigen.cmake b/cmake/external/eigen.cmake
deleted file mode 100644
index bd0d117a633824d93c403b8167ff49505160069b..0000000000000000000000000000000000000000
--- a/cmake/external/eigen.cmake
+++ /dev/null
@@ -1,54 +0,0 @@
-INCLUDE(ExternalProject)
-
-SET(EIGEN_SOURCE_DIR ${THIRD_PARTY_PATH}/eigen3)
-SET(EIGEN_INCLUDE_DIR ${EIGEN_SOURCE_DIR}/src/extern_eigen3)
-INCLUDE_DIRECTORIES(${EIGEN_INCLUDE_DIR})
-if(NOT WITH_FAST_MATH)
- # EIGEN_FAST_MATH: https://eigen.tuxfamily.org/dox/TopicPreprocessorDirectives.html
- # enables some optimizations which might affect the accuracy of the result.
- # This currently enables the SSE vectorization of sin() and cos(),
- # and speedups sqrt() for single precision.
- # Defined to 1 by default. Define it to 0 to disable.
- add_definitions(-DEIGEN_FAST_MATH=0)
-endif()
-
-if(WITH_AMD_GPU)
- ExternalProject_Add(
- extern_eigen3
- ${EXTERNAL_PROJECT_LOG_ARGS}
- GIT_REPOSITORY "https://github.com/sabreshao/hipeigen.git"
- GIT_TAG 7cb2b6e5a4b4a1efe658abb215cd866c6fb2275e
- PREFIX ${EIGEN_SOURCE_DIR}
- UPDATE_COMMAND ""
- CONFIGURE_COMMAND ""
- BUILD_COMMAND ""
- INSTALL_COMMAND ""
- TEST_COMMAND ""
- )
-else()
- ExternalProject_Add(
- extern_eigen3
- ${EXTERNAL_PROJECT_LOG_ARGS}
- GIT_REPOSITORY "https://github.com/eigenteam/eigen-git-mirror"
- # eigen on cuda9.1 missing header of math_funtions.hpp
- # https://stackoverflow.com/questions/43113508/math-functions-hpp-not-found-when-using-cuda-with-eigen
- GIT_TAG 917060c364181f33a735dc023818d5a54f60e54c
- PREFIX ${EIGEN_SOURCE_DIR}
- DOWNLOAD_NAME "eigen"
- UPDATE_COMMAND ""
- CONFIGURE_COMMAND ""
- BUILD_COMMAND ""
- INSTALL_COMMAND ""
- TEST_COMMAND ""
- )
-endif()
-
-if (${CMAKE_VERSION} VERSION_LESS "3.3.0")
- set(dummyfile ${CMAKE_CURRENT_BINARY_DIR}/eigen3_dummy.c)
- file(WRITE ${dummyfile} "const char *dummy_eigen3 = \"${dummyfile}\";")
- add_library(eigen3 STATIC ${dummyfile})
-else()
- add_library(eigen3 INTERFACE)
-endif()
-
-add_dependencies(eigen3 extern_eigen3)
diff --git a/cmake/external/gflags.cmake b/cmake/external/gflags.cmake
deleted file mode 100644
index 44ede9617159fde51939f0ad71fc98e95325865f..0000000000000000000000000000000000000000
--- a/cmake/external/gflags.cmake
+++ /dev/null
@@ -1,75 +0,0 @@
-# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-INCLUDE(ExternalProject)
-
-SET(GFLAGS_SOURCES_DIR ${CMAKE_SOURCE_DIR}/third-party/gflags)
-SET(GFLAGS_INSTALL_DIR ${THIRD_PARTY_PATH}/install/gflags)
-SET(GFLAGS_INCLUDE_DIR "${GFLAGS_INSTALL_DIR}/include" CACHE PATH "gflags include directory." FORCE)
-IF(WIN32)
- set(GFLAGS_LIBRARIES "${GFLAGS_INSTALL_DIR}/lib/libgflags.lib" CACHE FILEPATH "GFLAGS_LIBRARIES" FORCE)
-ELSE(WIN32)
- set(GFLAGS_LIBRARIES "${GFLAGS_INSTALL_DIR}/lib/libgflags.a" CACHE FILEPATH "GFLAGS_LIBRARIES" FORCE)
-ENDIF(WIN32)
-
-INCLUDE_DIRECTORIES(${GFLAGS_INCLUDE_DIR})
-
-SET(OPTIONAL_ARGS "-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}"
- "-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}"
- "-DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}"
- "-DCMAKE_CXX_FLAGS_RELEASE=${CMAKE_CXX_FLAGS_RELEASE}"
- "-DCMAKE_CXX_FLAGS_DEBUG=${CMAKE_CXX_FLAGS_DEBUG}"
- "-DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}"
- "-DCMAKE_C_FLAGS_DEBUG=${CMAKE_C_FLAGS_DEBUG}"
- "-DCMAKE_C_FLAGS_RELEASE=${CMAKE_C_FLAGS_RELEASE}")
-
-ExternalProject_Add(
- extern_gflags
- ${EXTERNAL_PROJECT_LOG_ARGS}
- GIT_REPOSITORY ""
- GIT_TAG 77592648e3f3be87d6c7123eb81cbad75f9aef5a
- SOURCE_DIR ${GFLAGS_SOURCES_DIR}
- PREFIX ${GFLAGS_INCLUDE_DIR}
- UPDATE_COMMAND ""
- CMAKE_ARGS -DBUILD_STATIC_LIBS=ON
- -DCMAKE_INSTALL_PREFIX=${GFLAGS_INSTALL_DIR}
- -DCMAKE_POSITION_INDEPENDENT_CODE=ON
- -DBUILD_TESTING=OFF
- -DCMAKE_BUILD_TYPE=${THIRD_PARTY_BUILD_TYPE}
- ${CROSS_COMPILE_CMAKE_ARGS}
- ${OPTIONAL_ARGS}
- ${EXTERNAL_OPTIONAL_ARGS}
- CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${GFLAGS_INSTALL_DIR}
- -DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON
- -DCMAKE_BUILD_TYPE:STRING=${THIRD_PARTY_BUILD_TYPE}
-)
-IF(WIN32)
- IF(NOT EXISTS "${GFLAGS_INSTALL_DIR}/lib/libgflags.lib")
- add_custom_command(TARGET extern_gflags POST_BUILD
- COMMAND cmake -E copy ${GFLAGS_INSTALL_DIR}/lib/gflags_static.lib ${GFLAGS_INSTALL_DIR}/lib/libgflags.lib
- )
- ENDIF()
-ENDIF(WIN32)
-ADD_LIBRARY(gflags STATIC IMPORTED GLOBAL)
-SET_PROPERTY(TARGET gflags PROPERTY IMPORTED_LOCATION ${GFLAGS_LIBRARIES})
-ADD_DEPENDENCIES(gflags extern_gflags)
-
-# On Windows (including MinGW), the Shlwapi library is used by gflags if available.
-if (WIN32)
- include(CheckIncludeFileCXX)
- check_include_file_cxx("shlwapi.h" HAVE_SHLWAPI)
- if (HAVE_SHLWAPI)
- set_property(GLOBAL PROPERTY OS_DEPENDENCY_MODULES shlwapi.lib)
- endif(HAVE_SHLWAPI)
-endif (WIN32)
diff --git a/cmake/external/glog.cmake b/cmake/external/glog.cmake
deleted file mode 100644
index 970020d784fdc621cd053cd8e53be4b1f861ec67..0000000000000000000000000000000000000000
--- a/cmake/external/glog.cmake
+++ /dev/null
@@ -1,77 +0,0 @@
-# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-INCLUDE(ExternalProject)
-
-SET(GLOG_SOURCES_DIR ${THIRD_PARTY_PATH}/glog)
-SET(GLOG_INSTALL_DIR ${THIRD_PARTY_PATH}/install/glog)
-SET(GLOG_INCLUDE_DIR "${GLOG_INSTALL_DIR}/include" CACHE PATH "glog include directory." FORCE)
-
-IF(WIN32)
- SET(GLOG_LIBRARIES "${GLOG_INSTALL_DIR}/lib/libglog.lib" CACHE FILEPATH "glog library." FORCE)
- SET(GLOG_CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4267 /wd4530")
-ELSE(WIN32)
- SET(GLOG_LIBRARIES "${GLOG_INSTALL_DIR}/lib/libglog.a" CACHE FILEPATH "glog library." FORCE)
- SET(GLOG_CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS})
-ENDIF(WIN32)
-
-INCLUDE_DIRECTORIES(${GLOG_INCLUDE_DIR})
-
-SET(GLOG_REPOSITORY "https://github.com/google/glog.git")
-SET(GLOG_TAG "v0.3.5")
-
-SET(OPTIONAL_ARGS "-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}"
- "-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}"
- "-DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}"
- "-DCMAKE_CXX_FLAGS_RELEASE=${CMAKE_CXX_FLAGS_RELEASE}"
- "-DCMAKE_CXX_FLAGS_DEBUG=${CMAKE_CXX_FLAGS_DEBUG}"
- "-DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}"
- "-DCMAKE_C_FLAGS_DEBUG=${CMAKE_C_FLAGS_DEBUG}"
- "-DCMAKE_C_FLAGS_RELEASE=${CMAKE_C_FLAGS_RELEASE}")
-
-ExternalProject_Add(
- extern_glog
- ${EXTERNAL_PROJECT_LOG_ARGS}
- DEPENDS gflags
- GIT_REPOSITORY ${GLOG_REPOSITORY}
- GIT_TAG ${GLOG_TAG}
- PREFIX ${GLOG_SOURCES_DIR}
- UPDATE_COMMAND ""
- CMAKE_ARGS ${CROSS_COMPILE_CMAKE_ARGS}
- ${OPTIONAL_ARGS}
- -DCMAKE_INSTALL_PREFIX=${GLOG_INSTALL_DIR}
- -DCMAKE_INSTALL_LIBDIR=${GLOG_INSTALL_DIR}/lib
- -DCMAKE_POSITION_INDEPENDENT_CODE=ON
- -DWITH_GFLAGS=ON
- -Dgflags_DIR=${GFLAGS_INSTALL_DIR}/lib/cmake/gflags
- -DBUILD_TESTING=OFF
- -DCMAKE_BUILD_TYPE=${THIRD_PARTY_BUILD_TYPE}
- ${EXTERNAL_OPTIONAL_ARGS}
- CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${GLOG_INSTALL_DIR}
- -DCMAKE_INSTALL_LIBDIR:PATH=${GLOG_INSTALL_DIR}/lib
- -DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON
- -DCMAKE_BUILD_TYPE:STRING=${THIRD_PARTY_BUILD_TYPE}
-)
-IF(WIN32)
- IF(NOT EXISTS "${GLOG_INSTALL_DIR}/lib/libglog.lib")
- add_custom_command(TARGET extern_glog POST_BUILD
- COMMAND cmake -E copy ${GLOG_INSTALL_DIR}/lib/glog.lib ${GLOG_INSTALL_DIR}/lib/libglog.lib
- )
- ENDIF()
-ENDIF(WIN32)
-
-ADD_LIBRARY(glog STATIC IMPORTED GLOBAL)
-SET_PROPERTY(TARGET glog PROPERTY IMPORTED_LOCATION ${GLOG_LIBRARIES})
-ADD_DEPENDENCIES(glog extern_glog gflags)
-LINK_LIBRARIES(glog gflags)
diff --git a/cmake/external/gtest.cmake b/cmake/external/gtest.cmake
deleted file mode 100644
index 0df39138dd33de701fce80945d457b55a372ae17..0000000000000000000000000000000000000000
--- a/cmake/external/gtest.cmake
+++ /dev/null
@@ -1,86 +0,0 @@
-# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# the gtest is only used when WITH_TESTING=ON
-IF(WITH_TESTING)
- IF(WITH_TESTING)
- ENABLE_TESTING()
- ENDIF(WITH_TESTING)
-
- INCLUDE(ExternalProject)
-
- SET(GTEST_SOURCES_DIR ${CMAKE_SOURCE_DIR}/third-party/googletest)
- SET(GTEST_INSTALL_DIR ${THIRD_PARTY_PATH}/install/gtest)
- SET(GTEST_INCLUDE_DIR "${GTEST_INSTALL_DIR}/include" CACHE PATH "gtest include directory." FORCE)
-
- INCLUDE_DIRECTORIES(${GTEST_INCLUDE_DIR})
-
- IF(WIN32)
- set(GTEST_LIBRARIES
- "${GTEST_INSTALL_DIR}/lib/gtest.lib" CACHE FILEPATH "gtest libraries." FORCE)
- set(GTEST_MAIN_LIBRARIES
- "${GTEST_INSTALL_DIR}/lib/gtest_main.lib" CACHE FILEPATH "gtest main libraries." FORCE)
- ELSE(WIN32)
- set(GTEST_LIBRARIES
- "${GTEST_INSTALL_DIR}/lib/libgtest.a" CACHE FILEPATH "gtest libraries." FORCE)
- set(GTEST_MAIN_LIBRARIES
- "${GTEST_INSTALL_DIR}/lib/libgtest_main.a" CACHE FILEPATH "gtest main libraries." FORCE)
- ENDIF(WIN32)
-
- IF(WITH_MKLML)
- # wait for mklml downloading completed
- SET(GTEST_DEPENDS ${MKLML_PROJECT})
- ENDIF()
-
- SET(OPTIONAL_ARGS "-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}"
- "-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}"
- "-DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}"
- "-DCMAKE_CXX_FLAGS_RELEASE=${CMAKE_CXX_FLAGS_RELEASE}"
- "-DCMAKE_CXX_FLAGS_DEBUG=${CMAKE_CXX_FLAGS_DEBUG}"
- "-DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}"
- "-DCMAKE_C_FLAGS_DEBUG=${CMAKE_C_FLAGS_DEBUG}"
- "-DCMAKE_C_FLAGS_RELEASE=${CMAKE_C_FLAGS_RELEASE}")
-
- ExternalProject_Add(
- extern_gtest
- ${EXTERNAL_PROJECT_LOG_ARGS}
- DEPENDS ${GTEST_DEPENDS}
- GIT_REPOSITORY ""
- SOURCE_DIR ${GTEST_SOURCES_DIR}
- GIT_TAG "release-1.8.0"
- PREFIX ${GTEST_INSTALL_DIR}
- UPDATE_COMMAND ""
- CMAKE_ARGS ${CROSS_COMPILE_CMAKE_ARGS}
- ${OPTIONAL_ARGS}
- -DCMAKE_INSTALL_PREFIX=${GTEST_INSTALL_DIR}
- -DCMAKE_POSITION_INDEPENDENT_CODE=ON
- -DBUILD_GMOCK=ON
- -Dgtest_disable_pthreads=ON
- -Dgtest_force_shared_crt=ON
- -DCMAKE_BUILD_TYPE=${THIRD_PARTY_BUILD_TYPE}
- ${EXTERNAL_OPTIONAL_ARGS}
- CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${GTEST_INSTALL_DIR}
- -DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON
- -DCMAKE_BUILD_TYPE:STRING=${THIRD_PARTY_BUILD_TYPE}
- )
-
- ADD_LIBRARY(gtest STATIC IMPORTED GLOBAL)
- SET_PROPERTY(TARGET gtest PROPERTY IMPORTED_LOCATION ${GTEST_LIBRARIES})
- ADD_DEPENDENCIES(gtest extern_gtest)
-
- ADD_LIBRARY(gtest_main STATIC IMPORTED GLOBAL)
- SET_PROPERTY(TARGET gtest_main PROPERTY IMPORTED_LOCATION ${GTEST_MAIN_LIBRARIES})
- ADD_DEPENDENCIES(gtest_main extern_gtest)
-
-ENDIF()
diff --git a/cmake/external/libxsmm.cmake b/cmake/external/libxsmm.cmake
deleted file mode 100644
index 69cdba7c5921f14a87172d95791332e364045b26..0000000000000000000000000000000000000000
--- a/cmake/external/libxsmm.cmake
+++ /dev/null
@@ -1,55 +0,0 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-OPTION(WITH_LIBXSMM "Compile with libxsmm" OFF)
-
-IF(NOT WITH_LIBXSMM)
- return()
-ENDIF()
-
-IF(WIN32 OR APPLE)
- MESSAGE(WARNING "Windows, Mac are not supported with libxsmm in Paddle yet.")
- SET(WITH_LIBXSMM OFF CACHE STRING "Disable LIBXSMM" FORCE)
- return()
-ENDIF()
-
-INCLUDE (ExternalProject)
-
-SET(LIBXSMM_SOURCES_DIR ${THIRD_PARTY_PATH}/libxsmm)
-SET(LIBXSMM_INSTALL_DIR ${THIRD_PARTY_PATH}/install/libxsmm)
-SET(LIBXSMM_INCLUDE_DIR "${LIBXSMM_INSTALL_DIR}/include" CACHE PATH "LIBXSMM include directory." FORCE)
-SET(LIBXSMM_LIBRARY_DIR "${LIBXSMM_INSTALL_DIR}/lib" CACHE PATH "LIBXSMM library directory." FORCE)
-SET(LIBXSMM_LIBS "${LIBXSMM_LIBRARY_DIR}/libxsmm.a"
- "${LIBXSMM_LIBRARY_DIR}/libxsmmnoblas.a")
-
-ExternalProject_Add(
- extern_libxsmm
- GIT_REPOSITORY "https://github.com/hfp/libxsmm.git"
- GIT_TAG "7cc03b5b342fdbc6b6d990b190671c5dbb8489a2"
- PREFIX ${LIBXSMM_SOURCES_DIR}
- UPDATE_COMMAND ""
- CONFIGURE_COMMAND ""
- BUILD_IN_SOURCE 1
- BUILD_COMMAND $(MAKE) --silent PREFIX=${LIBXSMM_INSTALL_DIR} CXX=g++ CC=gcc WARP=0 install
- INSTALL_COMMAND ""
-)
-ADD_LIBRARY(libxsmm STATIC IMPORTED GLOBAL)
-SET_PROPERTY(TARGET libxsmm PROPERTY IMPORTED_LOCATION "${LIBXSMM_LIBRARY_DIR}/libxsmm.a")
-SET_PROPERTY(TARGET libxsmm PROPERTY IMPORTED_LOCATION "${LIBXSMM_LIBRARY_DIR}/libxsmmnoblas.a")
-
-MESSAGE(STATUS "Libxsmm library: ${LIBXSMM_LIBS}")
-include_directories(${LIBXSMM_INCLUDE_DIR})
-ADD_DEFINITIONS(-DPADDLE_WITH_LIBXSMM)
-ADD_DEPENDENCIES(libxsmm extern_libxsmm)
diff --git a/cmake/external/mkldnn.cmake b/cmake/external/mkldnn.cmake
deleted file mode 100644
index b1e437a9007072c82ab375bf5ed79fc7d6c80c47..0000000000000000000000000000000000000000
--- a/cmake/external/mkldnn.cmake
+++ /dev/null
@@ -1,120 +0,0 @@
-# Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-IF(NOT ${WITH_MKLDNN})
- return()
-ENDIF(NOT ${WITH_MKLDNN})
-
-INCLUDE(ExternalProject)
-
-SET(MKLDNN_PROJECT "extern_mkldnn")
-SET(MKLDNN_SOURCES_DIR ${THIRD_PARTY_PATH}/mkldnn)
-SET(MKLDNN_INSTALL_DIR ${THIRD_PARTY_PATH}/install/mkldnn)
-SET(MKLDNN_INC_DIR "${MKLDNN_INSTALL_DIR}/include" CACHE PATH "mkldnn include directory." FORCE)
-
-IF(APPLE)
- MESSAGE(WARNING
- "Mac is not supported with MKLDNN in Paddle yet."
- "Force WITH_MKLDNN=OFF")
- SET(WITH_MKLDNN OFF CACHE STRING "Disable MKLDNN in MacOS" FORCE)
- return()
-ENDIF()
-
-# Introduce variables:
-# * CMAKE_INSTALL_LIBDIR
-INCLUDE(GNUInstallDirs)
-SET(LIBDIR "lib")
-if(CMAKE_INSTALL_LIBDIR MATCHES ".*lib64$")
- SET(LIBDIR "lib64")
-endif()
-
-MESSAGE(STATUS "Set ${MKLDNN_INSTALL_DIR}/l${LIBDIR} to runtime path")
-SET(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)
-SET(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_RPATH}" "${MKLDNN_INSTALL_DIR}/${LIBDIR}")
-
-INCLUDE_DIRECTORIES(${MKLDNN_INC_DIR}) # For MKLDNN code to include internal headers.
-
-IF(${CBLAS_PROVIDER} STREQUAL "MKLML")
- SET(MKLDNN_DEPENDS ${MKLML_PROJECT})
- MESSAGE(STATUS "Build MKLDNN with MKLML ${MKLML_ROOT}")
-ELSE()
- MESSAGE(FATAL_ERROR "Should enable MKLML when build MKLDNN")
-ENDIF()
-
-IF(NOT WIN32)
- SET(MKLDNN_FLAG "-Wno-error=strict-overflow -Wno-error=unused-result -Wno-error=array-bounds")
- SET(MKLDNN_FLAG "${MKLDNN_FLAG} -Wno-unused-result -Wno-unused-value")
- SET(MKLDNN_CFLAG "${CMAKE_C_FLAGS} ${MKLDNN_FLAG}")
- SET(MKLDNN_CXXFLAG "${CMAKE_CXX_FLAGS} ${MKLDNN_FLAG}")
-ELSE()
- SET(MKLDNN_CXXFLAG "${CMAKE_CXX_FLAGS} /EHsc")
-ENDIF(NOT WIN32)
-
-ExternalProject_Add(
- ${MKLDNN_PROJECT}
- ${EXTERNAL_PROJECT_LOG_ARGS}
- DEPENDS ${MKLDNN_DEPENDS}
- GIT_REPOSITORY "https://github.com/intel/mkl-dnn.git"
- GIT_TAG "863ff6e7042cec7d2e29897fe9f0872e0888b0fc"
- PREFIX ${MKLDNN_SOURCES_DIR}
- UPDATE_COMMAND ""
- CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
- CMAKE_ARGS -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
- CMAKE_ARGS -DCMAKE_CXX_FLAGS_RELEASE=${CMAKE_CXX_FLAGS_RELEASE}
- CMAKE_ARGS -DCMAKE_CXX_FLAGS_DEBUG=${CMAKE_CXX_FLAGS_DEBUG}
- CMAKE_ARGS -DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}
- CMAKE_ARGS -DCMAKE_C_FLAGS_DEBUG=${CMAKE_C_FLAGS_DEBUG}
- CMAKE_ARGS -DCMAKE_C_FLAGS_RELEASE=${CMAKE_C_FLAGS_RELEASE}
- CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${MKLDNN_INSTALL_DIR}
- CMAKE_ARGS -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
- CMAKE_ARGS -DCMAKE_POSITION_INDEPENDENT_CODE=ON
- CMAKE_ARGS -DMKLROOT=${MKLML_ROOT}
- CMAKE_ARGS -DCMAKE_C_FLAGS=${MKLDNN_CFLAG}
- CMAKE_ARGS -DCMAKE_CXX_FLAGS=${MKLDNN_CXXFLAG}
- CMAKE_ARGS -DWITH_TEST=OFF -DWITH_EXAMPLE=OFF
- CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${MKLDNN_INSTALL_DIR}
- -DMKLROOT:PATH=${MKLML_ROOT}
-)
-if(WIN32)
- SET(MKLDNN_LIB "${MKLDNN_INSTALL_DIR}/${LIBDIR}/mkldnn.lib" CACHE FILEPATH "mkldnn library." FORCE)
-else(WIN32)
- SET(MKLDNN_LIB "${MKLDNN_INSTALL_DIR}/${LIBDIR}/libmkldnn.so" CACHE FILEPATH "mkldnn library." FORCE)
-endif(WIN32)
-
-ADD_LIBRARY(shared_mkldnn SHARED IMPORTED GLOBAL)
-SET_PROPERTY(TARGET shared_mkldnn PROPERTY IMPORTED_LOCATION ${MKLDNN_LIB})
-ADD_DEPENDENCIES(shared_mkldnn ${MKLDNN_PROJECT})
-MESSAGE(STATUS "MKLDNN library: ${MKLDNN_LIB}")
-add_definitions(-DPADDLE_WITH_MKLDNN)
-
-# generate a static dummy target to track mkldnn dependencies
-# for cc_library(xxx SRCS xxx.c DEPS mkldnn)
-SET(dummyfile ${CMAKE_CURRENT_BINARY_DIR}/mkldnn_dummy.c)
-FILE(WRITE ${dummyfile} "const char * dummy = \"${dummyfile}\";")
-ADD_LIBRARY(mkldnn STATIC ${dummyfile})
-TARGET_LINK_LIBRARIES(mkldnn ${MKLDNN_LIB} ${MKLML_LIB} ${MKLML_IOMP_LIB})
-ADD_DEPENDENCIES(mkldnn ${MKLDNN_PROJECT})
-
-# copy the real so.0 lib to install dir
-# it can be directly contained in wheel or capi
-if(WIN32)
- SET(MKLDNN_SHARED_LIB ${MKLDNN_INSTALL_DIR}/bin/mkldnn.dll)
-else(WIN32)
- SET(MKLDNN_SHARED_LIB ${MKLDNN_INSTALL_DIR}/libmkldnn.so.0)
- ADD_CUSTOM_COMMAND(OUTPUT ${MKLDNN_SHARED_LIB}
- COMMAND ${CMAKE_COMMAND} -E copy ${MKLDNN_LIB} ${MKLDNN_SHARED_LIB}
- DEPENDS mkldnn shared_mkldnn)
-endif(WIN32)
-ADD_CUSTOM_TARGET(mkldnn_shared_lib ALL DEPENDS ${MKLDNN_SHARED_LIB})
-ADD_DEPENDENCIES(mkldnn_shared_lib ${MKLDNN_PROJECT} mkldnn)
diff --git a/cmake/external/mklml.cmake b/cmake/external/mklml.cmake
deleted file mode 100644
index 142fce816de4f06aa0a36b91e3e4ecb962a8dc2a..0000000000000000000000000000000000000000
--- a/cmake/external/mklml.cmake
+++ /dev/null
@@ -1,77 +0,0 @@
-# Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-IF(NOT ${WITH_MKLML})
- return()
-ENDIF(NOT ${WITH_MKLML})
-
-IF(APPLE)
- MESSAGE(WARNING "Mac is not supported with MKLML in Paddle yet. Force WITH_MKLML=OFF.")
- SET(WITH_MKLML OFF CACHE STRING "Disable MKLML package in MacOS" FORCE)
- return()
-ENDIF()
-
-INCLUDE(ExternalProject)
-SET(MKLML_DST_DIR "mklml")
-SET(MKLML_INSTALL_ROOT "${THIRD_PARTY_PATH}/install")
-SET(MKLML_INSTALL_DIR ${MKLML_INSTALL_ROOT}/${MKLML_DST_DIR})
-SET(MKLML_ROOT ${MKLML_INSTALL_DIR})
-SET(MKLML_INC_DIR ${MKLML_ROOT}/include)
-SET(MKLML_LIB_DIR ${MKLML_ROOT}/lib)
-SET(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_RPATH}" "${MKLML_ROOT}/lib")
-
-SET(TIME_VERSION "2019.0.1.20181227")
-IF(WIN32)
- SET(MKLML_VER "mklml_win_${TIME_VERSION}" CACHE STRING "" FORCE)
- SET(MKLML_URL "https://paddlepaddledeps.bj.bcebos.com/${MKLML_VER}.zip" CACHE STRING "" FORCE)
- SET(MKLML_LIB ${MKLML_LIB_DIR}/mklml.lib)
- SET(MKLML_IOMP_LIB ${MKLML_LIB_DIR}/libiomp5md.lib)
- SET(MKLML_SHARED_LIB ${MKLML_LIB_DIR}/mklml.dll)
- SET(MKLML_SHARED_IOMP_LIB ${MKLML_LIB_DIR}/libiomp5md.dll)
-ELSE()
- #TODO(intel-huying):
- # Now enable Erf function in mklml library temporarily, it will be updated as offical version later.
- SET(MKLML_VER "Glibc225_vsErf_mklml_lnx_${TIME_VERSION}" CACHE STRING "" FORCE)
- SET(MKLML_URL "http://paddlepaddledeps.bj.bcebos.com/${MKLML_VER}.tgz" CACHE STRING "" FORCE)
- SET(MKLML_LIB ${MKLML_LIB_DIR}/libmklml_intel.so)
- SET(MKLML_IOMP_LIB ${MKLML_LIB_DIR}/libiomp5.so)
- SET(MKLML_SHARED_LIB ${MKLML_LIB_DIR}/libmklml_intel.so)
- SET(MKLML_SHARED_IOMP_LIB ${MKLML_LIB_DIR}/libiomp5.so)
-ENDIF()
-
-SET(MKLML_PROJECT "extern_mklml")
-MESSAGE(STATUS "MKLML_VER: ${MKLML_VER}, MKLML_URL: ${MKLML_URL}")
-SET(MKLML_SOURCE_DIR "${THIRD_PARTY_PATH}/mklml")
-SET(MKLML_DOWNLOAD_DIR "${MKLML_SOURCE_DIR}/src/${MKLML_PROJECT}")
-
-ExternalProject_Add(
- ${MKLML_PROJECT}
- ${EXTERNAL_PROJECT_LOG_ARGS}
- PREFIX ${MKLML_SOURCE_DIR}
- URL ${MKLML_URL}
- DOWNLOAD_DIR ${MKLML_DOWNLOAD_DIR}
- DOWNLOAD_NO_PROGRESS 1
- CONFIGURE_COMMAND ""
- BUILD_COMMAND ""
- UPDATE_COMMAND ""
- INSTALL_COMMAND
- ${CMAKE_COMMAND} -E copy_directory ${MKLML_DOWNLOAD_DIR}/include ${MKLML_INC_DIR} &&
- ${CMAKE_COMMAND} -E copy_directory ${MKLML_DOWNLOAD_DIR}/lib ${MKLML_LIB_DIR}
-)
-
-INCLUDE_DIRECTORIES(${MKLML_INC_DIR})
-
-ADD_LIBRARY(mklml SHARED IMPORTED GLOBAL)
-SET_PROPERTY(TARGET mklml PROPERTY IMPORTED_LOCATION ${MKLML_LIB})
-ADD_DEPENDENCIES(mklml ${MKLML_PROJECT})
diff --git a/cmake/external/openblas.cmake b/cmake/external/openblas.cmake
deleted file mode 100644
index d8a4a0be6f5aaa3a1a4977bbc68348743f2fa742..0000000000000000000000000000000000000000
--- a/cmake/external/openblas.cmake
+++ /dev/null
@@ -1,93 +0,0 @@
-# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-INCLUDE(cblas)
-
-IF(NOT ${CBLAS_FOUND})
- INCLUDE(ExternalProject)
-
- SET(CBLAS_SOURCES_DIR ${THIRD_PARTY_PATH}/openblas)
- SET(CBLAS_INSTALL_DIR ${THIRD_PARTY_PATH}/install/openblas)
- SET(CBLAS_INC_DIR "${CBLAS_INSTALL_DIR}/include" CACHE PATH "openblas include directory." FORCE)
-
- SET(CBLAS_LIBRARIES
- "${CBLAS_INSTALL_DIR}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}openblas${CMAKE_STATIC_LIBRARY_SUFFIX}"
- CACHE FILEPATH "openblas library." FORCE)
-
- ADD_DEFINITIONS(-DPADDLE_USE_OPENBLAS)
-
- IF (WIN32)
- SET(CBLAS_FOUND true)
- MESSAGE(WARNING, "In windows, openblas only support msvc build, please build it manually and put it at " ${CBLAS_INSTALL_DIR})
- ENDIF(WIN32)
-
- IF (NOT WIN32)
- SET(OPENBLAS_CC "${CMAKE_C_COMPILER} -Wno-unused-but-set-variable -Wno-unused-variable")
- SET(OPENBLAS_COMMIT "v0.2.20")
-
- IF(APPLE)
- SET(OPENBLAS_CC "${CMAKE_C_COMPILER} -isysroot ${CMAKE_OSX_SYSROOT}")
- ENDIF()
- SET(OPTIONAL_ARGS "")
- IF(CMAKE_SYSTEM_PROCESSOR MATCHES "^x86(_64)?$")
- SET(OPTIONAL_ARGS DYNAMIC_ARCH=1 NUM_THREADS=64)
- ENDIF()
-
- SET(COMMON_ARGS CC=${OPENBLAS_CC} NO_SHARED=1 NO_LAPACK=1 libs)
- ExternalProject_Add(
- extern_openblas
- ${EXTERNAL_PROJECT_LOG_ARGS}
- GIT_REPOSITORY https://github.com/xianyi/OpenBLAS.git
- GIT_TAG ${OPENBLAS_COMMIT}
- PREFIX ${CBLAS_SOURCES_DIR}
- INSTALL_DIR ${CBLAS_INSTALL_DIR}
- BUILD_IN_SOURCE 1
- BUILD_COMMAND ${CMAKE_MAKE_PROGRAM} ${COMMON_ARGS} ${OPTIONAL_ARGS}
- INSTALL_COMMAND ${CMAKE_MAKE_PROGRAM} install NO_SHARED=1 NO_LAPACK=1 PREFIX=
- && rm -r ${CBLAS_INSTALL_DIR}/lib/cmake ${CBLAS_INSTALL_DIR}/lib/pkgconfig
- UPDATE_COMMAND ""
- CONFIGURE_COMMAND ""
- )
- ELSE()
- ENDIF(NOT WIN32)
- SET(CBLAS_PROVIDER openblas)
-ENDIF(NOT ${CBLAS_FOUND})
-
-MESSAGE(STATUS "BLAS library: ${CBLAS_LIBRARIES}")
-MESSAGE(STATUS "BLAS Include: ${CBLAS_INC_DIR}")
-INCLUDE_DIRECTORIES(${CBLAS_INC_DIR})
-
-# FIXME(gangliao): generate cblas target to track all high performance
-# linear algebra libraries for cc_library(xxx SRCS xxx.c DEPS cblas)
-SET(dummyfile ${CMAKE_CURRENT_BINARY_DIR}/cblas_dummy.c)
-FILE(WRITE ${dummyfile} "const char *dummy_cblas = \"${dummyfile}\";")
-ADD_LIBRARY(cblas STATIC ${dummyfile})
-
-IF("${CBLAS_PROVIDER}" STREQUAL "MKLML")
- TARGET_LINK_LIBRARIES(cblas dynload_mklml)
-ELSE()
- TARGET_LINK_LIBRARIES(cblas ${CBLAS_LIBRARIES})
-ENDIF("${CBLAS_PROVIDER}" STREQUAL "MKLML")
-
-IF(WITH_LIBXSMM)
- TARGET_LINK_LIBRARIES(cblas ${LIBXSMM_LIBS})
- ADD_DEPENDENCIES(cblas extern_libxsmm)
-ENDIF()
-
-IF(NOT ${CBLAS_FOUND})
- ADD_DEPENDENCIES(cblas extern_openblas)
-ELSE()
- IF("${CBLAS_PROVIDER}" STREQUAL "MKLML")
- ADD_DEPENDENCIES(cblas mklml)
- ENDIF()
-ENDIF(NOT ${CBLAS_FOUND})
diff --git a/cmake/external/opencl-clhpp.cmake b/cmake/external/opencl-clhpp.cmake
deleted file mode 100644
index ea724860d9b40ab5669975cebc6d5e1d7b662fb4..0000000000000000000000000000000000000000
--- a/cmake/external/opencl-clhpp.cmake
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-INCLUDE(ExternalProject)
-
-SET(OPENCL_CLHPP_SRCS_DIR ${THIRD_PARTY_PATH}/opencl-clhpp)
-SET(OPENCL_CLHPP_INSTALL_DIR ${THIRD_PARTY_PATH}/install/opencl-clhpp)
-SET(OPENCL_CLHPP_INCLUDE_DIR "${OPENCL_CLHPP_INSTALL_DIR}" CACHE PATH "opencl-clhpp include directory." FORCE)
-
-INCLUDE_DIRECTORIES(${OPENCL_CLHPP_INCLUDE_DIR})
-
-ExternalProject_Add(
- opencl_clhpp
- GIT_REPOSITORY "https://github.com/KhronosGroup/OpenCL-CLHPP.git"
- GIT_TAG "v2.0.10"
- PREFIX "${OPENCL_CLHPP_SRCS_DIR}"
- CMAKE_ARGS -DBUILD_DOCS=OFF
- -DBUILD_EXAMPLES=OFF
- -DBUILD_TESTS=OFF
- -DCMAKE_INSTALL_PREFIX=${OPENCL_CLHPP_INSTALL_DIR}
- CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${OPENCL_CLHPP_INSTALL_DIR}
- -DCMAKE_BUILD_TYPE:STRING=${THIRD_PARTY_BUILD_TYPE}
-)
-
-ADD_DEPENDENCIES(opencl_clhpp opencl_headers)
diff --git a/cmake/external/opencl-headers.cmake b/cmake/external/opencl-headers.cmake
deleted file mode 100644
index 68c9c5251cfb04df4882fdd455936832440d3cff..0000000000000000000000000000000000000000
--- a/cmake/external/opencl-headers.cmake
+++ /dev/null
@@ -1,33 +0,0 @@
-# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-INCLUDE(ExternalProject)
-
-SET(OPENCL_HEADERS_SRCS_DIR ${THIRD_PARTY_PATH}/opencl-headers)
-SET(OPENCL_HEADERS_INCLUDE_DIR "${OPENCL_HEADERS_SRCS_DIR}/src/opencl_headers" CACHE PATH "opencl-headers include directory." FORCE)
-
-INCLUDE_DIRECTORIES(${OPENCL_HEADERS_INCLUDE_DIR})
-
-ExternalProject_Add(
- opencl_headers
- ${EXTERNAL_PROJECT_LOG_ARGS}
- GIT_REPOSITORY "https://github.com/KhronosGroup/OpenCL-Headers.git"
- GIT_TAG "c5a4bbeabb10d8ed3d1c651b93aa31737bc473dd"
- PREFIX ${OPENCL_HEADERS_SRCS_DIR}
- DOWNLOAD_NAME "OpenCL-Headers"
- CONFIGURE_COMMAND ""
- BUILD_COMMAND ""
- INSTALL_COMMAND ""
- TEST_COMMAND ""
-)
diff --git a/cmake/external/protobuf.cmake b/cmake/external/protobuf.cmake
deleted file mode 100644
index 2a88cf0321fa42d358fb3fc9d3555e5cabb8c4a6..0000000000000000000000000000000000000000
--- a/cmake/external/protobuf.cmake
+++ /dev/null
@@ -1,308 +0,0 @@
-# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-INCLUDE(ExternalProject)
-# Always invoke `FIND_PACKAGE(Protobuf)` for importing function protobuf_generate_cpp
-IF(NOT WIN32)
-FIND_PACKAGE(Protobuf QUIET)
-ENDIF(NOT WIN32)
-macro(UNSET_VAR VAR_NAME)
- UNSET(${VAR_NAME} CACHE)
- UNSET(${VAR_NAME})
-endmacro()
-
-UNSET_VAR(PROTOBUF_INCLUDE_DIR)
-UNSET_VAR(PROTOBUF_FOUND)
-UNSET_VAR(PROTOBUF_PROTOC_EXECUTABLE)
-UNSET_VAR(PROTOBUF_PROTOC_LIBRARY)
-UNSET_VAR(PROTOBUF_LITE_LIBRARY)
-UNSET_VAR(PROTOBUF_LIBRARY)
-UNSET_VAR(PROTOBUF_INCLUDE_DIR)
-UNSET_VAR(Protobuf_PROTOC_EXECUTABLE)
-function(protobuf_generate_python SRCS)
- # shameless copy from https://github.com/Kitware/CMake/blob/master/Modules/FindProtobuf.cmake
- if(NOT ARGN)
- message(SEND_ERROR "Error: PROTOBUF_GENERATE_PYTHON() called without any proto files")
- return()
- endif()
-
- if(PROTOBUF_GENERATE_CPP_APPEND_PATH)
- # Create an include path for each file specified
- foreach(FIL ${ARGN})
- get_filename_component(ABS_FIL ${FIL} ABSOLUTE)
- get_filename_component(ABS_PATH ${ABS_FIL} PATH)
- list(FIND _protobuf_include_path ${ABS_PATH} _contains_already)
- if(${_contains_already} EQUAL -1)
- list(APPEND _protobuf_include_path -I ${ABS_PATH})
- endif()
- endforeach()
- else()
- set(_protobuf_include_path -I ${CMAKE_CURRENT_SOURCE_DIR})
- endif()
- if(DEFINED PROTOBUF_IMPORT_DIRS AND NOT DEFINED Protobuf_IMPORT_DIRS)
- set(Protobuf_IMPORT_DIRS "${PROTOBUF_IMPORT_DIRS}")
- endif()
-
- if(DEFINED Protobuf_IMPORT_DIRS)
- foreach(DIR ${Protobuf_IMPORT_DIRS})
- get_filename_component(ABS_PATH ${DIR} ABSOLUTE)
- list(FIND _protobuf_include_path ${ABS_PATH} _contains_already)
- if(${_contains_already} EQUAL -1)
- list(APPEND _protobuf_include_path -I ${ABS_PATH})
- endif()
- endforeach()
- endif()
-
- set(${SRCS})
- foreach(FIL ${ARGN})
- get_filename_component(ABS_FIL ${FIL} ABSOLUTE)
- get_filename_component(FIL_WE ${FIL} NAME_WE)
- if(NOT PROTOBUF_GENERATE_CPP_APPEND_PATH)
- get_filename_component(FIL_DIR ${FIL} DIRECTORY)
- if(FIL_DIR)
- set(FIL_WE "${FIL_DIR}/${FIL_WE}")
- endif()
- endif()
- list(APPEND ${SRCS} "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}_pb2.py")
- add_custom_command(
- OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}_pb2.py"
- COMMAND ${PROTOBUF_PROTOC_EXECUTABLE} --python_out ${CMAKE_CURRENT_BINARY_DIR} ${_protobuf_include_path} ${ABS_FIL}
- DEPENDS ${ABS_FIL} ${PROTOBUF_PROTOC_EXECUTABLE}
- COMMENT "Running Python protocol buffer compiler on ${FIL}"
- VERBATIM )
- endforeach()
-
- set(${SRCS} ${${SRCS}} PARENT_SCOPE)
-endfunction()
-
-# Print and set the protobuf library information,
-# finish this cmake process and exit from this file.
-macro(PROMPT_PROTOBUF_LIB)
- SET(protobuf_DEPS ${ARGN})
-
- MESSAGE(STATUS "Protobuf protoc executable: ${PROTOBUF_PROTOC_EXECUTABLE}")
- MESSAGE(STATUS "Protobuf-lite library: ${PROTOBUF_LITE_LIBRARY}")
- MESSAGE(STATUS "Protobuf library: ${PROTOBUF_LIBRARY}")
- MESSAGE(STATUS "Protoc library: ${PROTOBUF_PROTOC_LIBRARY}")
- MESSAGE(STATUS "Protobuf version: ${PROTOBUF_VERSION}")
- INCLUDE_DIRECTORIES(${PROTOBUF_INCLUDE_DIR})
-
- # Assuming that all the protobuf libraries are of the same type.
- IF(${PROTOBUF_LIBRARY} MATCHES ${CMAKE_STATIC_LIBRARY_SUFFIX})
- SET(protobuf_LIBTYPE STATIC)
- ELSEIF(${PROTOBUF_LIBRARY} MATCHES "${CMAKE_SHARED_LIBRARY_SUFFIX}$")
- SET(protobuf_LIBTYPE SHARED)
- ELSE()
- MESSAGE(FATAL_ERROR "Unknown library type: ${PROTOBUF_LIBRARY}")
- ENDIF()
-
- ADD_LIBRARY(protobuf ${protobuf_LIBTYPE} IMPORTED GLOBAL)
- SET_PROPERTY(TARGET protobuf PROPERTY IMPORTED_LOCATION ${PROTOBUF_LIBRARY})
-
- ADD_LIBRARY(protobuf_lite ${protobuf_LIBTYPE} IMPORTED GLOBAL)
- SET_PROPERTY(TARGET protobuf_lite PROPERTY IMPORTED_LOCATION ${PROTOBUF_LITE_LIBRARY})
-
- ADD_LIBRARY(libprotoc ${protobuf_LIBTYPE} IMPORTED GLOBAL)
- SET_PROPERTY(TARGET libprotoc PROPERTY IMPORTED_LOCATION ${PROTOC_LIBRARY})
-
- ADD_EXECUTABLE(protoc IMPORTED GLOBAL)
- SET_PROPERTY(TARGET protoc PROPERTY IMPORTED_LOCATION ${PROTOBUF_PROTOC_EXECUTABLE})
- # FIND_Protobuf.cmake uses `Protobuf_PROTOC_EXECUTABLE`.
- # make `protobuf_generate_cpp` happy.
- SET(Protobuf_PROTOC_EXECUTABLE ${PROTOBUF_PROTOC_EXECUTABLE})
-
- FOREACH(dep ${protobuf_DEPS})
- ADD_DEPENDENCIES(protobuf ${dep})
- ADD_DEPENDENCIES(protobuf_lite ${dep})
- ADD_DEPENDENCIES(libprotoc ${dep})
- ADD_DEPENDENCIES(protoc ${dep})
- ENDFOREACH()
-
- RETURN()
-endmacro()
-macro(SET_PROTOBUF_VERSION)
- EXEC_PROGRAM(${PROTOBUF_PROTOC_EXECUTABLE} ARGS --version OUTPUT_VARIABLE PROTOBUF_VERSION)
- STRING(REGEX MATCH "[0-9]+.[0-9]+" PROTOBUF_VERSION "${PROTOBUF_VERSION}")
-endmacro()
-
-set(PROTOBUF_ROOT "" CACHE PATH "Folder contains protobuf")
-IF (WIN32)
- SET(PROTOBUF_ROOT ${THIRD_PARTY_PATH}/install/protobuf)
-ENDIF(WIN32)
-
-if (NOT "${PROTOBUF_ROOT}" STREQUAL "")
- find_path(PROTOBUF_INCLUDE_DIR google/protobuf/message.h PATHS ${PROTOBUF_ROOT}/include NO_DEFAULT_PATH)
- find_library(PROTOBUF_LIBRARY protobuf libprotobuf.lib PATHS ${PROTOBUF_ROOT}/lib NO_DEFAULT_PATH)
- find_library(PROTOBUF_LITE_LIBRARY protobuf-lite libprotobuf-lite.lib PATHS ${PROTOBUF_ROOT}/lib NO_DEFAULT_PATH)
- find_library(PROTOBUF_PROTOC_LIBRARY protoc libprotoc.lib PATHS ${PROTOBUF_ROOT}/lib NO_DEFAULT_PATH)
- find_program(PROTOBUF_PROTOC_EXECUTABLE protoc PATHS ${PROTOBUF_ROOT}/bin NO_DEFAULT_PATH)
- if (PROTOBUF_INCLUDE_DIR AND PROTOBUF_LIBRARY AND PROTOBUF_LITE_LIBRARY AND PROTOBUF_PROTOC_LIBRARY AND PROTOBUF_PROTOC_EXECUTABLE)
- message(STATUS "Using custom protobuf library in ${PROTOBUF_ROOT}.")
- SET(PROTOBUF_FOUND true)
- SET_PROTOBUF_VERSION()
- PROMPT_PROTOBUF_LIB()
- else()
- message(WARNING "Cannot find protobuf library in ${PROTOBUF_ROOT}")
- endif()
-endif()
-
-FUNCTION(build_protobuf TARGET_NAME BUILD_FOR_HOST)
- STRING(REPLACE "extern_" "" TARGET_DIR_NAME "${TARGET_NAME}")
- SET(PROTOBUF_SOURCES_DIR ${THIRD_PARTY_PATH}/${TARGET_DIR_NAME})
- SET(PROTOBUF_INSTALL_DIR ${THIRD_PARTY_PATH}/install/${TARGET_DIR_NAME})
-
- SET(${TARGET_NAME}_INCLUDE_DIR "${PROTOBUF_INSTALL_DIR}/include" PARENT_SCOPE)
- SET(PROTOBUF_INCLUDE_DIR "${PROTOBUF_INSTALL_DIR}/include" PARENT_SCOPE)
- SET(${TARGET_NAME}_LITE_LIBRARY
- "${PROTOBUF_INSTALL_DIR}/lib/libprotobuf-lite${CMAKE_STATIC_LIBRARY_SUFFIX}"
- PARENT_SCOPE)
- SET(${TARGET_NAME}_LIBRARY
- "${PROTOBUF_INSTALL_DIR}/lib/libprotobuf${CMAKE_STATIC_LIBRARY_SUFFIX}"
- PARENT_SCOPE)
- SET(${TARGET_NAME}_PROTOC_LIBRARY
- "${PROTOBUF_INSTALL_DIR}/lib/libprotoc${CMAKE_STATIC_LIBRARY_SUFFIX}"
- PARENT_SCOPE)
- SET(${TARGET_NAME}_PROTOC_EXECUTABLE
- "${PROTOBUF_INSTALL_DIR}/bin/protoc${CMAKE_EXECUTABLE_SUFFIX}"
- PARENT_SCOPE)
-
- # https://github.com/protocolbuffers/protobuf.git
- SET(PROTOBUF_REPO "")
- SET(PROTOBUF_TAG "9f75c5aa851cd877fb0d93ccc31b8567a6706546")
- SET(OPTIONAL_CACHE_ARGS "")
- SET(OPTIONAL_ARGS "")
- SET(SOURCE_DIR "${CMAKE_SOURCE_DIR}/third-party/protobuf-host")
-
- IF(BUILD_FOR_HOST)
- # set for server compile.
- if (NOT LITE_WITH_LIGHT_WEIGHT_FRAMEWORK)
- set(HOST_C_COMPILER "${CMAKE_C_COMPILER}")
- set(HOST_CXX_COMPILER "${CMAKE_CXX_COMPILER}")
- endif()
-
- SET(OPTIONAL_ARGS
- "-DCMAKE_C_COMPILER=${HOST_C_COMPILER}"
- "-DCMAKE_CXX_COMPILER=${HOST_CXX_COMPILER}"
- "-Dprotobuf_WITH_ZLIB=OFF"
- "-DZLIB_ROOT:FILEPATH=${ZLIB_ROOT}")
- SET(OPTIONAL_CACHE_ARGS "-DZLIB_ROOT:STRING=${ZLIB_ROOT}")
- ELSE()
- # protobuf have compile issue when use android stl c++_static
- # https://github.com/tensor-tang/protobuf.git
- SET(PROTOBUF_REPO "")
- SET(PROTOBUF_TAG "mobile")
- SET(SOURCE_DIR "${CMAKE_SOURCE_DIR}/third-party/protobuf-mobile")
- SET(OPTIONAL_ARGS "-Dprotobuf_WITH_ZLIB=OFF"
- ${CROSS_COMPILE_CMAKE_ARGS}
- "-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}"
- "-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}"
- "-DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}"
- "-DCMAKE_C_FLAGS_DEBUG=${CMAKE_C_FLAGS_DEBUG}"
- "-DCMAKE_C_FLAGS_RELEASE=${CMAKE_C_FLAGS_RELEASE}"
- "-DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}"
- "-DCMAKE_CXX_FLAGS_RELEASE=${CMAKE_CXX_FLAGS_RELEASE}"
- "-DCMAKE_CXX_FLAGS_DEBUG=${CMAKE_CXX_FLAGS_DEBUG}")
- ENDIF()
- IF(WIN32)
- SET(OPTIONAL_ARGS ${OPTIONAL_ARGS} "-DCMAKE_GENERATOR_PLATFORM=x64")
- ENDIF()
-
- if(LITE_WITH_LIGHT_WEIGHT_FRAMEWORK)
- ExternalProject_Add(
- ${TARGET_NAME}
- ${EXTERNAL_PROJECT_LOG_ARGS}
- PREFIX ${PROTOBUF_SOURCES_DIR}
- SOURCE_SUBDIR cmake
- UPDATE_COMMAND ""
- GIT_REPOSITORY ""
- GIT_TAG ${PROTOBUF_TAG}
- SOURCE_DIR ${SOURCE_DIR}
- CMAKE_ARGS
- ${OPTIONAL_ARGS}
- -Dprotobuf_BUILD_TESTS=OFF
- -DCMAKE_SKIP_RPATH=ON
- -DCMAKE_POSITION_INDEPENDENT_CODE=ON
- -DCMAKE_BUILD_TYPE=${THIRD_PARTY_BUILD_TYPE}
- -DCMAKE_INSTALL_PREFIX=${PROTOBUF_INSTALL_DIR}
- -DCMAKE_INSTALL_LIBDIR=lib
- -DBUILD_SHARED_LIBS=OFF
- CMAKE_CACHE_ARGS
- -DCMAKE_INSTALL_PREFIX:PATH=${PROTOBUF_INSTALL_DIR}
- -DCMAKE_BUILD_TYPE:STRING=${THIRD_PARTY_BUILD_TYPE}
- -DCMAKE_VERBOSE_MAKEFILE:BOOL=OFF
- -DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON
- ${OPTIONAL_CACHE_ARGS}
- )
- else()
- ExternalProject_Add(
- ${TARGET_NAME}
- ${EXTERNAL_PROJECT_LOG_ARGS}
- PREFIX ${SOURCE_DIR}
- UPDATE_COMMAND ""
- GIT_REPOSITORY ""
- GIT_TAG ${PROTOBUF_TAG}
- SOURCE_DIR ${SOURCE_DIR}
- CONFIGURE_COMMAND ${CMAKE_COMMAND} ${SOURCE_DIR}/cmake
- ${OPTIONAL_ARGS}
- -Dprotobuf_BUILD_TESTS=OFF
- -DCMAKE_SKIP_RPATH=ON
- -DCMAKE_POSITION_INDEPENDENT_CODE=ON
- -DCMAKE_BUILD_TYPE=${THIRD_PARTY_BUILD_TYPE}
- -DCMAKE_INSTALL_PREFIX=${PROTOBUF_INSTALL_DIR}
- -DCMAKE_INSTALL_LIBDIR=lib
- -DBUILD_SHARED_LIBS=OFF
- CMAKE_CACHE_ARGS
- -DCMAKE_INSTALL_PREFIX:PATH=${PROTOBUF_INSTALL_DIR}
- -DCMAKE_BUILD_TYPE:STRING=${THIRD_PARTY_BUILD_TYPE}
- -DCMAKE_VERBOSE_MAKEFILE:BOOL=OFF
- -DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON
- ${OPTIONAL_CACHE_ARGS}
- )
- endif()
-ENDFUNCTION()
-
-SET(PROTOBUF_VERSION 3.1.0)
-
-IF(LITE_WITH_LIGHT_WEIGHT_FRAMEWORK)
- build_protobuf(protobuf_host TRUE)
- LIST(APPEND external_project_dependencies protobuf_host)
- SET(PROTOBUF_PROTOC_EXECUTABLE ${protobuf_host_PROTOC_EXECUTABLE}
- CACHE FILEPATH "protobuf executable." FORCE)
-ENDIF()
-
-IF(NOT PROTOBUF_FOUND)
- if (LITE_WITH_LIGHT_WEIGHT_FRAMEWORK)
- build_protobuf(extern_protobuf FALSE)
- else()
- build_protobuf(extern_protobuf TRUE)
- endif()
-
- SET(PROTOBUF_INCLUDE_DIR ${extern_protobuf_INCLUDE_DIR}
- CACHE PATH "protobuf include directory." FORCE)
- SET(PROTOBUF_LITE_LIBRARY ${extern_protobuf_LITE_LIBRARY}
- CACHE FILEPATH "protobuf lite library." FORCE)
- SET(PROTOBUF_LIBRARY ${extern_protobuf_LIBRARY}
- CACHE FILEPATH "protobuf library." FORCE)
- SET(PROTOBUF_PROTOC_LIBRARY ${extern_protobuf_PROTOC_LIBRARY}
- CACHE FILEPATH "protoc library." FORCE)
-
- IF(LITE_WITH_LIGHT_WEIGHT_FRAMEWORK)
- PROMPT_PROTOBUF_LIB(protobuf_host extern_protobuf)
- ELSE()
- SET(PROTOBUF_PROTOC_EXECUTABLE ${extern_protobuf_PROTOC_EXECUTABLE}
- CACHE FILEPATH "protobuf executable." FORCE)
- PROMPT_PROTOBUF_LIB(extern_protobuf)
- ENDIF()
-
-ENDIF(NOT PROTOBUF_FOUND)
diff --git a/cmake/external/xbyak.cmake b/cmake/external/xbyak.cmake
deleted file mode 100644
index 1d61154c0d45dea795902d6544deb796693db263..0000000000000000000000000000000000000000
--- a/cmake/external/xbyak.cmake
+++ /dev/null
@@ -1,57 +0,0 @@
-# Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set(WITH_XBYAK ON)
-if(WIN32 OR APPLE)
- SET(WITH_XBYAK OFF CACHE STRING "Disable XBYAK in Windows and MacOS" FORCE)
- return()
-endif()
-
-include(ExternalProject)
-
-set(XBYAK_PROJECT extern_xbyak)
-set(XBYAK_PREFIX_DIR ${THIRD_PARTY_PATH}/xbyak)
-set(XBYAK_INSTALL_ROOT ${THIRD_PARTY_PATH}/install/xbyak)
-set(XBYAK_INC_DIR ${XBYAK_INSTALL_ROOT}/include)
-
-include_directories(${XBYAK_INC_DIR})
-include_directories(${XBYAK_INC_DIR}/xbyak)
-
-add_definitions(-DPADDLE_WITH_XBYAK)
-
-# xbyak options
-add_definitions(-DXBYAK64)
-add_definitions(-DXBYAK_NO_OP_NAMES)
-
-ExternalProject_Add(
- ${XBYAK_PROJECT}
- ${EXTERNAL_PROJECT_LOG_ARGS}
- DEPENDS ""
- GIT_REPOSITORY "https://github.com/herumi/xbyak.git"
- GIT_TAG "v5.661" # Jul 26th
- PREFIX ${XBYAK_PREFIX_DIR}
- UPDATE_COMMAND ""
- CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${XBYAK_INSTALL_ROOT}
- CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${XBYAK_INSTALL_ROOT}
-)
-
-if (${CMAKE_VERSION} VERSION_LESS "3.3.0")
- set(dummyfile ${CMAKE_CURRENT_BINARY_DIR}/xbyak_dummy.c)
- file(WRITE ${dummyfile} "const char *dummy_xbyak = \"${dummyfile}\";")
- add_library(xbyak STATIC ${dummyfile})
-else()
- add_library(xbyak INTERFACE)
-endif()
-
-add_dependencies(xbyak ${XBYAK_PROJECT})
diff --git a/cmake/external/xxhash.cmake b/cmake/external/xxhash.cmake
deleted file mode 100644
index 23b1e02108642df561948a6faa3152effb7ca932..0000000000000000000000000000000000000000
--- a/cmake/external/xxhash.cmake
+++ /dev/null
@@ -1,73 +0,0 @@
-INCLUDE(ExternalProject)
-
-set(XXHASH_SOURCE_DIR ${THIRD_PARTY_PATH}/xxhash)
-set(XXHASH_INSTALL_DIR ${THIRD_PARTY_PATH}/install/xxhash)
-set(XXHASH_INCLUDE_DIR "${XXHASH_INSTALL_DIR}/include")
-
-IF(WITH_STATIC_LIB)
- SET(BUILD_CMD make lib)
-ELSE()
- IF(APPLE)
- SET(BUILD_CMD sed -i \"\" "s/-Wstrict-prototypes -Wundef/-Wstrict-prototypes -Wundef -fPIC/g" ${XXHASH_SOURCE_DIR}/src/extern_xxhash/Makefile && make lib)
- ELSE(APPLE)
- SET(BUILD_CMD sed -i "s/-Wstrict-prototypes -Wundef/-Wstrict-prototypes -Wundef -fPIC/g" ${XXHASH_SOURCE_DIR}/src/extern_xxhash/Makefile && make lib)
- ENDIF(APPLE)
-ENDIF()
-
-if(WIN32)
- ExternalProject_Add(
- extern_xxhash
- ${EXTERNAL_PROJECT_LOG_ARGS}
- GIT_REPOSITORY "https://github.com/Cyan4973/xxHash"
- GIT_TAG "v0.6.5"
- PREFIX ${XXHASH_SOURCE_DIR}
- DOWNLOAD_NAME "xxhash"
- UPDATE_COMMAND ""
- BUILD_IN_SOURCE 1
- PATCH_COMMAND
- CONFIGURE_COMMAND
- ${CMAKE_COMMAND} ${XXHASH_SOURCE_DIR}/src/extern_xxhash/cmake_unofficial
- -DCMAKE_INSTALL_PREFIX:PATH=${XXHASH_INSTALL_DIR}
- -DCMAKE_BUILD_TYPE:STRING=${CMAKE_BUILD_TYPE}
- -DCMAKE_VERBOSE_MAKEFILE:BOOL=OFF
- -DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON
- -DBUILD_XXHSUM=OFF
- -DCMAKE_GENERATOR_PLATFORM=x64
- -DBUILD_SHARED_LIBS=OFF
- ${OPTIONAL_CACHE_ARGS}
- TEST_COMMAND ""
- )
-else()
- ExternalProject_Add(
- extern_xxhash
- ${EXTERNAL_PROJECT_LOG_ARGS}
- GIT_REPOSITORY "https://github.com/Cyan4973/xxHash"
- GIT_TAG "v0.6.5"
- PREFIX ${XXHASH_SOURCE_DIR}
- DOWNLOAD_NAME "xxhash"
- UPDATE_COMMAND ""
- CONFIGURE_COMMAND ""
- BUILD_IN_SOURCE 1
- PATCH_COMMAND
- BUILD_COMMAND ${BUILD_CMD}
- INSTALL_COMMAND export PREFIX=${XXHASH_INSTALL_DIR}/ && make install
- TEST_COMMAND ""
- )
-endif()
-
-if (WIN32)
- IF(NOT EXISTS "${XXHASH_INSTALL_DIR}/lib/libxxhash.lib")
- add_custom_command(TARGET extern_xxhash POST_BUILD
- COMMAND cmake -E copy ${XXHASH_INSTALL_DIR}/lib/xxhash.lib ${XXHASH_INSTALL_DIR}/lib/libxxhash.lib
- )
- ENDIF()
- set(XXHASH_LIBRARIES "${XXHASH_INSTALL_DIR}/lib/libxxhash.lib")
-else()
- set(XXHASH_LIBRARIES "${XXHASH_INSTALL_DIR}/lib/libxxhash.a")
-endif ()
-INCLUDE_DIRECTORIES(${XXHASH_INCLUDE_DIR})
-
-add_library(xxhash STATIC IMPORTED GLOBAL)
-set_property(TARGET xxhash PROPERTY IMPORTED_LOCATION ${XXHASH_LIBRARIES})
-include_directories(${XXHASH_INCLUDE_DIR})
-add_dependencies(xxhash extern_xxhash)
diff --git a/cmake/flags.cmake b/cmake/flags.cmake
deleted file mode 100644
index 36b533aa4f7815896fb48c33fefad892b8d0d29c..0000000000000000000000000000000000000000
--- a/cmake/flags.cmake
+++ /dev/null
@@ -1,194 +0,0 @@
-# Setting Paddle Compile Flags
-include(CheckCXXCompilerFlag)
-include(CheckCCompilerFlag)
-include(CheckCXXSymbolExists)
-include(CheckTypeSize)
-
-function(CheckCompilerCXX11Flag)
- if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
- if(${CMAKE_CXX_COMPILER_VERSION} VERSION_LESS 4.8)
- message(FATAL_ERROR "Unsupported GCC version. GCC >= 4.8 required.")
- endif()
- elseif(CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang" OR CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
- # cmake >= 3.0 compiler id "AppleClang" on Mac OS X, otherwise "Clang"
- # Apple Clang is a different compiler than upstream Clang which havs different version numbers.
- # https://gist.github.com/yamaya/2924292
- if(APPLE) # cmake < 3.0 compiler id "Clang" on Mac OS X
- if(${CMAKE_CXX_COMPILER_VERSION} VERSION_LESS 5.1)
- message(FATAL_ERROR "Unsupported AppleClang version. AppleClang >= 5.1 required.")
- endif()
- else()
- if (${CMAKE_CXX_COMPILER_VERSION} VERSION_LESS 3.3)
- message(FATAL_ERROR "Unsupported Clang version. Clang >= 3.3 required.")
- endif()
- endif()
- endif()
-endfunction()
-
-CheckCompilerCXX11Flag()
-set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
-# safe_set_flag
-#
-# Set a compile flag only if compiler is support
-# is_c: is C flag or C++ flag, bool type.
-# src_list: The list name which the flag name will be append to.
-# flag_name: the flag name for compiler, such as '-Werror' '-Wall' etc
-# rest arguments: not used.
-function(safe_set_flag is_c src_list flag_name)
- string(REPLACE "-" "_" safe_name ${flag_name})
- string(REPLACE "=" "_" safe_name ${safe_name})
- if(is_c)
- CHECK_C_COMPILER_FLAG(${flag_name} C_COMPILER_SUPPORT_FLAG_${safe_name})
- set(safe_name C_COMPILER_SUPPORT_FLAG_${safe_name})
- else()
- CHECK_CXX_COMPILER_FLAG(${flag_name} CXX_COMPILER_SUPPORT_FLAG_${safe_name})
- set(safe_name CXX_COMPILER_SUPPORT_FLAG_${safe_name})
- endif()
- if(${safe_name})
- set(${src_list} "${${src_list}} ${flag_name}" PARENT_SCOPE)
- endif()
-endfunction()
-
-# helper macro to set cflag
-macro(safe_set_cflag src_list flag_name)
- safe_set_flag(ON ${src_list} ${flag_name})
-endmacro()
-
-# helper macro to set cxxflag
-macro(safe_set_cxxflag src_list flag_name)
- safe_set_flag(OFF ${src_list} ${flag_name})
-endmacro()
-
-# helper macro to set nvcc flag
-macro(safe_set_nvflag flag_name)
- string(REPLACE "-" "_" safe_name ${flag_name})
- string(REPLACE "=" "_" safe_name ${safe_name})
- CHECK_C_COMPILER_FLAG(${flag_name} C_COMPILER_SUPPORT_FLAG_${safe_name})
- set(safe_name C_COMPILER_SUPPORT_FLAG_${safe_name})
- if(${safe_name})
- LIST(APPEND CUDA_NVCC_FLAGS -Xcompiler ${flag_name})
- endif()
-endmacro()
-
-macro(safe_set_static_flag) # set c_flags and cxx_flags to static or shared
- if (BUILD_SHARED_LIBS)
- return() # if build shared libs, the flags keep same with '/MD'
- endif(BUILD_SHARED_LIBS)
- foreach(flag_var
- CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE
- CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO
- CMAKE_C_FLAGS CMAKE_C_FLAGS_DEBUG CMAKE_C_FLAGS_RELEASE
- CMAKE_C_FLAGS_MINSIZEREL CMAKE_C_FLAGS_RELWITHDEBINFO)
- if(${flag_var} MATCHES "/MD")
- string(REGEX REPLACE "/MD" "/MT" ${flag_var} "${${flag_var}}")
- endif(${flag_var} MATCHES "/MD")
- endforeach(flag_var)
-endmacro()
-
-CHECK_CXX_SYMBOL_EXISTS(UINT64_MAX "stdint.h" UINT64_MAX_EXISTS)
-if(NOT UINT64_MAX_EXISTS)
- set(CMAKE_REQUIRED_DEFINITIONS -D__STDC_LIMIT_MACROS)
- CHECK_CXX_SYMBOL_EXISTS(UINT64_MAX "stdint.h" UINT64_MAX_EXISTS_HERE)
- if(UINT64_MAX_EXISTS_HERE)
- set(CMAKE_REQUIRED_DEFINITIONS)
- add_definitions(-D__STDC_LIMIT_MACROS)
- else()
- message(FATAL_ERROR "Cannot find symbol UINT64_MAX")
- endif()
-endif()
-
-SET(CMAKE_EXTRA_INCLUDE_FILES "pthread.h")
-CHECK_TYPE_SIZE(pthread_spinlock_t SPINLOCK_FOUND)
-CHECK_TYPE_SIZE(pthread_barrier_t BARRIER_FOUND)
-if(SPINLOCK_FOUND)
- add_definitions(-DPADDLE_USE_PTHREAD_SPINLOCK)
-endif(SPINLOCK_FOUND)
-if(BARRIER_FOUND)
- add_definitions(-DPADDLE_USE_PTHREAD_BARRIER)
-endif(BARRIER_FOUND)
-SET(CMAKE_EXTRA_INCLUDE_FILES "")
-
-# Common flags. the compiler flag used for C/C++ sources whenever release or debug
-# Do not care if this flag is support for gcc.
-
-# https://github.com/PaddlePaddle/Paddle/issues/12773
-if (NOT WIN32)
-set(COMMON_FLAGS
- -fPIC
- -fno-omit-frame-pointer
- -Werror
- -Wall
- -Wextra
- -Wnon-virtual-dtor
- -Wdelete-non-virtual-dtor
- -Wno-unused-parameter
- -Wno-unused-function
- -Wno-error=literal-suffix
- -Wno-error=sign-compare
- -Wno-error=unused-local-typedefs
- -Wno-error=parentheses-equality # Warnings in pybind11
- -Wno-error=ignored-attributes # Warnings in Eigen, gcc 6.3
- -Wno-error=terminate # Warning in PADDLE_ENFORCE
- -Wno-error=int-in-bool-context # Warning in Eigen gcc 7.2
- -Wimplicit-fallthrough=0 # Warning in tinyformat.h
- -Wno-error=maybe-uninitialized # Warning in boost gcc 7.2
-)
-
-set(GPU_COMMON_FLAGS
- -fPIC
- -fno-omit-frame-pointer
- -Wnon-virtual-dtor
- -Wdelete-non-virtual-dtor
- -Wno-unused-parameter
- -Wno-unused-function
- -Wno-error=sign-compare
- -Wno-error=literal-suffix
- -Wno-error=unused-local-typedefs
- -Wno-error=unused-function # Warnings in Numpy Header.
- -Wno-error=array-bounds # Warnings in Eigen::array
-)
-set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -m64")
-endif(NOT WIN32)
-
-if (APPLE)
- # On Mac OS X build fat binaries with x86_64 architectures by default.
- set (CMAKE_OSX_ARCHITECTURES "x86_64" CACHE STRING "Build architectures for OSX" FORCE)
- # On Mac OS X register class specifier is deprecated and will cause warning error on latest clang 10.0
- set (COMMON_FLAGS -Wno-deprecated-register)
-endif(APPLE)
-
-if(LINUX)
- set(GPU_COMMON_FLAGS
- -Wall
- -Wextra
- -Werror
- ${GPU_COMMON_FLAGS})
-endif(LINUX)
-
-if(UNIX AND NOT APPLE)
- # except apple from nix*Os family
- set(LINUX TRUE)
-endif(UNIX AND NOT APPLE)
-
-foreach(flag ${COMMON_FLAGS})
- safe_set_cflag(CMAKE_C_FLAGS ${flag})
- safe_set_cxxflag(CMAKE_CXX_FLAGS ${flag})
-
-endforeach()
-
-foreach(flag ${GPU_COMMON_FLAGS})
- safe_set_nvflag(${flag})
-endforeach()
-
-if(WIN32)
-# windows build turn off warnings.
-safe_set_static_flag()
- foreach(flag_var
- CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE
- CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO
- CMAKE_C_FLAGS CMAKE_C_FLAGS_DEBUG CMAKE_C_FLAGS_RELEASE
- CMAKE_C_FLAGS_MINSIZEREL CMAKE_C_FLAGS_RELWITHDEBINFO)
- string(REGEX REPLACE "(^| )/W[0-9]( |$)" " " ${flag_var} "${${flag_var}}")
- set(flag_var "${flag_var} /w")
- endforeach(flag_var)
-endif(WIN32)
diff --git a/cmake/generic.cmake b/cmake/generic.cmake
deleted file mode 100644
index a87c64cbe9796f149585816a4eadf1a7376fb450..0000000000000000000000000000000000000000
--- a/cmake/generic.cmake
+++ /dev/null
@@ -1,567 +0,0 @@
-# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-
-# generic.cmake defines CMakes functions that look like Bazel's
-# building rules (https://bazel.build/).
-#
-#
-# -------------------------------------------
-# C++ CUDA C++ Go
-# -------------------------------------------
-# cc_library nv_library go_library
-# cc_binary nv_binary go_binary
-# cc_test nv_test go_test
-# -------------------------------------------
-#
-# To build a static library example.a from example.cc using the system
-# compiler (like GCC):
-#
-# cc_library(example SRCS example.cc)
-#
-# To build a static library example.a from multiple source files
-# example{1,2,3}.cc:
-#
-# cc_library(example SRCS example1.cc example2.cc example3.cc)
-#
-# To build a shared library example.so from example.cc:
-#
-# cc_library(example SHARED SRCS example.cc)
-#
-# To build a library using Nvidia's NVCC from .cu file(s), use the nv_
-# prefixed version:
-#
-# nv_library(example SRCS example.cu)
-#
-# To specify that a library new_example.a depends on other libraies:
-#
-# cc_library(new_example SRCS new_example.cc DEPS example)
-#
-# Static libraries can be composed of other static libraries:
-#
-# cc_library(composed DEPS dependent1 dependent2 dependent3)
-#
-# To build an executable binary file from some source files and
-# dependent libraries:
-#
-# cc_binary(example SRCS main.cc something.cc DEPS example1 example2)
-#
-# To build an executable binary file using NVCC, use the nv_ prefixed
-# version:
-#
-# nv_binary(example SRCS main.cc something.cu DEPS example1 example2)
-#
-# To build a unit test binary, which is an executable binary with
-# GoogleTest linked:
-#
-# cc_test(example_test SRCS example_test.cc DEPS example)
-#
-# To build a unit test binary using NVCC, use the nv_ prefixed version:
-#
-# nv_test(example_test SRCS example_test.cu DEPS example)
-#
-# It is pretty often that executable and test binaries depend on
-# pre-defined external libaries like glog and gflags defined in
-# /cmake/external/*.cmake:
-#
-# cc_test(example_test SRCS example_test.cc DEPS example glog gflags)
-#
-# To build a go static library using Golang, use the go_ prefixed version:
-#
-# go_library(example STATIC)
-#
-# To build a go shared library using Golang, use the go_ prefixed version:
-#
-# go_library(example SHARED)
-#
-
-# including binary directory for generated headers.
-include_directories(${CMAKE_CURRENT_BINARY_DIR})
-
-if(NOT APPLE)
- find_package(Threads REQUIRED)
- link_libraries(${CMAKE_THREAD_LIBS_INIT})
- set(CMAKE_CXX_LINK_EXECUTABLE "${CMAKE_CXX_LINK_EXECUTABLE} -pthread -ldl")
- if (NOT ANDROID)
- set(CMAKE_CXX_LINK_EXECUTABLE "${CMAKE_CXX_LINK_EXECUTABLE} -lrt")
- endif()
-endif(NOT APPLE)
-
-set_property(GLOBAL PROPERTY FLUID_MODULES "")
-# find all fluid modules is used for paddle fluid static library
-# for building inference libs
-function(find_fluid_modules TARGET_NAME)
- get_filename_component(__target_path ${TARGET_NAME} ABSOLUTE)
- string(REGEX REPLACE "^${PADDLE_SOURCE_DIR}/" "" __target_path ${__target_path})
- string(FIND "${__target_path}" "fluid" pos)
- if(pos GREATER 1)
- get_property(fluid_modules GLOBAL PROPERTY FLUID_MODULES)
- set(fluid_modules ${fluid_modules} ${TARGET_NAME})
- set_property(GLOBAL PROPERTY FLUID_MODULES "${fluid_modules}")
- endif()
-endfunction(find_fluid_modules)
-
-
-function(common_link TARGET_NAME)
- if (WITH_PROFILER)
- target_link_libraries(${TARGET_NAME} gperftools::profiler)
- endif()
-
- if (WITH_JEMALLOC)
- target_link_libraries(${TARGET_NAME} jemalloc::jemalloc)
- endif()
-endfunction()
-
-
-# find all third_party modules is used for paddle static library
-# for reduce the dependency when building the inference libs.
-set_property(GLOBAL PROPERTY FLUID_THIRD_PARTY)
-function(find_fluid_thirdparties TARGET_NAME)
- get_filename_component(__target_path ${TARGET_NAME} ABSOLUTE)
- string(REGEX REPLACE "^${PADDLE_SOURCE_DIR}/" "" __target_path ${__target_path})
- string(FIND "${__target_path}" "third_party" pos)
- if(pos GREATER 1)
- get_property(fluid_ GLOBAL PROPERTY FLUID_THIRD_PARTY)
- set(fluid_third_partys ${fluid_third_partys} ${TARGET_NAME})
- set_property(GLOBAL PROPERTY FLUID_THIRD_PARTY "${fluid_third_partys}")
- endif()
-endfunction(find_fluid_thirdparties)
-
-function(merge_static_libs TARGET_NAME)
- set(libs ${ARGN})
- list(REMOVE_DUPLICATES libs)
-
- # Get all propagation dependencies from the merged libraries
- foreach(lib ${libs})
- list(APPEND libs_deps ${${lib}_LIB_DEPENDS})
- endforeach()
- if(libs_deps)
- list(REMOVE_DUPLICATES libs_deps)
- endif()
-
- # To produce a library we need at least one source file.
- # It is created by add_custom_command below and will helps
- # also help to track dependencies.
- set(target_SRCS ${CMAKE_CURRENT_BINARY_DIR}/${TARGET_NAME}_dummy.c)
-
- if(APPLE) # Use OSX's libtool to merge archives
- # Make the generated dummy source file depended on all static input
- # libs. If input lib changes,the source file is touched
- # which causes the desired effect (relink).
- add_custom_command(OUTPUT ${target_SRCS}
- COMMAND ${CMAKE_COMMAND} -E touch ${target_SRCS}
- DEPENDS ${libs})
-
- # Generate dummy staic lib
- file(WRITE ${target_SRCS} "const char *dummy_${TARGET_NAME} = \"${target_SRCS}\";")
- add_library(${TARGET_NAME} STATIC ${target_SRCS})
- target_link_libraries(${TARGET_NAME} ${libs_deps})
-
- foreach(lib ${libs})
- # Get the file names of the libraries to be merged
- set(libfiles ${libfiles} $)
- endforeach()
- add_custom_command(TARGET ${TARGET_NAME} POST_BUILD
- COMMAND rm "${CMAKE_CURRENT_BINARY_DIR}/lib${TARGET_NAME}.a"
- COMMAND /usr/bin/libtool -static -o "${CMAKE_CURRENT_BINARY_DIR}/lib${TARGET_NAME}.a" ${libfiles}
- )
- endif(APPLE)
- if(LINUX) # general UNIX: use "ar" to extract objects and re-add to a common lib
- set(target_DIR ${CMAKE_CURRENT_BINARY_DIR}/${TARGET_NAME}.dir)
-
- foreach(lib ${libs})
- set(objlistfile ${target_DIR}/${lib}.objlist) # list of objects in the input library
- set(objdir ${target_DIR}/${lib}.objdir)
-
- add_custom_command(OUTPUT ${objdir}
- COMMAND ${CMAKE_COMMAND} -E make_directory ${objdir}
- DEPENDS ${lib})
-
- add_custom_command(OUTPUT ${objlistfile}
- COMMAND ${CMAKE_AR} -x "$"
- COMMAND ${CMAKE_AR} -t "$" > ${objlistfile}
- DEPENDS ${lib} ${objdir}
- WORKING_DIRECTORY ${objdir})
-
- list(APPEND target_OBJS "${objlistfile}")
- endforeach()
-
- # Make the generated dummy source file depended on all static input
- # libs. If input lib changes,the source file is touched
- # which causes the desired effect (relink).
- add_custom_command(OUTPUT ${target_SRCS}
- COMMAND ${CMAKE_COMMAND} -E touch ${target_SRCS}
- DEPENDS ${libs} ${target_OBJS})
-
- # Generate dummy staic lib
- file(WRITE ${target_SRCS} "const char *dummy_${TARGET_NAME} = \"${target_SRCS}\";")
- add_library(${TARGET_NAME} STATIC ${target_SRCS})
- target_link_libraries(${TARGET_NAME} ${libs_deps})
-
- # Get the file name of the generated library
- set(target_LIBNAME "$")
-
- add_custom_command(TARGET ${TARGET_NAME} POST_BUILD
- COMMAND ${CMAKE_AR} crs ${target_LIBNAME} `find ${target_DIR} -name '*.o'`
- COMMAND ${CMAKE_RANLIB} ${target_LIBNAME}
- WORKING_DIRECTORY ${target_DIR})
- endif(LINUX)
- if(WIN32) # windows do not support gcc/nvcc combined compiling. Use msvc lib.exe to merge libs.
- # Make the generated dummy source file depended on all static input
- # libs. If input lib changes,the source file is touched
- # which causes the desired effect (relink).
- add_custom_command(OUTPUT ${target_SRCS}
- COMMAND ${CMAKE_COMMAND} -E touch ${target_SRCS}
- DEPENDS ${libs})
-
- # Generate dummy staic lib
- file(WRITE ${target_SRCS} "const char *dummy_${TARGET_NAME} = \"${target_SRCS}\";")
- add_library(${TARGET_NAME} STATIC ${target_SRCS})
- target_link_libraries(${TARGET_NAME} ${libs_deps})
-
- foreach(lib ${libs})
- # Get the file names of the libraries to be merged
- set(libfiles ${libfiles} $)
- endforeach()
- # msvc will put libarary in directory of "/Release/xxxlib" by default
- # COMMAND cmake -E remove "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_BUILD_TYPE}/${TARGET_NAME}.lib"
- add_custom_command(TARGET ${TARGET_NAME} POST_BUILD
- COMMAND cmake -E make_directory "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_BUILD_TYPE}"
- COMMAND lib /OUT:${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_BUILD_TYPE}/lib${TARGET_NAME}.lib ${libfiles}
- )
- endif(WIN32)
-endfunction(merge_static_libs)
-
-function(cc_library TARGET_NAME)
- set(options STATIC static SHARED shared)
- set(oneValueArgs "")
- set(multiValueArgs SRCS DEPS)
- cmake_parse_arguments(cc_library "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
- if(WIN32)
- # add libxxx.lib prefix in windows
- set(${TARGET_NAME}_LIB_NAME "${CMAKE_STATIC_LIBRARY_PREFIX}${TARGET_NAME}${CMAKE_STATIC_LIBRARY_SUFFIX}" CACHE STRING "output library name for target ${TARGET_NAME}")
- endif(WIN32)
- if(cc_library_SRCS)
- if(cc_library_SHARED OR cc_library_shared) # build *.so
- add_library(${TARGET_NAME} SHARED ${cc_library_SRCS})
- else()
- add_library(${TARGET_NAME} STATIC ${cc_library_SRCS})
- find_fluid_modules(${TARGET_NAME})
- endif()
-
- if(cc_library_DEPS)
- # Don't need link libwarpctc.so
- if("${cc_library_DEPS};" MATCHES "warpctc;")
- list(REMOVE_ITEM cc_library_DEPS warpctc)
- add_dependencies(${TARGET_NAME} warpctc)
- endif()
- # Only deps libmklml.so, not link
- if("${cc_library_DEPS};" MATCHES "mklml;")
- list(REMOVE_ITEM cc_library_DEPS mklml)
- if(NOT "${TARGET_NAME}" MATCHES "dynload_mklml")
- list(APPEND cc_library_DEPS dynload_mklml)
- endif()
- add_dependencies(${TARGET_NAME} mklml)
- if(WIN32)
- target_link_libraries(${TARGET_NAME} ${MKLML_IOMP_LIB})
- else(WIN32)
- target_link_libraries(${TARGET_NAME} "-L${MKLML_LIB_DIR} -liomp5 -Wl,--as-needed")
- endif(WIN32)
- endif()
- # remove link to python, see notes at:
- # https://github.com/pybind/pybind11/blob/master/docs/compiling.rst#building-manually
- if("${cc_library_DEPS};" MATCHES "python;")
- list(REMOVE_ITEM cc_library_DEPS python)
- add_dependencies(${TARGET_NAME} python)
- if(WIN32)
- target_link_libraries(${TARGET_NAME} ${PYTHON_LIBRARIES})
- else()
- target_link_libraries(${TARGET_NAME} "-Wl,-undefined,dynamic_lookup")
- endif(WIN32)
- endif()
- target_link_libraries(${TARGET_NAME} ${cc_library_DEPS})
- add_dependencies(${TARGET_NAME} ${cc_library_DEPS})
- common_link(${TARGET_NAME})
- endif()
-
- set(full_path_src "")
- # cpplint code style
- foreach(source_file ${cc_library_SRCS})
- string(REGEX REPLACE "\\.[^.]*$" "" source ${source_file})
- if(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${source}.h)
- list(APPEND cc_library_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/${source}.h)
- endif()
- if(${source_file} MATCHES "framework.pb.cc")
- list(APPEND full_path_src ${source_file})
- else()
- list(APPEND full_path_src ${CMAKE_CURRENT_SOURCE_DIR}/${source_file})
- endif()
- endforeach()
- set(__lite_cc_files ${__lite_cc_files} ${full_path_src} CACHE INTERNAL "")
- else(cc_library_SRCS)
- if(cc_library_DEPS)
- merge_static_libs(${TARGET_NAME} ${cc_library_DEPS})
- else()
- message(FATAL_ERROR "Please specify source files or libraries in cc_library(${TARGET_NAME} ...).")
- endif()
- endif(cc_library_SRCS)
-endfunction(cc_library)
-
-# The link operation under windows may exceeds the maximum characters limit, simply break the link command
-# into multiple link opeartion can fix that, say
-# original:
-# lib /out:target.lib a.lib b.lib c.lib d.lib
-# after:
-# 1. lib /out:dummy_lib_1.lib a.lib b.lib
-# 2. lib /out:dummy_lib_2.lib c.lib d.lib
-# 1. lib /out:target.lib dummy_lib_1.lib dummy_lib_2.lib
-function(sep_library TARGET_NAME)
- set(options STATIC static SHARED shared)
- set(oneValueArgs "")
- set(multiValueArgs SRCS DEPS)
- cmake_parse_arguments(sep_library "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
- set(dummy_index 1)
- set(dummy_offset 1)
- # the dummy target would be consisted of limit size libraries
- set(dummy_limit 50)
- list(LENGTH sep_library_DEPS sep_all_len)
- foreach(v ${sep_library_DEPS})
- list(APPEND dummy_list ${v})
- list(LENGTH dummy_list listlen )
- if ((${listlen} GREATER ${dummy_limit}) OR (${dummy_offset} EQUAL ${sep_all_len}))
- message("create dummy library ${TARGET_NAME}_dummy_lib_${dummy_index} for ${TARGET_NAME}")
- cc_library(${TARGET_NAME}_dummy_lib_${dummy_index} STATIC DEPS ${dummy_list})
- foreach(i ${dummy_list})
- list(REMOVE_AT dummy_list 0)
- endforeach()
- list(APPEND ${TARGET_NAME}_dummy_list ${TARGET_NAME}_dummy_lib_${dummy_index})
- MATH(EXPR dummy_index "${dummy_index}+1")
- endif()
- MATH(EXPR dummy_offset "${dummy_offset}+1")
- endforeach()
- if(${sep_library_SHARED})
- cc_library(${TARGET_NAME} SHARED SRCS ${sep_library_SRCS} DEPS ${${TARGET_NAME}_dummy_list})
- else(${sep_library_SHARED})
- cc_library(${TARGET_NAME} STATIC SRCS ${sep_library_SRCS} DEPS ${${TARGET_NAME}_dummy_list})
- endif(${sep_library_SHARED})
-endfunction(sep_library)
-
-function(cc_binary TARGET_NAME)
- set(options "")
- set(oneValueArgs "")
- set(multiValueArgs SRCS DEPS)
- cmake_parse_arguments(cc_binary "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
- add_executable(${TARGET_NAME} ${cc_binary_SRCS})
- if(cc_binary_DEPS)
- target_link_libraries(${TARGET_NAME} ${cc_binary_DEPS})
- add_dependencies(${TARGET_NAME} ${cc_binary_DEPS})
- common_link(${TARGET_NAME})
- endif()
- get_property(os_dependency_modules GLOBAL PROPERTY OS_DEPENDENCY_MODULES)
- target_link_libraries(${TARGET_NAME} ${os_dependency_modules})
-endfunction(cc_binary)
-
-function(cc_test TARGET_NAME)
- if(WITH_TESTING)
- set(options SERIAL)
- set(oneValueArgs "")
- set(multiValueArgs SRCS DEPS ARGS)
- cmake_parse_arguments(cc_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
- add_executable(${TARGET_NAME} ${cc_test_SRCS})
- if(WIN32)
- if("${cc_test_DEPS};" MATCHES "python;")
- list(REMOVE_ITEM cc_test_DEPS python)
- target_link_libraries(${TARGET_NAME} ${PYTHON_LIBRARIES})
- endif()
- endif(WIN32)
- get_property(os_dependency_modules GLOBAL PROPERTY OS_DEPENDENCY_MODULES)
- target_link_libraries(${TARGET_NAME} ${cc_test_DEPS} ${os_dependency_modules} paddle_gtest_main memory gtest gflags glog)
- add_dependencies(${TARGET_NAME} ${cc_test_DEPS} paddle_gtest_main memory gtest gflags glog)
- common_link(${TARGET_NAME})
- add_test(NAME ${TARGET_NAME}
- COMMAND ${TARGET_NAME} ${cc_test_ARGS}
- WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
- if (${cc_test_SERIAL})
- set_property(TEST ${TARGET_NAME} PROPERTY RUN_SERIAL 1)
- endif()
- set_property(TEST ${TARGET_NAME} PROPERTY ENVIRONMENT FLAGS_cpu_deterministic=true)
- set_property(TEST ${TARGET_NAME} PROPERTY ENVIRONMENT FLAGS_init_allocated_mem=true)
- set_property(TEST ${TARGET_NAME} PROPERTY ENVIRONMENT FLAGS_limit_of_tmp_allocation=4294967296) # 4G
- set_property(TEST ${TARGET_NAME} PROPERTY ENVIRONMENT FLAGS_cudnn_deterministic=true)
- # No unit test should exceed 10 minutes.
- set_tests_properties(${TARGET_NAME} PROPERTIES TIMEOUT 600)
- endif()
-endfunction(cc_test)
-
-# cc_test without default dependencies
-function(raw_cc_test TARGET_NAME)
- if(WITH_TESTING)
- set(options SERIAL)
- set(oneValueArgs "")
- set(multiValueArgs SRCS DEPS ARGS)
- cmake_parse_arguments(cc_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
- add_executable(${TARGET_NAME} ${cc_test_SRCS})
- if(WIN32)
- if("${cc_test_DEPS};" MATCHES "python;")
- list(REMOVE_ITEM cc_test_DEPS python)
- target_link_libraries(${TARGET_NAME} ${PYTHON_LIBRARIES})
- endif()
- endif(WIN32)
- get_property(os_dependency_modules GLOBAL PROPERTY OS_DEPENDENCY_MODULES)
-
- if(LITE_WITH_LIGHT_WEIGHT_FRAMEWORK)
- target_link_libraries(${TARGET_NAME} ${cc_test_DEPS} ${os_dependency_modules} lite_gtest_main gtest gflags logging)
- add_dependencies(${TARGET_NAME} ${cc_test_DEPS} lite_gtest_main gtest gflags logging)
- else()
- target_link_libraries(${TARGET_NAME} ${cc_test_DEPS} ${os_dependency_modules} lite_gtest_main gtest gflags glog)
- add_dependencies(${TARGET_NAME} ${cc_test_DEPS} lite_gtest_main gtest gflags glog)
- endif(LITE_WITH_LIGHT_WEIGHT_FRAMEWORK)
-
- common_link(${TARGET_NAME})
- add_test(NAME ${TARGET_NAME}
- COMMAND ${TARGET_NAME} ${cc_test_ARGS}
- WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
- if (${cc_test_SERIAL})
- set_property(TEST ${TARGET_NAME} PROPERTY RUN_SERIAL 1)
- endif()
- # No unit test should exceed 10 minutes.
- set_tests_properties(${TARGET_NAME} PROPERTIES TIMEOUT 600)
- endif()
-endfunction(raw_cc_test)
-
-function(_lite_cc_test args)
- message(STATUS "building lite raw test: ${args}")
- raw_cc_test(${args} ${ARGN})
-endfunction()
-
-function(nv_library TARGET_NAME)
- if (LITE_WITH_CUDA)
- set(options STATIC static SHARED shared)
- set(oneValueArgs "")
- set(multiValueArgs SRCS DEPS)
- cmake_parse_arguments(nv_library "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
- if(nv_library_SRCS)
- if (nv_library_SHARED OR nv_library_shared) # build *.so
- cuda_add_library(${TARGET_NAME} SHARED ${nv_library_SRCS})
- else()
- cuda_add_library(${TARGET_NAME} STATIC ${nv_library_SRCS})
- find_fluid_modules(${TARGET_NAME})
- endif()
- if (nv_library_DEPS)
- add_dependencies(${TARGET_NAME} ${nv_library_DEPS})
- target_link_libraries(${TARGET_NAME} ${nv_library_DEPS})
- endif()
- # cpplint code style
- foreach(source_file ${nv_library_SRCS})
- string(REGEX REPLACE "\\.[^.]*$" "" source ${source_file})
- if(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${source}.h)
- list(APPEND nv_library_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/${source}.h)
- endif()
- endforeach()
- else(nv_library_SRCS)
- if (nv_library_DEPS)
- merge_static_libs(${TARGET_NAME} ${nv_library_DEPS})
- else()
- message(FATAL "Please specify source file or library in nv_library.")
- endif()
- endif(nv_library_SRCS)
- endif()
-endfunction(nv_library)
-
-function(nv_binary TARGET_NAME)
- if (LITE_WITH_CUDA)
- set(options "")
- set(oneValueArgs "")
- set(multiValueArgs SRCS DEPS)
- cmake_parse_arguments(nv_binary "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
- cuda_add_executable(${TARGET_NAME} ${nv_binary_SRCS})
- if(nv_binary_DEPS)
- target_link_libraries(${TARGET_NAME} ${nv_binary_DEPS})
- add_dependencies(${TARGET_NAME} ${nv_binary_DEPS})
- common_link(${TARGET_NAME})
- endif()
- endif()
-endfunction(nv_binary)
-
-function(nv_test TARGET_NAME)
- if (LITE_WITH_CUDA AND WITH_TESTING)
- set(options SERIAL)
- set(oneValueArgs "")
- set(multiValueArgs SRCS DEPS)
- cmake_parse_arguments(nv_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
- cuda_add_executable(${TARGET_NAME} ${nv_test_SRCS})
- get_property(os_dependency_modules GLOBAL PROPERTY OS_DEPENDENCY_MODULES)
- target_link_libraries(${TARGET_NAME} ${nv_test_DEPS} lite_gtest_main gtest
-gflags glog ${os_dependency_modules} ${CUDNN_LIBRARY})
- add_dependencies(${TARGET_NAME} ${nv_test_DEPS} lite_gtest_main gtest gflags glog)
- common_link(${TARGET_NAME})
- add_test(${TARGET_NAME} ${TARGET_NAME})
- if (nv_test_SERIAL)
- set_property(TEST ${TARGET_NAME} PROPERTY RUN_SERIAL 1)
- endif()
- endif()
-endfunction(nv_test)
-
-
-# Modification of standard 'protobuf_generate_cpp()' with protobuf-lite support
-# Usage:
-# paddle_protobuf_generate_cpp( )
-
-function(paddle_protobuf_generate_cpp SRCS HDRS)
- if(NOT ARGN)
- message(SEND_ERROR "Error: paddle_protobuf_generate_cpp() called without any proto files")
- return()
- endif()
-
- set(${SRCS})
- set(${HDRS})
-
- foreach(FIL ${ARGN})
- get_filename_component(ABS_FIL ${FIL} ABSOLUTE)
- get_filename_component(FIL_WE ${FIL} NAME_WE)
-
- set(_protobuf_protoc_src "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}.pb.cc")
- set(_protobuf_protoc_hdr "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}.pb.h")
- list(APPEND ${SRCS} "${_protobuf_protoc_src}")
- list(APPEND ${HDRS} "${_protobuf_protoc_hdr}")
-
- add_custom_command(
- OUTPUT "${_protobuf_protoc_src}"
- "${_protobuf_protoc_hdr}"
-
- COMMAND ${CMAKE_COMMAND} -E make_directory "${CMAKE_CURRENT_BINARY_DIR}"
- COMMAND ${PROTOBUF_PROTOC_EXECUTABLE}
- -I${CMAKE_CURRENT_SOURCE_DIR}
- --cpp_out "${CMAKE_CURRENT_BINARY_DIR}" ${ABS_FIL}
- DEPENDS ${ABS_FIL} protoc
- COMMENT "Running C++ protocol buffer compiler on ${FIL}"
- VERBATIM )
- endforeach()
-
- set_source_files_properties(${${SRCS}} ${${HDRS}} PROPERTIES GENERATED TRUE)
- set(${SRCS} ${${SRCS}} PARENT_SCOPE)
- set(${HDRS} ${${HDRS}} PARENT_SCOPE)
-endfunction()
-
-
-function(proto_library TARGET_NAME)
- set(oneValueArgs "")
- set(multiValueArgs SRCS DEPS)
- cmake_parse_arguments(proto_library "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
- set(proto_srcs)
- set(proto_hdrs)
- paddle_protobuf_generate_cpp(proto_srcs proto_hdrs ${proto_library_SRCS})
- cc_library(${TARGET_NAME} SRCS ${proto_srcs} DEPS ${proto_library_DEPS} protobuf)
-endfunction()
diff --git a/cmake/hip.cmake b/cmake/hip.cmake
deleted file mode 100644
index c3a748db502037f926dc241e4c3bc26a83ad3468..0000000000000000000000000000000000000000
--- a/cmake/hip.cmake
+++ /dev/null
@@ -1,53 +0,0 @@
-if(NOT WITH_AMD_GPU)
- return()
-endif()
-
-include_directories("/opt/rocm/include")
-include_directories("/opt/rocm/hip/include")
-include_directories("/opt/rocm/miopen/include")
-include_directories("/opt/rocm/hipblas/include")
-include_directories("/opt/rocm/hiprand/include")
-include_directories("/opt/rocm/rocrand/include")
-include_directories("/opt/rocm/rccl/include")
-include_directories("/opt/rocm/thrust")
-
-set(HIP_HCC_FLAGS "${HIP_HCC_FLAGS} -fPIC -DPADDLE_WITH_HIP -std=c++11" )
-
-if(WITH_DSO)
- set(HIP_HCC_FLAGS "${HIP_HCC_FLAGS} -DPADDLE_USE_DSO")
-endif(WITH_DSO)
-
-if(WITH_TESTING)
- set(HIP_HCC_FLAGS "${HIP_HCC_FLAGS} -DPADDLE_WITH_TESTING")
-endif(WITH_TESTING)
-
-if(WITH_DISTRIBUTE)
- set(HIP_HCC_FLAGS "${HIP_HCC_FLAGS} -DPADDLE_WITH_DISTRIBUTE")
-endif(WITH_DISTRIBUTE)
-
-if(WITH_GRPC)
- set(HIP_HCC_FLAGS "${HIP_HCC_FLAGS} -DPADDLE_WITH_GRPC")
-endif(WITH_GRPC)
-
-if(WITH_MKLDNN)
- set(HIP_HCC_FLAGS "${HIP_HCC_FLAGS} -DPADDLE_WITH_MKLDNN")
-endif(WITH_MKLDNN)
-
-set(HIP_HCC_FLAGS "${HIP_HCC_FLAGS} -DANY_IMPL_ANY_CAST_MOVEABLE")
-
-if(CMAKE_BUILD_TYPE STREQUAL "Debug")
- list(APPEND HIP_HCC_FLAGS ${CMAKE_CXX_FLAGS_DEBUG})
-elseif(CMAKE_BUILD_TYPE STREQUAL "RelWithDebInfo")
- list(APPEND HIP_HCC_FLAGS ${CMAKE_CXX_FLAGS_RELWITHDEBINFO})
-elseif(CMAKE_BUILD_TYPE STREQUAL "MinSizeRel")
- list(APPEND HIP_HCC_FLAGS ${CMAKE_CXX_FLAGS_MINSIZEREL})
-endif()
-
-if("x${HCC_HOME}" STREQUAL "x")
- set(HCC_HOME "/opt/rocm/hcc")
-endif()
-
-set(CMAKE_HIP_LINK_EXECUTABLE "${HIP_HIPCC_CMAKE_LINKER_HELPER} ${HCC_HOME} -o ")
-set(CMAKE_HIP_CREATE_SHARED_LIBRARY "${HIP_HIPCC_CMAKE_LINKER_HELPER} ${HCC_HOME} -o -shared")
-set(CMAKE_HIP_CREATE_SHARED_MODULE "${HIP_HIPCC_CMAKE_LINKER_HELPER} ${HCC_HOME} -o -shared")
-
diff --git a/cmake/lite.cmake b/cmake/lite.cmake
deleted file mode 100644
index 707982a3e7030aabe1733e015792d542eaa9f152..0000000000000000000000000000000000000000
--- a/cmake/lite.cmake
+++ /dev/null
@@ -1,435 +0,0 @@
-set(LITE_URL "http://paddle-inference-dist.bj.bcebos.com" CACHE STRING "inference download url")
-
-function(lite_download_and_uncompress INSTALL_DIR URL FILENAME)
- message(STATUS "Download inference test stuff from ${URL}/${FILENAME}")
- string(REGEX REPLACE "[-%.]" "_" FILENAME_EX ${FILENAME})
- set(EXTERNAL_PROJECT_NAME "extern_lite_download_${FILENAME_EX}")
- set(UNPACK_DIR "${INSTALL_DIR}/src/${EXTERNAL_PROJECT_NAME}")
- ExternalProject_Add(
- ${EXTERNAL_PROJECT_NAME}
- ${EXTERNAL_PROJECT_LOG_ARGS}
- PREFIX ${INSTALL_DIR}
- DOWNLOAD_COMMAND wget --no-check-certificate -q -O ${INSTALL_DIR}/${FILENAME} ${URL}/${FILENAME} && ${CMAKE_COMMAND} -E tar xzf ${INSTALL_DIR}/${FILENAME}
- DOWNLOAD_DIR ${INSTALL_DIR}
- DOWNLOAD_NO_PROGRESS 1
- CONFIGURE_COMMAND ""
- BUILD_COMMAND ""
- UPDATE_COMMAND ""
- INSTALL_COMMAND ""
- )
-endfunction()
-
-function (lite_deps TARGET)
- set(options "")
- set(oneValueArgs "")
- set(multiValueArgs DEPS X86_DEPS CUDA_DEPS ARM_DEPS PROFILE_DEPS LIGHT_DEPS HVY_DEPS CL_DEPS FPGA_DEPS NPU_DEPS ARGS)
- cmake_parse_arguments(lite_deps "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
-
- set(deps ${lite_deps_DEPS})
-
- if(LITE_WITH_X86)
- foreach(var ${lite_deps_X86_DEPS})
- set(deps ${deps} ${var})
- endforeach(var)
- endif()
-
- if(LITE_WITH_CUDA)
- foreach(var ${lite_deps_CUDA_DEPS})
- set(deps ${deps} ${var})
- endforeach(var)
- endif()
-
- if(LITE_WITH_ARM)
- foreach(var ${lite_deps_ARM_DEPS})
- set(deps ${deps} ${var})
- endforeach(var)
- endif()
-
- if(LITE_WITH_PROFILE)
- foreach(var ${lite_deps_PROFILE_DEPS})
- set(deps ${deps} ${var})
- endforeach(var)
- endif()
-
- if(LITE_WITH_LIGHT_WEIGHT_FRAMEWORK)
- foreach(var ${lite_deps_LIGHT_DEPS})
- set(deps ${deps} ${var})
- endforeach(var)
- endif()
-
-
-
- if (NOT LITE_WITH_LIGHT_WEIGHT_FRAMEWORK)
- foreach(var ${lite_deps_HVY_DEPS})
- set(deps ${deps} ${var})
- endforeach(var)
- endif()
-
- if (LITE_WITH_OPENCL)
- foreach(var ${lite_deps_CL_DEPS})
- set(deps ${deps} ${var})
- endforeach(var)
- endif()
-
- if (LITE_WITH_FPGA)
- foreach(var ${lite_deps_FPGA_DEPS})
- set(deps ${deps} ${var})
- endforeach(var)
- endif()
-
- if (LITE_WITH_NPU)
- foreach(var ${lite_deps_NPU_DEPS})
- set(deps ${deps} ${var})
- endforeach(var)
- endif()
-
- set(${TARGET} ${deps} PARENT_SCOPE)
-endfunction()
-
-
-# A fake target to include all the libraries and tests the lite module depends.
-add_custom_target(lite_compile_deps COMMAND echo 1)
-
-# Add names for lite libraries for latter compile. We use this name list to avoid compiling
-# the whole fluid project to accelerate the compile speed.
-set(offline_lib_registry_file "${CMAKE_BINARY_DIR}/lite_libs.txt")
-file(WRITE ${offline_lib_registry_file} "") # clean
-
-# cc_library with branch support.
-# The branches:
-# X86_DEPS: works only when LITE_WITH_X86 is ON.
-# CUDA_DEPS: LITE_WITH_CUDA
-# ARM_DEPS: LITE_WITH_ARM
-# PROFILE_DEPS: LITE_WITH_PROFILE
-# LIGHT_DEPS: LITE_WITH_LIGHT_WEIGHT_FRAMEWORK
-# HVY_DEPS: NOT LITE_WITH_LIGHT_WEIGHT_FRAMEWORK
-# EXCLUDE_COMPILE_DEPS: TARGET will not be included in lite_compile_deps if this is not None
-function(lite_cc_library TARGET)
- set(options SHARED shared STATIC static MODULE module)
- set(oneValueArgs "")
- set(multiValueArgs SRCS DEPS X86_DEPS CUDA_DEPS CL_DEPS NPU_DEPS ARM_DEPS FPGA_DEPS PROFILE_DEPS LIGHT_DEPS
- HVY_DEPS EXCLUDE_COMPILE_DEPS ARGS)
- cmake_parse_arguments(args "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
-
- set(deps "")
- lite_deps(deps
- DEPS ${args_DEPS}
- X86_DEPS ${args_X86_DEPS}
- CUDA_DEPS ${args_CUDA_DEPS}
- CL_DEPS ${args_CL_DEPS}
- NPU_DEPS ${args_NPU_DEPS}
- ARM_DEPS ${args_ARM_DEPS}
- FPGA_DEPS ${args_FPGA_DEPS}
- PROFILE_DEPS ${args_PROFILE_DEPS}
- LIGHT_DEPS ${args_LIGHT_DEPS}
- HVY_DEPS ${args_HVY_DEPS}
- )
-
- if (args_SHARED OR ARGS_shared)
- cc_library(${TARGET} SRCS ${args_SRCS} DEPS ${deps} ${args_DEPS} SHARED)
- elseif (args_MODULE OR ARGS_module)
- add_library(${TARGET} MODULE ${args_SRCS})
- add_dependencies(${TARGET} ${deps} ${args_DEPS})
- else()
- cc_library(${TARGET} SRCS ${args_SRCS} DEPS ${deps} ${args_DEPS})
- endif()
- target_compile_options(${TARGET} BEFORE PRIVATE -Wno-ignored-qualifiers)
-
- # collect targets need to compile for lite
- if (args_SRCS AND NOT args_EXCLUDE_COMPILE_DEPS)
- add_dependencies(lite_compile_deps ${TARGET})
- endif()
-
- # register a library name.
- file(APPEND ${offline_lib_registry_file} "${TARGET}\n")
-endfunction()
-
-function(lite_cc_binary TARGET)
- set(options "")
- set(oneValueArgs "")
- set(multiValueArgs SRCS DEPS X86_DEPS CUDA_DEPS CL_DEPS ARM_DEPS FPGA_DEPS PROFILE_DEPS
- LIGHT_DEPS HVY_DEPS EXCLUDE_COMPILE_DEPS ARGS)
- cmake_parse_arguments(args "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
-
- set(deps "")
- lite_deps(deps
- DEPS ${args_DEPS}
- X86_DEPS ${args_X86_DEPS}
- CUDA_DEPS ${args_CUDA_DEPS}
- CL_DEPS ${args_CL_DEPS}
- ARM_DEPS ${args_ARM_DEPS}
- FPGA_DEPS ${args_FPGA_DEPS}
- PROFILE_DEPS ${args_PROFILE_DEPS}
- LIGHT_DEPS ${args_LIGHT_DEPS}
- HVY_DEPS ${args_HVY_DEPS}
- )
- cc_binary(${TARGET} SRCS ${args_SRCS} DEPS ${deps} ${args_DEPS})
- target_compile_options(${TARGET} BEFORE PRIVATE -Wno-ignored-qualifiers)
- # collect targets need to compile for lite
- if (NOT args_EXCLUDE_COMPILE_DEPS)
- add_dependencies(lite_compile_deps ${TARGET})
- endif()
-endfunction()
-
-# Add a unit-test name to file for latter offline manual test.
-set(offline_test_registry_file "${CMAKE_BINARY_DIR}/lite_tests.txt")
-file(WRITE ${offline_test_registry_file} "") # clean
-# Test lite modules.
-
-function(lite_cc_test TARGET)
- if(NOT WITH_TESTING)
- return()
- endif()
- set(options "")
- set(oneValueArgs "")
- set(multiValueArgs SRCS DEPS X86_DEPS CUDA_DEPS CL_DEPS ARM_DEPS FPGA_DEPS PROFILE_DEPS
- LIGHT_DEPS HVY_DEPS EXCLUDE_COMPILE_DEPS
- ARGS
- COMPILE_LEVEL # (basic|extra)
- )
- cmake_parse_arguments(args "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
-
- if (args_COMPILE_LEVEL STREQUAL "extra" AND (NOT LITE_BUILD_EXTRA))
- MESSAGE(STATUS "Ignore test ${TARGET} due to compile level ${args_COMPILE_LEVEL}")
- return()
- endif()
-
- set(deps "")
- lite_deps(deps
- DEPS ${args_DEPS}
- X86_DEPS ${args_X86_DEPS}
- CUDA_DEPS ${args_CUDA_DEPS}
- CL_DEPS ${args_CL_DEPS}
- ARM_DEPS ${args_ARM_DEPS}
- FPGA_DEPS ${args_FPGA_DEPS}
- PROFILE_DEPS ${args_PROFILE_DEPS}
- LIGHT_DEPS ${args_LIGHT_DEPS}
- HVY_DEPS ${args_HVY_DEPS}
- )
- _lite_cc_test(${TARGET} SRCS ${args_SRCS} DEPS ${deps} ARGS ${args_ARGS})
- target_compile_options(${TARGET} BEFORE PRIVATE -Wno-ignored-qualifiers)
- file(APPEND ${offline_test_registry_file} "${TARGET}\n")
-
- # collect targets need to compile for lite
- if (NOT args_EXCLUDE_COMPILE_DEPS)
- add_dependencies(lite_compile_deps ${TARGET})
- endif()
-endfunction()
-
-set(arm_kernels CACHE INTERNAL "arm kernels")
-set(x86_kernels CACHE INTERNAL "x86 kernels")
-set(fpga_kernels CACHE INTERNAL "fpga kernels")
-set(npu_kernels CACHE INTERNAL "npu kernels")
-set(opencl_kernels CACHE INTERNAL "opencl kernels")
-set(host_kernels CACHE INTERNAL "host kernels")
-
-set(kernels_src_list "${CMAKE_BINARY_DIR}/kernels_src_list.txt")
-file(WRITE ${kernels_src_list} "") # clean
-# add a kernel for some specific device
-# device: one of (Host, ARM, X86, NPU, FPGA, OPENCL, CUDA)
-# level: one of (basic, extra)
-function(add_kernel TARGET device level)
- set(options "")
- set(oneValueArgs "")
- set(multiValueArgs SRCS DEPS X86_DEPS CUDA_DEPS CL_DEPS ARM_DEPS FPGA_DEPS PROFILE_DEPS
- LIGHT_DEPS HVY_DEPS EXCLUDE_COMPILE_DEPS
- ARGS)
- cmake_parse_arguments(args "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
-
- if ("${level}" STREQUAL "extra" AND (NOT LITE_BUILD_EXTRA))
- return()
- endif()
-
- if (LITE_ON_MODEL_OPTIMIZE_TOOL)
- # the source list will collect for model_optimize_tool to fake kernel generation.
- foreach(src ${args_SRCS})
- file(APPEND ${kernels_src_list} "${CMAKE_CURRENT_SOURCE_DIR}/${src}\n")
- endforeach()
- return()
- endif()
-
- # when compiling the model_optimize_tool, a source file with all the fake kernel definitions will be generated,
- # no need to continue the compilation of the true kernel source.
- if (LITE_ON_MODEL_OPTIMIZE_TOOL)
- return()
- endif(LITE_ON_MODEL_OPTIMIZE_TOOL)
-
-
- if ("${device}" STREQUAL "Host")
- set(host_kernels "${host_kernels};${TARGET}" CACHE INTERNAL "")
- endif()
- if ("${device}" STREQUAL "ARM")
- if (NOT LITE_WITH_ARM)
- return()
- endif()
- set(arm_kernels "${arm_kernels};${TARGET}" CACHE INTERNAL "")
- endif()
- if ("${device}" STREQUAL "X86")
- if (NOT LITE_WITH_X86)
- return()
- endif()
- set(x86_kernels "${x86_kernels};${TARGET}" CACHE INTERNAL "")
- endif()
- if ("${device}" STREQUAL "NPU")
- if (NOT LITE_WITH_NPU)
- return()
- endif()
- set(npu_kernels "${npu_kernels};${TARGET}" CACHE INTERNAL "")
- endif()
- if ("${device}" STREQUAL "FPGA")
- if (NOT LITE_WITH_FPGA)
- return()
- endif()
- set(fpga_kernels "${fpga_kernels};${TARGET}" CACHE INTERNAL "")
- endif()
- if ("${device}" STREQUAL "OPENCL")
- if (NOT LITE_WITH_OPENCL)
- return()
- endif()
- set(opencl_kernels "${opencl_kernels};${TARGET}" CACHE INTERNAL "")
- endif()
-
- # the source list will collect for paddle_use_kernel.h code generation.
- foreach(src ${args_SRCS})
- file(APPEND ${kernels_src_list} "${CMAKE_CURRENT_SOURCE_DIR}/${src}\n")
- endforeach()
-
- lite_cc_library(${TARGET} SRCS ${args_SRCS}
- DEPS ${args_DEPS}
- X86_DEPS ${args_X86_DEPS}
- CUDA_DEPS ${args_CUDA_DEPS}
- CL_DEPS ${args_CL_DEPS}
- ARM_DEPS ${args_ARM_DEPS}
- FPGA_DEPS ${args_FPGA_DEPS}
- PROFILE_DEPS ${args_PROFILE_DEPS}
- LIGHT_DEPS ${args_LIGHT_DEPS}
- HVY_DEPS ${args_HVY_DEPS}
- )
-endfunction()
-
-set(ops CACHE INTERNAL "ops")
-set(ops_src_list "${CMAKE_BINARY_DIR}/ops_src_list.txt")
-file(WRITE ${ops_src_list} "") # clean
-# add an operator
-# level: one of (basic, extra)
-function(add_operator TARGET level)
- set(options "")
- set(oneValueArgs "")
- set(multiValueArgs SRCS DEPS X86_DEPS CUDA_DEPS CL_DEPS ARM_DEPS FPGA_DEPS PROFILE_DEPS
- LIGHT_DEPS HVY_DEPS EXCLUDE_COMPILE_DEPS
- ARGS)
- cmake_parse_arguments(args "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
-
- if ("${level}" STREQUAL "extra" AND (NOT LITE_BUILD_EXTRA))
- return()
- endif()
-
- set(ops "${ops};${TARGET}" CACHE INTERNAL "source")
-
- foreach(src ${args_SRCS})
- file(APPEND ${ops_src_list} "${CMAKE_CURRENT_SOURCE_DIR}/${src}\n")
- endforeach()
-
- lite_cc_library(${TARGET} SRCS ${args_SRCS}
- DEPS ${args_DEPS}
- X86_DEPS ${args_X86_DEPS}
- CUDA_DEPS ${args_CUDA_DEPS}
- CL_DEPS ${args_CL_DEPS}
- ARM_DEPS ${args_ARM_DEPS}
- FPGA_DEPS ${args_FPGA_DEPS}
- PROFILE_DEPS ${args_PROFILE_DEPS}
- LIGHT_DEPS ${args_LIGHT_DEPS}
- HVY_DEPS ${args_HVY_DEPS}
- )
-endfunction()
-
-
-# Bundle several static libraries into one.
-function(bundle_static_library tgt_name bundled_tgt_name fake_target)
- list(APPEND static_libs ${tgt_name})
-
- function(_recursively_collect_dependencies input_target)
- set(_input_link_libraries LINK_LIBRARIES)
- get_target_property(_input_type ${input_target} TYPE)
- if (${_input_type} STREQUAL "INTERFACE_LIBRARY")
- set(_input_link_libraries INTERFACE_LINK_LIBRARIES)
- endif()
- get_target_property(public_dependencies ${input_target} ${_input_link_libraries})
- foreach(dependency IN LISTS public_dependencies)
- if(TARGET ${dependency})
- get_target_property(alias ${dependency} ALIASED_TARGET)
- if (TARGET ${alias})
- set(dependency ${alias})
- endif()
- get_target_property(_type ${dependency} TYPE)
- if (${_type} STREQUAL "STATIC_LIBRARY")
- list(APPEND static_libs ${dependency})
- endif()
-
- get_property(library_already_added
- GLOBAL PROPERTY _${tgt_name}_static_bundle_${dependency})
- if (NOT library_already_added)
- set_property(GLOBAL PROPERTY _${tgt_name}_static_bundle_${dependency} ON)
- _recursively_collect_dependencies(${dependency})
- endif()
- endif()
- endforeach()
- set(static_libs ${static_libs} PARENT_SCOPE)
- endfunction()
-
- _recursively_collect_dependencies(${tgt_name})
-
- list(REMOVE_DUPLICATES static_libs)
-
- set(bundled_tgt_full_name
- ${CMAKE_BINARY_DIR}/${CMAKE_STATIC_LIBRARY_PREFIX}${bundled_tgt_name}${CMAKE_STATIC_LIBRARY_SUFFIX})
-
- #message(STATUS "bundled_tgt_full_name: ${bundled_tgt_full_name}")
-
- if(NOT IOS)
- file(WRITE ${CMAKE_BINARY_DIR}/${bundled_tgt_name}.ar.in
- "CREATE ${bundled_tgt_full_name}\n" )
-
- foreach(tgt IN LISTS static_libs)
- file(APPEND ${CMAKE_BINARY_DIR}/${bundled_tgt_name}.ar.in
- "ADDLIB $<TARGET_FILE:${tgt}>\n")
- endforeach()
-
- file(APPEND ${CMAKE_BINARY_DIR}/${bundled_tgt_name}.ar.in "SAVE\n")
- file(APPEND ${CMAKE_BINARY_DIR}/${bundled_tgt_name}.ar.in "END\n")
-
- file(GENERATE
- OUTPUT ${CMAKE_BINARY_DIR}/${bundled_tgt_name}.ar
- INPUT ${CMAKE_BINARY_DIR}/${bundled_tgt_name}.ar.in)
-
- set(ar_tool ${CMAKE_AR})
- if (CMAKE_INTERPROCEDURAL_OPTIMIZATION)
- set(ar_tool ${CMAKE_CXX_COMPILER_AR})
- endif()
-
- add_custom_command(
- COMMAND ${ar_tool} -M < ${CMAKE_BINARY_DIR}/${bundled_tgt_name}.ar
- OUTPUT ${bundled_tgt_full_name}
- COMMENT "Bundling ${bundled_tgt_name}"
- VERBATIM)
- else()
- foreach(lib ${static_libs})
- set(libfiles ${libfiles} $<TARGET_FILE:${lib}>)
- endforeach()
- add_custom_command(
- COMMAND /usr/bin/libtool -static -o ${bundled_tgt_full_name} ${libfiles}
- OUTPUT ${bundled_tgt_full_name}
- )
- endif()
-
- add_custom_target(${fake_target} ALL DEPENDS ${bundled_tgt_full_name})
- add_dependencies(${fake_target} ${tgt_name})
-
- add_library(${bundled_tgt_name} STATIC IMPORTED)
- set_target_properties(${bundled_tgt_name}
- PROPERTIES
- IMPORTED_LOCATION ${bundled_tgt_full_name}
- INTERFACE_INCLUDE_DIRECTORIES $<TARGET_PROPERTY:${tgt_name},INTERFACE_INCLUDE_DIRECTORIES>)
- add_dependencies(${bundled_tgt_name} ${fake_target})
-
-endfunction()
diff --git a/cmake/lite_utils.cmake b/cmake/lite_utils.cmake
deleted file mode 100644
index f07ea859364949793d9d6a55c41c7415fefb4262..0000000000000000000000000000000000000000
--- a/cmake/lite_utils.cmake
+++ /dev/null
@@ -1,56 +0,0 @@
-# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# ----------------------------------------------------------------------------
-# section: Provides an paddle lite config option macro
-# usage: lite_option(var "help string to describe the var" [if or IF (condition)])
-# ----------------------------------------------------------------------------
-macro(lite_option variable description value)
- set(__value ${value})
- set(__condition "")
- set(__varname "__value")
- foreach(arg ${ARGN})
- if(arg STREQUAL "IF" OR arg STREQUAL "if")
- set(__varname "__condition")
- else()
- list(APPEND ${__varname} ${arg})
- endif()
- endforeach()
- unset(__varname)
- if(__condition STREQUAL "")
- set(__condition 2 GREATER 1)
- endif()
-
- if(${__condition})
- if(__value MATCHES ";")
- if(${__value})
- option(${variable} "${description}" ON)
- else()
- option(${variable} "${description}" OFF)
- endif()
- elseif(DEFINED ${__value})
- if(${__value})
- option(${variable} "${description}" ON)
- else()
- option(${variable} "${description}" OFF)
- endif()
- else()
- option(${variable} "${description}" ${__value})
- endif()
- else()
- unset(${variable} CACHE)
- endif()
- unset(__condition)
- unset(__value)
-endmacro()
diff --git a/cmake/make_resource.py b/cmake/make_resource.py
deleted file mode 100644
index 09a2ca877dd54243428ee2c730944f75ceeeaa30..0000000000000000000000000000000000000000
--- a/cmake/make_resource.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import re
-import sys
-
-res = sys.argv[1]
-out = sys.argv[2]
-var = re.sub(r'[ .-]', '_', os.path.basename(res))
-
-open(out, "w").write("const unsigned char " + var + "[] = {" + ",".join([
- "0x%02x" % ord(c) for c in open(res).read()
-]) + ",0};\n" + "const unsigned " + var + "_size = sizeof(" + var + ");\n")
diff --git a/cmake/operators.cmake b/cmake/operators.cmake
deleted file mode 100644
index c17e718f4279f24c85db8be1177e5b5e82b13e08..0000000000000000000000000000000000000000
--- a/cmake/operators.cmake
+++ /dev/null
@@ -1,227 +0,0 @@
-set(PART_CUDA_KERNEL_FILES)
-function(op_library TARGET)
- # op_library is a function to create op library. The interface is same as
- # cc_library. But it handle split GPU/CPU code and link some common library
- # for ops.
- set(cc_srcs)
- set(cu_srcs)
- set(hip_cu_srcs)
- set(miopen_hip_cc_srcs)
- set(cu_cc_srcs)
- set(cudnn_cu_cc_srcs)
- set(CUDNN_FILE)
- set(mkldnn_cc_srcs)
- set(MKLDNN_FILE)
- set(op_common_deps operator op_registry math_function)
- set(options "")
- set(oneValueArgs "")
- set(multiValueArgs SRCS DEPS)
- set(pybind_flag 0)
- cmake_parse_arguments(op_library "${options}" "${oneValueArgs}"
- "${multiValueArgs}" ${ARGN})
-
- list(LENGTH op_library_SRCS op_library_SRCS_len)
- if (${op_library_SRCS_len} EQUAL 0)
- if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${TARGET}.cc)
- list(APPEND cc_srcs ${TARGET}.cc)
- endif()
- if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${TARGET}.cu.cc)
- list(APPEND cu_cc_srcs ${TARGET}.cu.cc)
- endif()
- if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${TARGET}.cu)
- list(APPEND cu_srcs ${TARGET}.cu)
- endif()
- if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${TARGET}.part.cu)
- set(PART_CUDA_KERNEL_FILES ${CMAKE_CURRENT_SOURCE_DIR}/${TARGET}.part.cu
- ${PART_CUDA_KERNEL_FILES} PARENT_SCOPE)
- list(APPEND cu_srcs ${CMAKE_CURRENT_SOURCE_DIR}/${TARGET}.part.cu)
- endif()
-
- if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${TARGET}.hip.cu)
- list(APPEND hip_cu_srcs ${TARGET}.hip.cu)
- endif()
- string(REPLACE "_op" "_cudnn_op" CUDNN_FILE "${TARGET}")
- if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${CUDNN_FILE}.cu.cc)
- list(APPEND cudnn_cu_cc_srcs ${CUDNN_FILE}.cu.cc)
- endif()
- if(WITH_AMD_GPU)
- string(REPLACE "_op" "_miopen_op" MIOPEN_FILE "${TARGET}")
- if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${MIOPEN_FILE}.hip.cc)
- list(APPEND miopen_hip_cc_srcs ${MIOPEN_FILE}.hip.cc)
- endif()
- endif()
- if(WITH_MKLDNN)
- string(REPLACE "_op" "_mkldnn_op" MKLDNN_FILE "${TARGET}")
- if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/mkldnn/${MKLDNN_FILE}.cc)
- list(APPEND mkldnn_cc_srcs mkldnn/${MKLDNN_FILE}.cc)
- endif()
- endif()
- else()
- foreach(src ${op_library_SRCS})
- if (${src} MATCHES ".*\\.hip.cu$")
- list(APPEND hip_cu_srcs ${src})
- elseif (${src} MATCHES ".*\\.cu$")
- list(APPEND cu_srcs ${src})
- elseif(${src} MATCHES ".*_cudnn_op.cu.cc$")
- list(APPEND cudnn_cu_cc_srcs ${src})
- elseif(WITH_AMD_GPU AND ${src} MATCHES ".*_miopen_op.hip.cc$")
- list(APPEND miopen_hip_cc_srcs ${src})
- elseif(WITH_MKLDNN AND ${src} MATCHES ".*_mkldnn_op.cc$")
- list(APPEND mkldnn_cc_srcs ${src})
- elseif(${src} MATCHES ".*\\.cu.cc$")
- list(APPEND cu_cc_srcs ${src})
- elseif(${src} MATCHES ".*\\.cc$")
- list(APPEND cc_srcs ${src})
- else()
- message(FATAL_ERROR "${TARGET} Source file ${src} should only be .cc or .cu")
- endif()
- endforeach()
- endif()
-
- list(LENGTH cc_srcs cc_srcs_len)
- if (${cc_srcs_len} EQUAL 0)
- message(FATAL_ERROR "The op library ${TARGET} should contains at least one .cc file")
- endif()
- if (WIN32)
- # remove windows unsupported op, because windows has no nccl, no warpctc such ops.
- foreach(windows_unsupport_op "nccl_op" "gen_nccl_id_op")
- if ("${TARGET}" STREQUAL "${windows_unsupport_op}")
- return()
- endif()
- endforeach()
- endif(WIN32)
- set(OP_LIBRARY ${TARGET} ${OP_LIBRARY} CACHE INTERNAL "op libs")
-
- list(LENGTH op_library_DEPS op_library_DEPS_len)
- if (${op_library_DEPS_len} GREATER 0)
- set(DEPS_OPS ${TARGET} ${DEPS_OPS} PARENT_SCOPE)
- endif()
- if (WITH_GPU)
- nv_library(${TARGET} SRCS ${cc_srcs} ${cu_cc_srcs} ${cudnn_cu_cc_srcs} ${mkldnn_cc_srcs} ${cu_srcs} DEPS ${op_library_DEPS}
- ${op_common_deps})
- elseif (WITH_AMD_GPU)
- hip_library(${TARGET} SRCS ${cc_srcs} ${hip_cu_srcs} ${miopen_hip_cc_srcs} ${mkldnn_cc_srcs} DEPS ${op_library_DEPS}
- ${op_common_deps})
- else()
- cc_library(${TARGET} SRCS ${cc_srcs} ${mkldnn_cc_srcs} DEPS ${op_library_DEPS}
- ${op_common_deps})
- endif()
-
- # Define operators that don't need pybind here.
- foreach(manual_pybind_op "compare_op" "logical_op" "nccl_op"
-"tensor_array_read_write_op" "tensorrt_engine_op" "conv_fusion_op"
-"fusion_transpose_flatten_concat_op" "fusion_conv_inception_op" "sync_batch_norm_op" "dgc_op")
- if ("${TARGET}" STREQUAL "${manual_pybind_op}")
- set(pybind_flag 1)
- endif()
- endforeach()
-
- # The registration of USE_OP, please refer to paddle/fluid/framework/op_registry.h.
- # Note that it's enough to just adding one operator to pybind in a *_op.cc file.
- # And for detail pybind information, please see generated paddle/pybind/pybind.h.
- file(READ ${TARGET}.cc TARGET_CONTENT)
- string(REGEX MATCH "REGISTER_OPERATOR\\(.*REGISTER_OPERATOR\\(" multi_register "${TARGET_CONTENT}")
- string(REGEX MATCH "REGISTER_OPERATOR\\([a-z0-9_]*," one_register "${multi_register}")
- if (one_register STREQUAL "")
- string(REPLACE "_op" "" TARGET "${TARGET}")
- else ()
- string(REPLACE "REGISTER_OPERATOR(" "" TARGET "${one_register}")
- string(REPLACE "," "" TARGET "${TARGET}")
- endif()
-
- # pybind USE_NO_KERNEL_OP
- # HACK: if REGISTER_OP_CPU_KERNEL presents the operator must have kernel
- string(REGEX MATCH "REGISTER_OP_CPU_KERNEL" regex_result "${TARGET_CONTENT}")
- string(REPLACE "_op" "" TARGET "${TARGET}")
- if (${pybind_flag} EQUAL 0 AND regex_result STREQUAL "")
- file(APPEND ${pybind_file} "USE_NO_KERNEL_OP(${TARGET});\n")
- set(pybind_flag 1)
- endif()
-
- # pybind USE_CPU_ONLY_OP
- list(LENGTH cu_srcs cu_srcs_len)
- list(LENGTH cu_cc_srcs cu_cc_srcs_len)
- list(LENGTH mkldnn_cc_srcs mkldnn_cc_srcs_len)
- list(LENGTH hip_cu_srcs hip_cu_srcs_len)
- list(LENGTH miopen_hip_cc_srcs miopen_hip_cc_srcs_len)
- if (${pybind_flag} EQUAL 0 AND ${mkldnn_cc_srcs_len} EQUAL 0 AND ${cu_srcs_len} EQUAL 0 AND ${cu_cc_srcs_len} EQUAL 0 AND
- ${hip_cu_srcs_len} EQUAL 0 AND ${miopen_hip_cc_srcs_len} EQUAL 0)
- file(APPEND ${pybind_file} "USE_CPU_ONLY_OP(${TARGET});\n")
- set(pybind_flag 1)
- endif()
-
- # pybind USE_OP_DEVICE_KERNEL for CUDNN
- list(LENGTH cudnn_cu_cc_srcs cudnn_cu_cc_srcs_len)
- if (WITH_GPU AND ${cudnn_cu_cc_srcs_len} GREATER 0)
- if(${TARGET} STREQUAL "activation")
- file(APPEND ${pybind_file} "USE_OP_DEVICE_KERNEL(relu, CUDNN);\n")
- else()
- file(APPEND ${pybind_file} "USE_OP_DEVICE_KERNEL(${TARGET}, CUDNN);\n")
- endif()
- endif()
-
- # pybind USE_OP_DEVICE_KERNEL for MIOPEN
- if (WITH_AMD_GPU AND ${miopen_hip_cc_srcs_len} GREATER 0)
- file(APPEND ${pybind_file} "USE_OP_DEVICE_KERNEL(${TARGET}, MIOPEN);\n")
- endif()
-
- # pybind USE_OP_DEVICE_KERNEL for MKLDNN
- if (WITH_MKLDNN AND ${mkldnn_cc_srcs_len} GREATER 0)
- # Append first implemented MKLDNN activation operator
- if (${MKLDNN_FILE} STREQUAL "activation_mkldnn_op")
- file(APPEND ${pybind_file} "USE_OP_DEVICE_KERNEL(relu, MKLDNN);\n")
- elseif(${MKLDNN_FILE} STREQUAL "conv_mkldnn_op")
- file(APPEND ${pybind_file} "USE_OP_DEVICE_KERNEL_WITH_CUSTOM_TYPE(conv2d, MKLDNN, FP32);\n")
- file(APPEND ${pybind_file} "USE_OP_DEVICE_KERNEL_WITH_CUSTOM_TYPE(conv2d, MKLDNN, S8);\n")
- file(APPEND ${pybind_file} "USE_OP_DEVICE_KERNEL_WITH_CUSTOM_TYPE(conv2d, MKLDNN, U8);\n")
-
- else()
- file(APPEND ${pybind_file} "USE_OP_DEVICE_KERNEL(${TARGET}, MKLDNN);\n")
- endif()
- endif()
-
- # pybind USE_OP
- if (${pybind_flag} EQUAL 0)
- # NOTE(*): activation use macro to regist the kernels, set use_op manually.
- if(${TARGET} STREQUAL "activation")
- file(APPEND ${pybind_file} "USE_OP(relu);\n")
- elseif(${TARGET} STREQUAL "fake_dequantize")
- file(APPEND ${pybind_file} "USE_OP(fake_dequantize_max_abs);\n")
- elseif(${TARGET} STREQUAL "fake_quantize")
- file(APPEND ${pybind_file} "USE_OP(fake_quantize_abs_max);\n")
- elseif(${TARGET} STREQUAL "tensorrt_engine_op")
- message(STATUS "Pybind skips [tensorrt_engine_op], for this OP is only used in inference")
- elseif(${TARGET} STREQUAL "fc")
- # HACK: fc only have mkldnn and cpu, which would mismatch the cpu only condition
- file(APPEND ${pybind_file} "USE_CPU_ONLY_OP(${TARGET});\n")
- else()
- file(APPEND ${pybind_file} "USE_OP(${TARGET});\n")
- endif()
- endif()
-endfunction()
-
-
-function(register_operators)
- set(options "")
- set(oneValueArgs "")
- set(multiValueArgs EXCLUDES DEPS)
- cmake_parse_arguments(register_operators "${options}" "${oneValueArgs}"
- "${multiValueArgs}" ${ARGN})
-
- file(GLOB OPS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "*_op.cc")
- string(REPLACE "_mkldnn" "" OPS "${OPS}")
- string(REPLACE ".cc" "" OPS "${OPS}")
- list(REMOVE_DUPLICATES OPS)
- list(LENGTH register_operators_DEPS register_operators_DEPS_len)
-
- foreach(src ${OPS})
- list(FIND register_operators_EXCLUDES ${src} _index)
- if (${_index} EQUAL -1)
- if (${register_operators_DEPS_len} GREATER 0)
- op_library(${src} DEPS ${register_operators_DEPS})
- else()
- op_library(${src})
- endif()
- endif()
- endforeach()
-endfunction()
diff --git a/cmake/package.cmake b/cmake/package.cmake
deleted file mode 100644
index 79e02147f3f7cc19c1bf45d8a1d208a9a32416ff..0000000000000000000000000000000000000000
--- a/cmake/package.cmake
+++ /dev/null
@@ -1,21 +0,0 @@
-set(CPACK_PACKAGE_NAME paddle)
-set(CPACK_PACKAGE_VERSION_MAJOR ${PADDLE_MAJOR_VERSION})
-set(CPACK_PACKAGE_VERSION_MINOR ${PADDLE_MINOR_VERSION})
-set(CPACK_PACKAGE_VERSION_PATCH ${PADDLE_PATCH_VERSION})
-set(CPACK_PACKAGE_VERSION ${PADDLE_VERSION})
-## DEB Settings
-set(CPACK_DEBIAN_PACKAGE_NAME paddle)
-set(CPACK_DEBIAN_PACKAGE_ARCHITECTURE amd64)
-set(CPACK_DEBIAN_PACKAGE_MAINTAINER PaddlePaddle Dev <paddle-dev@baidu.com>)
-set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "Paddle")
-set(CPACK_PACKAGE_DESCRIPTION "")
-set(CPACK_DEBIAN_PACKAGE_DEPENDS "libpython2.7-dev, libstdc++6, python-pip, curl, libgfortran3, python-pip-whl")
-set(CPACK_DEBIAN_PACKAGE_SECTION Devel)
-set(CPACK_DEBIAN_PACKAGE_VERSION ${PADDLE_VERSION})
-set(CPACK_DEBIAN_PACKAGE_CONTROL_EXTRA "${PADDLE_SOURCE_DIR}/paddle/scripts/deb/postinst")
-#set(CPACK_GENERATOR "DEB")
-# Start cpack
-include (CMakePackageConfigHelpers)
-include (CPack)
-
-
diff --git a/cmake/simd.cmake b/cmake/simd.cmake
deleted file mode 100644
index 566dc75fda019eb66759eb403f60e16f18cffef1..0000000000000000000000000000000000000000
--- a/cmake/simd.cmake
+++ /dev/null
@@ -1,99 +0,0 @@
-# This file is use to check all support level of AVX on your machine
-# so that PaddlePaddle can unleash the vectorization power of muticore.
-
-include(CheckCXXSourceRuns)
-include(CheckCXXSourceCompiles)
-
-if(CMAKE_COMPILER_IS_GNUCC OR CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang")
- set(MMX_FLAG "-mmmx")
- set(SSE2_FLAG "-msse2")
- set(SSE3_FLAG "-msse3")
- set(AVX_FLAG "-mavx")
- set(AVX2_FLAG "-mavx2")
- set(AVX512F_FLAG "-mavx512f")
-elseif(MSVC)
- set(MMX_FLAG "/arch:MMX")
- set(SSE2_FLAG "/arch:SSE2")
- set(SSE3_FLAG "/arch:SSE3")
- SET(AVX_FLAG "/arch:AVX")
- SET(AVX2_FLAG "/arch:AVX2")
-endif()
-
-set(CMAKE_REQUIRED_FLAGS_RETAINED ${CMAKE_REQUIRED_FLAGS})
-
-# Check MMX
-set(CMAKE_REQUIRED_FLAGS ${MMX_FLAG})
-set(MMX_FOUND_EXITCODE 1 CACHE STRING "Result from TRY_RUN" FORCE)
-CHECK_CXX_SOURCE_RUNS("
-#include <mmintrin.h>
-int main()
-{
- _mm_setzero_si64();
- return 0;
-}" MMX_FOUND)
-
-# Check SSE2
-set(CMAKE_REQUIRED_FLAGS ${SSE2_FLAG})
-set(SSE2_FOUND_EXITCODE 1 CACHE STRING "Result from TRY_RUN" FORCE)
-CHECK_CXX_SOURCE_RUNS("
-#include <emmintrin.h>
-int main()
-{
- _mm_setzero_si128();
- return 0;
-}" SSE2_FOUND)
-
-# Check SSE3
-set(CMAKE_REQUIRED_FLAGS ${SSE3_FLAG})
-set(SSE3_FOUND_EXITCODE 1 CACHE STRING "Result from TRY_RUN" FORCE)
-CHECK_CXX_SOURCE_RUNS("
-#include <pmmintrin.h>
-int main()
-{
- __m128d a = _mm_set1_pd(6.28);
- __m128d b = _mm_set1_pd(3.14);
- __m128d result = _mm_addsub_pd(a, b);
- result = _mm_movedup_pd(result);
- return 0;
-}" SSE3_FOUND)
-
-# Check AVX
-set(CMAKE_REQUIRED_FLAGS ${AVX_FLAG})
-set(AVX_FOUND_EXITCODE 1 CACHE STRING "Result from TRY_RUN" FORCE)
-CHECK_CXX_SOURCE_RUNS("
-#include <immintrin.h>
-int main()
-{
- __m256 a = _mm256_set_ps (-1.0f, 2.0f, -3.0f, 4.0f, -1.0f, 2.0f, -3.0f, 4.0f);
- __m256 b = _mm256_set_ps (1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f);
- __m256 result = _mm256_add_ps (a, b);
- return 0;
-}" AVX_FOUND)
-
-# Check AVX 2
-set(CMAKE_REQUIRED_FLAGS ${AVX2_FLAG})
-set(AVX2_FOUND_EXITCODE 1 CACHE STRING "Result from TRY_RUN" FORCE)
-CHECK_CXX_SOURCE_RUNS("
-#include <immintrin.h>
-int main()
-{
- __m256i a = _mm256_set_epi32 (-1, 2, -3, 4, -1, 2, -3, 4);
- __m256i result = _mm256_abs_epi32 (a);
- return 0;
-}" AVX2_FOUND)
-
-# Check AVX512F
-set(CMAKE_REQUIRED_FLAGS ${AVX512F_FLAG})
-set(AVX512F_FOUND_EXITCODE 1 CACHE STRING "Result from TRY_RUN" FORCE)
-CHECK_CXX_SOURCE_RUNS("
-#include <immintrin.h>
-int main()
-{
- __m512i a = _mm512_set_epi32 (-1, 2, -3, 4, -1, 2, -3, 4,
- 13, -5, 6, -7, 9, 2, -6, 3);
- __m512i result = _mm512_abs_epi32 (a);
- return 0;
-}" AVX512F_FOUND)
-
-set(CMAKE_REQUIRED_FLAGS ${CMAKE_REQUIRED_FLAGS_RETAINED})
-mark_as_advanced(MMX_FOUND SSE2_FOUND SSE3_FOUND AVX_FOUND AVX2_FOUND AVX512F_FOUND)
diff --git a/cmake/system.cmake b/cmake/system.cmake
deleted file mode 100644
index ba00df928a0c52bfe05f4d3f6d7af2a50d2576f9..0000000000000000000000000000000000000000
--- a/cmake/system.cmake
+++ /dev/null
@@ -1,89 +0,0 @@
-# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Detects the OS and sets appropriate variables.
-# CMAKE_SYSTEM_NAME only give us a coarse-grained name of the OS CMake is
-# building for, but the host processor name like centos is necessary
-# in some scenes to distinguish system for customization.
-#
-# for instance, protobuf libs path is <install_dir>/lib64
-# on CentOS, but <install_dir>/lib on other systems.
-
-IF(WIN32)
- SET(HOST_SYSTEM "win32")
-ELSE(WIN32)
- IF(APPLE)
- SET(HOST_SYSTEM "macosx")
- EXEC_PROGRAM(sw_vers ARGS -productVersion OUTPUT_VARIABLE HOST_SYSTEM_VERSION)
- STRING(REGEX MATCH "[0-9]+.[0-9]+" MACOS_VERSION "${HOST_SYSTEM_VERSION}")
- IF(NOT DEFINED $ENV{MACOSX_DEPLOYMENT_TARGET})
- # Set cache variable - end user may change this during ccmake or cmake-gui configure.
- SET(CMAKE_OSX_DEPLOYMENT_TARGET ${MACOS_VERSION} CACHE STRING
- "Minimum OS X version to target for deployment (at runtime); newer APIs weak linked. Set to empty string for default value.")
- ENDIF()
- IF(ARM_TARGET_OS STREQUAL "android" OR ARM_TARGET_OS STREQUAL "armlinux"
- OR ARM_TARGET_OS STREQUAL "ios" OR ARM_TARGET_OS STREQUAL "ios64")
- ELSE()
- set(CMAKE_EXE_LINKER_FLAGS "-framework CoreFoundation -framework Security")
- ENDIF()
- ELSE(APPLE)
-
- IF(EXISTS "/etc/issue")
- FILE(READ "/etc/issue" LINUX_ISSUE)
- IF(LINUX_ISSUE MATCHES "CentOS")
- SET(HOST_SYSTEM "centos")
- ELSEIF(LINUX_ISSUE MATCHES "Debian")
- SET(HOST_SYSTEM "debian")
- ELSEIF(LINUX_ISSUE MATCHES "Ubuntu")
- SET(HOST_SYSTEM "ubuntu")
- ELSEIF(LINUX_ISSUE MATCHES "Red Hat")
- SET(HOST_SYSTEM "redhat")
- ELSEIF(LINUX_ISSUE MATCHES "Fedora")
- SET(HOST_SYSTEM "fedora")
- ENDIF()
-
- STRING(REGEX MATCH "(([0-9]+)\\.)+([0-9]+)" HOST_SYSTEM_VERSION "${LINUX_ISSUE}")
- ENDIF(EXISTS "/etc/issue")
-
- IF(EXISTS "/etc/redhat-release")
- FILE(READ "/etc/redhat-release" LINUX_ISSUE)
- IF(LINUX_ISSUE MATCHES "CentOS")
- SET(HOST_SYSTEM "centos")
- ENDIF()
- ENDIF(EXISTS "/etc/redhat-release")
-
- IF(NOT HOST_SYSTEM)
- SET(HOST_SYSTEM ${CMAKE_SYSTEM_NAME})
- ENDIF()
-
- ENDIF(APPLE)
-ENDIF(WIN32)
-
-# query number of logical cores
-CMAKE_HOST_SYSTEM_INFORMATION(RESULT CPU_CORES QUERY NUMBER_OF_LOGICAL_CORES)
-
-MARK_AS_ADVANCED(HOST_SYSTEM CPU_CORES)
-
-MESSAGE(STATUS "Found Paddle host system: ${HOST_SYSTEM}, version: ${HOST_SYSTEM_VERSION}")
-MESSAGE(STATUS "Found Paddle host system's CPU: ${CPU_CORES} cores")
-
-# external dependencies log output
-SET(EXTERNAL_PROJECT_LOG_ARGS
- LOG_DOWNLOAD 0 # Wrap download in script to log output
- LOG_UPDATE 1 # Wrap update in script to log output
- LOG_CONFIGURE 1 # Wrap configure in script to log output
- LOG_BUILD 0 # Wrap build in script to log output
- LOG_TEST 1 # Wrap test in script to log output
- LOG_INSTALL 0 # Wrap install in script to log output
-)
diff --git a/cmake/tensorrt.cmake b/cmake/tensorrt.cmake
deleted file mode 100644
index 3bf12094e4c32e69f908cbe6cefc7871fc9bb568..0000000000000000000000000000000000000000
--- a/cmake/tensorrt.cmake
+++ /dev/null
@@ -1,38 +0,0 @@
-if(NOT WITH_GPU)
- return()
-endif()
-
-set(TENSORRT_ROOT "/usr" CACHE PATH "TENSORRT ROOT")
-find_path(TENSORRT_INCLUDE_DIR NvInfer.h
- PATHS ${TENSORRT_ROOT} ${TENSORRT_ROOT}/include
- $ENV{TENSORRT_ROOT} $ENV{TENSORRT_ROOT}/include
- NO_DEFAULT_PATH
-)
-
-find_library(TENSORRT_LIBRARY NAMES libnvinfer.so libnvinfer.a
- PATHS ${TENSORRT_ROOT} ${TENSORRT_ROOT}/lib
- $ENV{TENSORRT_ROOT} $ENV{TENSORRT_ROOT}/lib
- NO_DEFAULT_PATH
- DOC "Path to TensorRT library.")
-
-if(TENSORRT_INCLUDE_DIR AND TENSORRT_LIBRARY)
- if(WITH_DSO)
- set(TENSORRT_FOUND ON)
- endif(WITH_DSO)
-else()
- set(TENSORRT_FOUND OFF)
-endif()
-
-if(TENSORRT_FOUND)
- file(READ ${TENSORRT_INCLUDE_DIR}/NvInfer.h TENSORRT_VERSION_FILE_CONTENTS)
- string(REGEX MATCH "define NV_TENSORRT_MAJOR +([0-9]+)" TENSORRT_MAJOR_VERSION
- "${TENSORRT_VERSION_FILE_CONTENTS}")
- string(REGEX REPLACE "define NV_TENSORRT_MAJOR +([0-9]+)" "\\1"
- TENSORRT_MAJOR_VERSION "${TENSORRT_MAJOR_VERSION}")
-
- message(STATUS "Current TensorRT header is ${TENSORRT_INCLUDE_DIR}/NvInfer.h. "
- "Current TensorRT version is v${TENSORRT_MAJOR_VERSION}. ")
- include_directories(${TENSORRT_INCLUDE_DIR})
- link_directories(${TENSORRT_LIBRARY})
- add_definitions(-DPADDLE_WITH_TENSORRT)
-endif()
diff --git a/cmake/util.cmake b/cmake/util.cmake
deleted file mode 100644
index 02667dbce69ed159193ff88f38069dd08cdcf678..0000000000000000000000000000000000000000
--- a/cmake/util.cmake
+++ /dev/null
@@ -1,55 +0,0 @@
-# Some common routine for paddle compile.
-
-# target_circle_link_libraries
-# Link libraries to target which has circle dependencies.
-#
-# First Argument: target name want to be linked with libraries
-# Rest Arguments: libraries which link together.
-function(target_circle_link_libraries TARGET_NAME)
- if(APPLE)
- set(LIBS)
- set(inArchive OFF)
- set(libsInArgn)
-
- foreach(arg ${ARGN})
- if(${arg} STREQUAL "ARCHIVE_START")
- set(inArchive ON)
- elseif(${arg} STREQUAL "ARCHIVE_END")
- set(inArchive OFF)
- else()
- if(inArchive)
- list(APPEND LIBS "-Wl,-force_load")
- endif()
- list(APPEND LIBS ${arg})
- list(APPEND libsInArgn ${arg})
- endif()
- endforeach()
- if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang" OR "${CMAKE_CXX_COMPILER_ID}" STREQUAL "AppleClang")
- if(NOT IOS_ENABLE_BITCODE)
- list(APPEND LIBS "-undefined dynamic_lookup")
- endif()
- endif()
- list(REVERSE libsInArgn)
- target_link_libraries(${TARGET_NAME}
- ${LIBS}
- ${libsInArgn})
-
- else() # LINUX
- set(LIBS)
-
- foreach(arg ${ARGN})
- if(${arg} STREQUAL "ARCHIVE_START")
- list(APPEND LIBS "-Wl,--whole-archive")
- elseif(${arg} STREQUAL "ARCHIVE_END")
- list(APPEND LIBS "-Wl,--no-whole-archive")
- else()
- list(APPEND LIBS ${arg})
- endif()
- endforeach()
-
- target_link_libraries(${TARGET_NAME}
- "-Wl,--start-group"
- ${LIBS}
- "-Wl,--end-group")
- endif()
-endfunction()
diff --git a/cmake/version.cmake b/cmake/version.cmake
deleted file mode 100644
index 8bcc4ffe725b8241dc9d91205ce2d9897917eefd..0000000000000000000000000000000000000000
--- a/cmake/version.cmake
+++ /dev/null
@@ -1,66 +0,0 @@
-# Get the latest git tag.
-set(PADDLE_VERSION $ENV{PADDLE_VERSION})
-set(tmp_version "HEAD")
-set(TAG_VERSION_REGEX "[0-9]+\\.[0-9]+\\.[0-9]+(\\.(a|b|rc)\\.[0-9]+)?")
-set(COMMIT_VERSION_REGEX "[0-9a-f]+[0-9a-f]+[0-9a-f]+[0-9a-f]+[0-9a-f]+")
-# set(LATEST_PADDLE_VERSION "latest")
-set(LATEST_PADDLE_VERSION "0.0.0")
-
-while ("${PADDLE_VERSION}" STREQUAL "")
- # Check current branch name
- execute_process(
- COMMAND ${GIT_EXECUTABLE} rev-parse --abbrev-ref ${tmp_version}
- WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}
- OUTPUT_VARIABLE GIT_BRANCH_NAME
- RESULT_VARIABLE GIT_BRANCH_RESULT
- ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)
- if (NOT ${GIT_BRANCH_RESULT})
- execute_process(
- COMMAND ${GIT_EXECUTABLE} describe --tags --abbrev=0 --always ${tmp_version}
- WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}
- OUTPUT_VARIABLE GIT_TAG_NAME
- RESULT_VARIABLE GIT_RESULT
- ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)
- if (NOT ${GIT_RESULT})
- # Check if current branch is release branch
- if (${GIT_BRANCH_NAME} MATCHES "release/${TAG_VERSION_REGEX}")
- # Check the tag is a correct version
- if (${GIT_TAG_NAME} MATCHES "${COMMIT_VERSION_REGEX}")
- # if no tag was found, set PADDLE_VERSION to "latest"
- set(PADDLE_VERSION "${LATEST_PADDLE_VERSION}")
- elseif (${GIT_TAG_NAME} MATCHES "v${TAG_VERSION_REGEX}")
- string(REPLACE "v" "" PADDLE_VERSION ${GIT_TAG_NAME})
- else() # otherwise, get the previous git tag name.
- set(tmp_version "${GIT_TAG_NAME}~1")
- endif()
- else()
- execute_process(
- COMMAND ${GIT_EXECUTABLE} describe --exact-match --tags ${tmp_version}
- WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}
- OUTPUT_VARIABLE GIT_EXACT_TAG_NAME
- RESULT_VARIABLE GIT_EXACT_TAG_RESULT
- ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)
- if (NOT ${GIT_EXACT_TAG_NAME})
- # Check if current branch is tag branch
- if (${GIT_EXACT_TAG_NAME} MATCHES "v${TAG_VERSION_REGEX}")
- string(REPLACE "v" "" PADDLE_VERSION ${GIT_EXACT_TAG_NAME})
- else()
- set(PADDLE_VERSION "${LATEST_PADDLE_VERSION}")
- endif()
- else()
- # otherwise, we always set PADDLE_VERSION to "latest"
- set(PADDLE_VERSION "${LATEST_PADDLE_VERSION}")
- endif()
- endif()
- else()
- set(PADDLE_VERSION "${LATEST_PADDLE_VERSION}")
- message(WARNING "Cannot add paddle version from git tag")
- endif()
- else()
- set(PADDLE_VERSION "${LATEST_PADDLE_VERSION}")
- message(WARNING "Cannot add paddle version for wrong git branch result")
- endif()
-endwhile()
-
-add_definitions(-DPADDLE_VERSION=${PADDLE_VERSION})
-message(STATUS "Paddle version is ${PADDLE_VERSION}")
diff --git a/cpp_demo.md b/cpp_demo.md
new file mode 100644
index 0000000000000000000000000000000000000000..bfb34399980db35d5834cf8b9a8c89fce34e26b6
--- /dev/null
+++ b/cpp_demo.md
@@ -0,0 +1,271 @@
+
+* [C++ Demo](#c-demo)
+ * [编译](#编译-1)
+ * [准备执行环境](#准备执行环境)
+ * [使用安卓手机](#使用安卓手机)
+ * [使用安卓模拟器](#使用安卓模拟器)
+ * [下载模型并运行示例](#下载模型并运行示例)
+ * [Demo 程序运行结果](#demo-程序运行结果)
+ * [如何在代码中使用 API](#如何在代码中使用-api)
+
+
+
+
+
+# `C++` Demo
+
+## 编译
+
+首先按照[PaddleLite 源码编译](https://github.com/PaddlePaddle/Paddle-Lite/wiki/source_compile)准备交叉编译环境,之后拉取最新[PaddleLite release发布版代码](https://github.com/PaddlePaddle/Paddle-Lite)。下面以Android-ARMv8架构为例,介绍编译过程,并最终在手机上跑通MobileNetV1模型。
+
+进入 Paddle-Lite 目录,运行以下命令编译代码(**需加编译选项`--build_extra=ON`确保完整编译**):
+
+```shell
+./lite/tools/build.sh \
+ --arm_os=android \
+ --arm_abi=armv8 \
+ --arm_lang=gcc \
+ --android_stl=c++_static \
+ --build_extra=ON \
+ full_publish
+```
+
+编译完成后 `./build.lite.android.armv8.gcc/inference_lite_lib.android.armv8/` 文件夹下包含:
+
+```bash
+cxx/include/
+cxx/lib/libpaddle_api_full_bundled.a
+cxx/lib/libpaddle_api_light_bundled.a
+demo/cxx/ #其中包括{include Makefile.def mobile_full mobile_light}
+third_party/gflags/
+```
+
+## 准备执行环境
+
+执行环境有两种:使用安卓手机;若没安卓手机,也可在安卓模拟器中执行。
+
+### 环境一:使用安卓手机
+
+将手机连上电脑,在手机上打开选项 -> 开启-开发者模式 -> 开启-USB调试模式。确保 `adb devices` 能够看到相应的设备。
+
+### 环境二:使用安卓模拟器
+
+运行下面命令,分别创建安卓armv8、armv7架构的模拟器。若需在真机测试,将模拟器换成相应架构的真机环境即可。
+
+```shell
+# android-armv8
+adb kill-server
+adb devices | grep emulator | cut -f1 | while read line; do adb -s $line emu kill; done
+echo n | avdmanager create avd -f -n paddle-armv8 -k "system-images;android-24;google_apis;arm64-v8a"
+echo -ne '\n' | ${ANDROID_HOME}/emulator/emulator -avd paddle-armv8 -noaudio -no-window -gpu off -port 5554 &
+sleep 1m
+```
+
+```shell
+# android-armv7
+adb kill-server
+adb devices | grep emulator | cut -f1 | while read line; do adb -s $line emu kill; done
+echo n | avdmanager create avd -f -n paddle-armv7 -k "system-images;android-24;google_apis;armeabi-v7a"
+echo -ne '\n' | ${ANDROID_HOME}/emulator/emulator -avd paddle-armv7 -noaudio -no-window -gpu off -port 5554 &
+sleep 1m
+```
+
+## 下载模型并运行示例
+
+```bash
+cd inference_lite_lib.android.armv8/demo/cxx/mobile_full
+wget http://paddle-inference-dist.bj.bcebos.com/mobilenet_v1.tar.gz
+tar zxvf mobilenet_v1.tar.gz
+
+make
+
+adb -s emulator-5554 push mobilenet_v1 /data/local/tmp/
+adb -s emulator-5554 push mobilenetv1_full_api /data/local/tmp/
+adb -s emulator-5554 shell chmod +x /data/local/tmp/mobilenetv1_full_api
+adb -s emulator-5554 shell "/data/local/tmp/mobilenetv1_full_api --model_dir=/data/local/tmp/mobilenet_v1 --optimized_model_dir=/data/local/tmp/mobilenet_v1.opt"
+```
+注:我们也提供了轻量级 API 的 demo,可以执行以下代码运行轻量级 API 示例。
+
+```bash
+cd ../mobile_light
+make
+adb -s emulator-5554 push mobilenetv1_light_api /data/local/tmp/
+adb -s emulator-5554 shell chmod +x /data/local/tmp/mobilenetv1_light_api
+adb -s emulator-5554 shell "/data/local/tmp/mobilenetv1_light_api --model_dir=/data/local/tmp/mobilenet_v1.opt --threads=1 "
+```
+## Demo 程序运行结果
+Demo 运行成功后 ,将在控制台输出预测结果的前10个类别的预测概率:
+
+```bash
+Output dim: 1000
+Output[0]: 0.000191
+Output[100]: 0.000160
+Output[200]: 0.000264
+Output[300]: 0.000211
+Output[400]: 0.001032
+Output[500]: 0.000110
+Output[600]: 0.004829
+Output[700]: 0.001845
+Output[800]: 0.000202
+Output[900]: 0.000586
+```
+
+## 如何在代码中使用 API
+
+在C++中使用PaddleLite API非常简单,不需要添加太多额外代码,具体步骤如下:
+
+- 加入头文件引用
+
+```cpp
+  #include <stdio.h>
+  #include <vector>
+ #include "paddle_api.h"
+ #include "paddle_use_kernels.h"
+ #include "paddle_use_ops.h"
+ #include "paddle_use_passes.h"
+```
+
+- 通过MobileConfig设置:模型文件位置(model_dir)、线程数(thread)和能耗模式( power mode )。输入数据(input),从 MobileConfig 创建 PaddlePredictor 并执行预测。 (注:Lite还支持从memory直接加载模型,可以通过MobileConfig::set_model_buffer方法实现)
+
+代码示例:
+```cpp
+  // 1. Create MobileConfig
+  MobileConfig config;
+
+  // 2. Load model
+  config.set_model_dir("path to your model directory"); //model dir
+  /*load model: Lite supports loading model from file or from memory (naive buffer from optimized model)
+  //Method One: Load model from memory:
+  void set_model_buffer(const char* model_buffer,
+                        size_t model_buffer_size,
+                        const char* param_buffer,
+                        size_t param_buffer_size)
+  //Method Two: Load model from file:
+  void set_model_dir(const std::string& model_dir) */
+
+  // 3. Set MobileConfig (or you can skip this step to use default value):
+  config.set_power_mode(LITE_POWER_HIGH); //power mode
+  /*power modes: Lite supports the following power modes
+      LITE_POWER_HIGH
+      LITE_POWER_LOW
+      LITE_POWER_FULL
+      LITE_POWER_NO_BIND
+      LITE_POWER_RAND_HIGH
+      LITE_POWER_RAND_LOW */
+  config.set_threads(1); //threads (int)
+
+  // 4. Create PaddlePredictor by MobileConfig
+  std::shared_ptr<PaddlePredictor> predictor =
+      CreatePaddlePredictor<MobileConfig>(config);
+
+  // 5. Prepare input data
+  std::unique_ptr<Tensor> input_tensor(std::move(predictor->GetInput(0)));
+  input_tensor->Resize({1, 3, 224, 224});
+  auto* data = input_tensor->mutable_data<float>();
+  for (int i = 0; i < ShapeProduction(input_tensor->shape()); ++i) {
+    data[i] = 1;
+  }
+
+  // 6. Run predictor
+  predictor->Run();
+
+  // 7. Get output
+  std::unique_ptr<const Tensor> output_tensor(
+      std::move(predictor->GetOutput(0)));
+```
+
+## CxxConfig案例: OCR_model的运行
+
+1. OCR 模型文件:
+   - 我们提供Pb格式的[ocr_attention_model](https://paddle-inference-dist.cdn.bcebos.com/ocr_attention.tar.gz)下载
+ - 也可以从[Paddle/model项目](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/ocr_recognition)中训练出模型
+
+2. 示例代码:
+```c++
+#include <gflags/gflags.h>
+#include <stdio.h>
+#include <vector>
+#include "paddle_api.h"          // NOLINT
+#include "paddle_use_kernels.h"  // NOLINT
+#include "paddle_use_ops.h"      // NOLINT
+#include "paddle_use_passes.h"   // NOLINT
+using namespace paddle::lite_api;  // NOLINT
+
+DEFINE_string(model_dir, "", "Model dir path.");
+DEFINE_bool(prefer_int8_kernel, false, "Prefer to run model with int8 kernels");
+
+int64_t ShapeProduction(const shape_t& shape) {
+  int64_t res = 1;
+  for (auto i : shape) res *= i;
+  return res;
+}
+
+void RunModel() {
+  // 1. Set CxxConfig
+  CxxConfig config;
+  config.set_model_dir(FLAGS_model_dir);
+  std::vector<Place> valid_places{Place{TARGET(kARM), PRECISION(kFloat)},
+                                  Place{TARGET(kHost), PRECISION(kFloat)}};
+  config.set_preferred_place(Place{TARGET(kARM), PRECISION(kFloat)});
+  config.set_valid_places(valid_places);
+
+  // 2. Create PaddlePredictor by CxxConfig
+  std::shared_ptr<PaddlePredictor> predictor =
+      CreatePaddlePredictor<CxxConfig>(config);
+
+  // 3. Prepare input data
+  // input 0
+  std::unique_ptr<Tensor> input_tensor(std::move(predictor->GetInput(0)));
+  input_tensor->Resize(shape_t({1, 1, 48, 512}));
+  auto* data = input_tensor->mutable_data<float>();
+  for (int i = 0; i < ShapeProduction(input_tensor->shape()); ++i) {
+    data[i] = 1;
+  }
+  // input 1
+  std::unique_ptr<Tensor> init_ids(std::move(predictor->GetInput(1)));
+  init_ids->Resize(shape_t({1, 1}));
+  auto* data_ids = init_ids->mutable_data<int64_t>();  // beam-search init ids are int64
+  for (int i = 0; i < ShapeProduction(init_ids->shape()); ++i) {
+    data_ids[i] = 0;
+  }
+  lod_t lod_i{{0, 1}, {0, 1}};
+  init_ids->SetLoD(lod_i);
+  // input 2
+  std::unique_ptr<Tensor> init_scores(std::move(predictor->GetInput(2)));
+  init_scores->Resize(shape_t({1, 1}));
+  auto* data_scores = init_scores->mutable_data<float>();
+  for (int i = 0; i < ShapeProduction(init_scores->shape()); ++i) {
+    data_scores[i] = 0;
+  }
+  lod_t lod_s{{0, 1}, {0, 1}};
+  init_scores->SetLoD(lod_s);
+
+
+  // 4. Run predictor
+  predictor->Run();
+
+  // 5. Get output
+  std::unique_ptr<const Tensor> output_tensor(
+      std::move(predictor->GetOutput(0)));
+  for (int i = 0; i < ShapeProduction(output_tensor->shape()); i++) {
+    printf("Output[%d]: %f\n", i, output_tensor->data<float>()[i]);
+  }
+}
+
+int main(int argc, char** argv) {
+  google::ParseCommandLineFlags(&argc, &argv, true);
+  RunModel();
+  return 0;
+}
+```
+3. 运行方法:
+参考以上代码编译出可执行文件`OCR_DEMO`,模型文件夹为`ocr_attention`。手机以USB调试、文件传输模式连接电脑
+在终端中输入以下命令执行OCR model测试:
+```
+#OCR_DEMO为编译出的可执行文件名称,ocr_attention为ocr_attention模型的文件夹名称
+adb push OCR_DEMO data/local/tmp
+adb push ocr_attention data/local/tmp
+adb shell 'cd data/local/tmp && ./OCR_DEMO --model_dir=./ocr_attention'
+```
+4. 运行结果
+
+
\ No newline at end of file
diff --git a/cxx_api.md b/cxx_api.md
new file mode 100644
index 0000000000000000000000000000000000000000..a05b2d3d697994c996e5da0989fac9d4b3a84961
--- /dev/null
+++ b/cxx_api.md
@@ -0,0 +1,63 @@
+# C++ API接口使用指南
+
+请参考[源码编译](./source_compile)确保 Lite 可以正确编译,下面以使用 Lite 的 C++ 接口加载并执行 MobileNetV1 模型为例,详细说明使用方法。
+
+## 准备模型
+
+Lite支持PaddlePaddle训练好的模型,MobileNetV1模型可以由以下三种方式得到:
+
+- 直接下载训练好的[MobileNetV1模型](https://paddle-inference-dist.bj.bcebos.com/mobilenet_v1.tar.gz)
+- 使用[PaddlePaddle](https://paddlepaddle.org.cn/)构建MobileNetV1网络并训练
+- 使用[X2Paddle](./x2paddle)对caffe或者tensorflow的MobileNetV1模型进行转换得到
+
+## 模型优化
+
+使用Model Optimize Tool优化模型,使得模型预测过程表现出优异的性能。Model Optimize Tool的具体使用方法请参考[文档](./model_optimize_tool)。
+
+- 准备model_optimize_tool
+- 使用model_optimize_tool优化模型
+- 得到优化后的模型,包括__model__.nb文件和param.nb文件
+
+## 加载模型
+
+加载MobileNetV1网络模型,创建predictor,具体可以参考```paddlelite/lite/api/model_test.cc```文件。
+```c++
+lite::DeviceInfo::Init();
+lite::DeviceInfo::Global().SetRunMode(lite::LITE_POWER_HIGH, thread_num);
+lite_api::MobileConfig config;
+config.set_model_dir(model_dir);
+
+auto predictor = lite_api::CreatePaddlePredictor(config);
+```
+
+## 设定输入
+
+得到input_tensor,设置输入值,此处我们设定为全1
+
+```cpp
+// 获取第 j 个 tensor 的句柄
+auto input_tensor = predictor->GetInput(j);
+input_tensor->Resize(input_shapes[j]);
+
+// 获取数据指针,以塞入数据
+auto input_data = input_tensor->mutable_data<float>();
+int input_num = 1;
+for (int i = 0; i < input_shapes[j].size(); ++i) {
+ input_num *= input_shapes[j][i];
+}
+for (int i = 0; i < input_num; ++i) {
+ input_data[i] = 1.f;
+}
+```
+
+## 执行并输出
+
+```cpp
+predictor->Run();
+auto out = predictor->GetOutput(0);
+LOG(INFO) << "dims " << out->dims();
+LOG(INFO) << "out data size: " << out->data_size();
+```
+
+输出为```dims dims{1000,}, out data size: 1000```
+
diff --git a/debug_tools.md b/debug_tools.md
new file mode 100644
index 0000000000000000000000000000000000000000..b904fdcd710af8e33bb4e3a8655dc3cca7699d38
--- /dev/null
+++ b/debug_tools.md
@@ -0,0 +1,77 @@
+# Debug tools
+
+**Lite Model Debug Tool** 是用来检查Paddle-Lite框架与Paddle-Fluid框架运行时tensor(包括variable与weight)之间diff信息的基础工具。
+
+## 工作流程:
+
+1. 运行 `/bin/bash check_model.sh --model_dir=<模型文件夹路径> --build_root_dir=<编译根目录> debug_cpp_stage` 获得模型在Paddle-Lite框架下的运行拓扑信息、variables信息和weights信息。运行后拓扑信息将会存储在默认名为 `topo_file.txt` 的文件中,variables和weights信息将会存储在默认名为 `tensor_cpp.txt` 的文件中。
+2. 运行 `/bin/bash check_model.sh --model_dir=<模型文件夹路径> --build_root_dir=<编译根目录> debug_py_stage`执行fluid框架预测以获取相同模型在fluid框架下的variable与weight信息(注意:我们使用fluid的python api运行fluid模型,因此您在运行此步之前应确保已正确安装fluid的python api)。然后debug tool将会自动比较Paddle-Lite框架输出的信息和Paddle-Fluid框架输出的信息来检查是否存在运行时diff。 执行Paddle-Fluid框架,输出的信息将会存储在默认名为 `tensor_py.txt` 的文件中,相应的diff信息将会存储在默认名为 `diff.txt`的文件中(默认情况下,只会输出执行拓扑序中第一个有diff的variable相关的信息)。
+
+## 注意事项:
+
+1. 输出的结果是在**执行完一次预测后**输出的相应变量/权重的最终值,因此如果您在预测过程进行过诸如变量复用/子图融合等优化方法,则相应的输出可能会出现偏差。
+2. 默认情况下debug tools将以全1作为输入进行比对。
+3. 默认情况下,为了保证与Paddle-Fluid框架的结果可比对,debug tool将会禁用掉所有的Paddle-Lite的优化策略。
+4. Paddle-Lite框架的执行环境与您的编译选项有关,比如您开启了LITE_WITH_ARM编译选项,那debug tool的`debug_cpp_stage`也需要在ARM平台下运行。
+
+## Diff信息输出:
+
+如果debug tool检测到diff信息,那么在`diff.txt`中将会输出类似以下结构信息
+
+```c++
+>>>>>>>>>>>>>>>>>>DIFF VARIABLE: dropout_0.tmp_0<<<<<<<<<<<<<<<<<<<
+dropout (X:pool2d_7.tmp_0) (Mask:dropout_0.tmp_1 Out:dropout_0.tmp_0)
+--------------- Tensor File info ---------------
+pool2d_7.tmp_0 {1,1536,1,1} 0.749892 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0150336 0.621641 0.147099 0.636727 0.0 0.0 0.00410917 0.784708 0.0 0.0704846 0.233599 0.840123 0.239201 0.112878 0.0 0.155352 0.306906 0.0 0.0 0.860938 0.221037 0.787316 0.256585 ...
+dropout_0.tmp_0 {1,1536,1,1} 0.749892 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0150336 0.621641 0.147099 0.636727 0.0 0.0 0.00410917 0.784708 0.0 0.0704846 0.233599 0.840123 0.239201 0.112878 0.0 0.155352 0.306906 0.0 0.0 0.860938 0.221037 0.787316 0.256585 ...
+--------------- Fluid Tensor info ---------------
+pool2d_7.tmp_0 {1,1536,1,1} 0.7498912 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.015033395 0.6216395 0.14709876 0.63672537 0.0 0.0 0.0041093696 0.7847073 0.0 0.07048465 0.23359808 0.8401219 0.23919891 0.1128789 0.0 0.1553514 0.3069055 0.0 0.0 0.8609365 0.22103554 ...
+dropout_0.tmp_0 {1,1536,1,1} 0.599913 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.012026716 0.4973116 0.117679015 0.5093803 0.0 0.0 0.0032874958 0.62776583 0.0 0.056387722 0.18687847 0.67209756 0.19135913 0.090303116 0.0 0.12428112 0.2455244 0.0 0.0 0.68874925 ...
+```
+
+其中第二行为op相关信息,标明了执行哪个op出现了diff及其对应的输入输出变量名。Tensor File info为Paddle-Lite框架的输出信息,而Fluid Tensor info为Paddle-Fluid框架的相应输出信息。
+示例中的`dropout_0.tmp_1`没有相应的tensor信息是因为工具检测到其在预测的后序流程中未被使用,因此不会对预测结果造成影响,从而将其自动屏蔽掉以保证输出尽量简洁。
+
+## 其他选项:
+
+| Option | Description |
+| --------------------------- | ------------------------------------------------------------ |
+| --input_file | 输入文件名,不同field以逗号分隔,相同field内以空格分隔, 只有文件中的第一行输入信息会被使用. 如果您不指定input_file,那么所有输入将会被置为1。注意:`debug_py_stage`目前不支持多field输入。 |
+| --cpp_topo_file | 存储运行时拓扑信息,由`debug_cpp_stage`写入并且由`debug_py_stage`读取使用。 默认为`topo_file.txt` 。 |
+| --cpp_tensor_file | 存储`debug_cpp_stage` 在运行拓扑序下的输出信息,默认为 `tensor_cpp.txt` 。 |
+| --tensor_names | 如果此选项不为空,那么只输出由此选项中指定名字的variable/weight信息,名字间用逗号分隔。 |
+| --tensor_output_length | 输出数据的长度,默认为全部输出。 |
+| --py_threshold | 判断diff发生的阈值,默认为 `1e-5` 。 |
+| --py_tensor_file | 存储`debug_py_stage` 在运行拓扑序下的输出信息,默认为`tensor_py.txt`. |
+| --py_output_file | diff信息的存储文件,默认为`diff.txt`。 |
+| --py_only_output_first_diff | 是否只输出运行时拓扑序中第一个有diff的var/op信息,默认为true |
+
+您可以参考 `check_model.sh` 脚本中的代码以获得更多细节.
+
+## Basic Profiler
+
+Basic profiler 用于 CPU 上kernel 耗时的统计,在 cmake 时添加 `-DLITE_WITH_PROFILER=ON` ,就可以开启相应支持。
+
+在模型执行完毕后,会自动打印类似如下 profiler 的日志
+
+```
+ kernel average min max count
+ feed/def/1/4/2 0 0 0 1
+ conv2d/def/4/1/1 1175 1175 1175 1
+ conv2d/def/4/1/1 1253 1253 1253 1
+ depthwise_conv2d/def/4/1/1 519 519 519 1
+ conv2d/def/4/1/1 721 721 721 1
+ elementwise_add/def/4/1/1 18 18 18 1
+ conv2d/def/4/1/1 2174 2174 2174 1
+ depthwise_conv2d/def/4/1/1 380 380 380 1
+ conv2d/def/4/1/1 773 773 773 1
+ elementwise_add/def/4/1/1 2 2 2 1
+ conv2d/def/4/1/1 1248 1248 1248 1
+ depthwise_conv2d/def/4/1/1 492 492 492 1
+ conv2d/def/4/1/1 1150 1150 1150 1
+ elementwise_add/def/4/1/1 33 33 33 1
+ elementwise_add/def/4/1/1 3 3 3 1
+ conv2d/def/4/1/1 1254 1254 1254 1
+ depthwise_conv2d/def/4/1/1 126 126 126 1
+```
+
diff --git a/demos.md.toc.2019-08-26_222115 b/demos.md.toc.2019-08-26_222115
new file mode 100644
index 0000000000000000000000000000000000000000..ab60264fec5f4a2b9192646803cf7dd21460dfde
--- /dev/null
+++ b/demos.md.toc.2019-08-26_222115
@@ -0,0 +1,19 @@
+-e -e * [Java Android Demo](#java-android-demo)
+ * [编译](#编译)
+ * [准备 demo 需要的其他文件](#准备-demo-需要的其他文件)
+ * [脚本方法](#脚本方法)
+ * [手动拷贝方法](#手动拷贝方法)
+ * [把 .so 动态库和 .jar 拷贝进安卓demo程序:](#把-so-动态库和-jar-拷贝进安卓demo程序)
+ * [把demo使用到的模型文件拷贝进安卓程序:](#把demo使用到的模型文件拷贝进安卓程序)
+ * [运行 Android 程序结果](#运行-android-程序结果)
+ * [C Demo](#c-demo)
+ * [编译](#编译-1)
+ * [准备执行环境](#准备执行环境)
+ * [使用安卓手机](#使用安卓手机)
+ * [使用安卓模拟器](#使用安卓模拟器)
+ * [下载模型并运行示例](#下载模型并运行示例)
+ * [Demo 程序运行结果](#demo-程序运行结果)
+ * [如何在代码中使用 API](#如何在代码中使用-api)
+
+
+
diff --git a/demos.md.toc.2019-08-26_222307 b/demos.md.toc.2019-08-26_222307
new file mode 100644
index 0000000000000000000000000000000000000000..ae8ee6b208e60c06ca8359cfe4102df943e917bc
--- /dev/null
+++ b/demos.md.toc.2019-08-26_222307
@@ -0,0 +1,19 @@
+-e -e * [Java Android Demo](#java-android-demo)
+ * [编译](#编译)
+ * [准备 demo 需要的其他文件](#准备-demo-需要的其他文件)
+ * [脚本方法](#脚本方法)
+ * [手动拷贝方法](#手动拷贝方法)
+ * [把 .so 动态库和 .jar 拷贝进安卓demo程序:](#把-so-动态库和-jar-拷贝进安卓demo程序)
+ * [把demo使用到的模型文件拷贝进安卓程序:](#把demo使用到的模型文件拷贝进安卓程序)
+ * [运行 Android 程序结果](#运行-android-程序结果)
+ * [C Demo](#c-demo)
+ * [编译](#编译-1)
+ * [准备执行环境](#准备执行环境)
+ * [使用安卓手机](#使用安卓手机)
+ * [使用安卓模拟器](#使用安卓模拟器)
+ * [下载模型并运行示例](#下载模型并运行示例)
+ * [Demo 程序运行结果](#demo-程序运行结果)
+ * [如何在代码中使用 API](#如何在代码中使用-api)
+
+
+
diff --git a/for-developer.md b/for-developer.md
new file mode 100644
index 0000000000000000000000000000000000000000..8c01f6e1e07f1d81d38f5c51d4d9b4bbeb2efce2
--- /dev/null
+++ b/for-developer.md
@@ -0,0 +1,15 @@
+# 基础须知
+
+可以参考 [Paddle 开发者文档](https://www.paddlepaddle.org.cn/documentation/docs/zh/1.5/advanced_usage/development/contribute_to_paddle/local_dev_guide.html)。
+
+# 提交PR
+
+需要在 commit message 里加上 `test=develop` 才能触发 CI
+
+# 版本发布检查清单
+
+1. 所有 feature 梳理,确认状态
+2. 所有 QA 测试结果梳理,确认版本可靠
+3. Release note 确认 review 通过
+4. 确认需要 release 的 binary 编译完毕
+
diff --git a/fpga.md b/fpga.md
new file mode 100644
index 0000000000000000000000000000000000000000..fdb48a26bf046279a4eebe69cc798042ed1fa163
--- /dev/null
+++ b/fpga.md
@@ -0,0 +1,107 @@
+# Lite基于fpga的模型预测
+
+Paddle Lite支持基于arm的fpga zu3/zu5/zu9的模型预测,提供armv8的交叉编译
+
+Lite基于fpga运行模型需要相应的fpga驱动,目前只支持百度edgeboard开发板
+
+**Lite实现fpga简介**
+
+Lite支持fpga作为后端硬件进行模型推理,其主要特性如下:
+
+- Lite中fpga的kernel(feed、fetch除外)均以FP16、NHWC的格式作为输入输出格式,所有的weights和bias仍为FP32、NCHW的格式,feed的输入和fetch的输出均为FP32、NCHW格式的数据,在提升计算速度的同时能做到用户对数据格式无感知
+
+- 对于fpga暂不支持的kernel,均会切回arm端运行,实现arm+fpga混合布署运行
+
+- 目前fpga成本功耗都较低,Lite基于fpga的模型性能远远好于arm端,可作为边缘设备首选硬件
+# 编译
+
+需要提前准备带有fpgadrv.ko的fpga开发板(如edgeboard开发板)和Lite代码
+
+CMAKE编译选项:
+
+- 设置`LITE_WITH_FPGA=ON`和`LITE_WITH_ARM=ON`
+
+其他编译选项与ARM编译相同,可以参考[“Paddle Lite在Docker下的ARM编译”](./source_compile)。
+示例如下:
+```shell
+ cmake .. \
+ -DWITH_GPU=OFF \
+ -DWITH_MKL=OFF \
+ -DWITH_LITE=ON \
+ -DLITE_WITH_CUDA=OFF \
+ -DLITE_WITH_X86=OFF \
+ -DLITE_WITH_ARM=ON \
+ -DLITE_WITH_OPENMP=ON \
+ -DLITE_WITH_LIGHT_WEIGHT_FRAMEWORK=ON \
+ -DWITH_TESTING=ON \
+ -DLITE_WITH_FPGA=ON \
+ -DARM_TARGET_OS=armlinux
+ make -j2
+```
+Lite提供fpga编译脚本,位于lite/tools/build_fpga.sh,在Lite根目录执行该脚本即可编译
+
+# 运行示例
+
+- **运行文件准备**
+
+下面以Resnet50模型为例,介绍如何使用edgeboard开发板实现模型运行
+
+```bash
+#连接开发板,并利用screen命令启动 [本机执行]
+screen /dev/cu.SLAB_USBtoUART 115200
+#查看开发板ip并ssh登录到开发板,假设开发板ip为192.0.1.1 [本机执行]
+ssh root@192.0.1.1
+
+#在开发板上建立目录workspace,拷贝fpga驱动fpgadrv.ko到workspace目录 [开发板执行]
+mkdir workspace && scp $DRIVER_PATH/fpgadrv.ko workspace
+
+#将Lite中编译好的测试程序拷贝到开发板workspace目录 [本机执行]
+scp $LITE_ROOT/build_fpga/lite/api/test_resnet50_fpga root@$EDGEBOARD_IP:workspace/
+#把Resnet50的模型和参数scp到开发板workspace目录 [本机执行]
+scp -r $LITE_ROOT/build_fpga/lite/third_party/install/resnet50/ root@$EDGEBOARD_IP:workspace/
+
+#在运行模型前需要加载fpga驱动 [开发板执行]
+insmod fpgadrv.ko
+#给测试程序添加可运行权限 [开发板执行]
+chmod +x test_resnet50_fpga
+```
+
+- **使用fpga进行模型预测**
+
+```bash
+#以下命令均在开发板上运行
+#直接运行单测程序
+./test_resnet50_fpga --model_dir=resnet50
+#如果需要测试性能,可以用repeats参数设置模型运行次数(如1000),同时可以设置预热次数(如10)来让硬件事先运行到稳定水平
+./test_resnet50_fpga --model_dir=resnet50 --repeats=1000 --warmup=10
+```
+
+# 如何在Code中使用
+
+在Lite中使用fpga与ARM相似,具体的区别如下:
+
+- 由于fpga运行模式为fp16精度、nhwc布局,所以需要修改相应的`valid_place`和`preferred_place`
+- fpga不需要device的初始化和运行模式设置
+
+代码示例:
+```cpp
+lite::Predictor predictor;
+std::vector<Place> valid_places(
+    {Place{TARGET(kFPGA), PRECISION(kFP16), DATALAYOUT(kNHWC)},
+     Place{TARGET(kHost), PRECISION(kFloat), DATALAYOUT(kNHWC)}});
+Place preferred_place = Place{TARGET(kFPGA), PRECISION(kFP16), DATALAYOUT(kNHWC)};
+
+predictor.Build(model_dir, preferred_place, valid_places);
+
+auto* input_tensor = predictor.GetInput(0);
+input_tensor->Resize(DDim(std::vector<int64_t>({1, 3, 224, 224})));
+auto* data = input_tensor->mutable_data<float>();
+auto item_size = input_tensor->dims().production();
+//假设设置输入数据全为1
+for (int i = 0; i < item_size; i++) {
+ data[i] = 1;
+}
+
+predictor.Run();
+auto* out = predictor.GetOutput(0);
+```
diff --git a/images/architecture.jpg b/images/architecture.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..0e6caa88a932553e212cbd899515ef4f5366839a
Binary files /dev/null and b/images/architecture.jpg differ
diff --git a/images/benchmark_result.png b/images/benchmark_result.png
new file mode 100644
index 0000000000000000000000000000000000000000..d991fefc7ec3436381eeacc515aace2d688ff13f
Binary files /dev/null and b/images/benchmark_result.png differ
diff --git a/images/img_mobilenetv1_inference.png b/images/img_mobilenetv1_inference.png
new file mode 100644
index 0000000000000000000000000000000000000000..931442dd849c68dd4013219575635d974e092662
Binary files /dev/null and b/images/img_mobilenetv1_inference.png differ
diff --git a/images/lite1.png b/images/lite1.png
new file mode 100644
index 0000000000000000000000000000000000000000..711330ee7661943543475d862504695ad4b7327e
Binary files /dev/null and b/images/lite1.png differ
diff --git a/images/model_quan_fig.png b/images/model_quan_fig.png
new file mode 100644
index 0000000000000000000000000000000000000000..ea6571509e4b8f1fa00ee8f9ffb0a6870b740d0f
Binary files /dev/null and b/images/model_quan_fig.png differ
diff --git a/images/model_quan_table1.png b/images/model_quan_table1.png
new file mode 100644
index 0000000000000000000000000000000000000000..53bc672246c341dd46dbb7a269ff2b3d1c35a05d
Binary files /dev/null and b/images/model_quan_table1.png differ
diff --git a/images/phone_list.png b/images/phone_list.png
new file mode 100644
index 0000000000000000000000000000000000000000..e2efc37b003baf0a8376e80dea1b32923f5fe558
Binary files /dev/null and b/images/phone_list.png differ
diff --git a/images/run_benchmark.png b/images/run_benchmark.png
new file mode 100644
index 0000000000000000000000000000000000000000..e3d349b936d8082646569eb0b38d0dc6bcadada8
Binary files /dev/null and b/images/run_benchmark.png differ
diff --git a/java_demo.md b/java_demo.md
new file mode 100644
index 0000000000000000000000000000000000000000..5cfaf9ba60747fbe4096865db5a96b9a15218770
--- /dev/null
+++ b/java_demo.md
@@ -0,0 +1,112 @@
+
+* [Java Android Demo](#java-android-demo)
+ * [编译](#编译)
+ * [准备 demo 需要的其他文件](#准备-demo-需要的其他文件)
+ * [脚本方法](#脚本方法)
+ * [手动拷贝方法](#手动拷贝方法)
+ * [把 .so 动态库和 .jar 拷贝进安卓demo程序:](#把-so-动态库和-jar-拷贝进安卓demo程序)
+ * [把demo使用到的模型文件拷贝进安卓程序:](#把demo使用到的模型文件拷贝进安卓程序)
+ * [运行 Android 程序结果](#运行-android-程序结果)
+
+
+
+
+
+# Java Android Demo
+本节中,Java demo 完整代码位于 [demo/java](https://github.com/PaddlePaddle/Paddle-Lite/tree/develop/lite/demo/java) 。
+
+要编译和跑起Android demo 程序 PaddlePredictor,你需要准备:
+
+1. 一台能运行安卓程序的安卓手机
+2. 一台带有AndroidStudio的开发机
+
+## 编译
+
+首先在PaddleLite的开发 [Docker镜像](./source_compile) 中,拉取最新PaddleLite代码,编译对应你手机架构的预测库,
+下面我们以arm8 架构举例。进入paddlelite 目录,运行以下命令:
+
+```shell
+./lite/tools/build.sh \
+ --arm_os=android \
+ --arm_abi=armv8 \
+ --arm_lang=gcc \
+ --android_stl=c++_static \
+ tiny_publish
+```
+
+命令完成后,查看以下文件是否存在:
+
+```
+./build.lite.android.armv8.gcc/inference_lite_lib.android.armv8/java/so/libpaddle_lite_jni.so
+./build.lite.android.armv8.gcc/inference_lite_lib.android.armv8/java/jar/PaddlePredictor.jar
+./build.lite.android.armv8.gcc/inference_lite_lib.android.armv8/demo/java/android
+```
+
+libpaddle_lite_jni.so为 PaddleLite c++ 动态链接库,PaddlePredictor.jar为 Java jar 包,两者包含 PaddleLite Java API,接下来 Android Java 代码会使用这些api。android文件夹中则是Android demo。
+
+## 准备 demo 需要的其他文件
+
+Demo 除了代码,还需要准备在Android工程目录下配置好JNI .so 库(上节提到的`libpaddle_lite_jni.so`),Java .jar 包(上文提到的`PaddlePredictor.jar` ),和模型文件。我们提供了自动化的脚本和手动拷贝两种方法,用户可以根据自己需要选择:
+
+### 脚本方法
+
+进入 `build.lite.android.armv8.gcc/inference_lite_lib.android.armv8/demo/java/android`,我们准备了一个脚本`prepare_demo.bash`,脚本输入一个参数,为你要拷贝的.so 对应的架构文件夹名。
+
+例如运行
+
+```
+bash prepare_demo.bash arm8
+```
+
+该脚本自动下载并解压缩模型文件,拷贝了 .jar 包进demo,还有生成的.so包进`PaddlePredictor/app/src/main/jniLibs/架构文件夹下`,
+在我们这个例子里,arm8 就是架构文件夹。备注:这种方式构建的 demo 在 armv8 手机运行正常。如果要demo 程序在别的手机架构(如 armv7)上也运行正常,需要添加别的架构。
+
+### 手动拷贝方法
+
+接下来我们介绍手动拷贝,如果使用了脚本,那么可以跳过以下手动方法的介绍。
+
+### 把 .so 动态库和 .jar 拷贝进安卓demo程序:
+
+1. 将PaddlePredictor 载入到AndroidStudio。
+2. 将`libpaddle_lite_jni.so`拷贝进 `PaddlePredictor/app/src/main/jniLibs/架构文件夹下` ,比如文件夹arm8里要包含该 .so文件。
+3. 将 `PaddlePredictor.jar` 拷贝进 `PaddlePredictor/app/libs` 下
+
+### 把demo使用到的模型文件拷贝进安卓程序:
+
+下载我们的5个模型文件,并解压缩到 `PaddlePredictor/app/src/main/assets` 这个文件夹中
+需要拷贝的模型文件和下载地址:
+
+```
+inception_v4_simple_opt.nb http://paddle-inference-dist.bj.bcebos.com/inception_v4_simple_opt.nb.tar.gz
+lite_naive_model_opt.nb http://paddle-inference-dist.bj.bcebos.com/lite_naive_model_opt.nb.tar.gz
+mobilenet_v1_opt.nb http://paddle-inference-dist.bj.bcebos.com/mobilenet_v1_opt.nb.tar.gz
+mobilenet_v2_relu_opt.nb http://paddle-inference-dist.bj.bcebos.com/mobilenet_v2_relu_opt.nb.tar.gz
+resnet50_opt.nb http://paddle-inference-dist.bj.bcebos.com/resnet50_opt.nb.tar.gz
+```
+
+下载完后,assets文件夹里要包含解压后的上面五个模型文件夹,但demo里不需要保存原压缩.tar.gz 文件。
+
+注意:输入的模型要求为naive buffer存储格式,您可以通过 [**Model Optimize Tool**](./model_optimize_tool) 将fluid模型转为naive buffer存储格式。
+
+## 运行 Android 程序结果
+
+以上准备工作完成,就可以开始Build 、安装、和运行安卓demo程序。当你运行PaddlePredictor 程序时,大概会等10秒,然后看到类似以下字样:
+
+```
+lite_naive_model output: 50.213173, -28.872887
+expected: 50.2132, -28.8729
+
+inception_v4_simple test:true
+time: xxx ms
+
+resnet50 test:true
+time: xxx ms
+
+mobilenet_v1 test:true
+time: xxx ms
+
+mobilenet_v2 test:true
+time: xxx ms
+```
+
+该 demo 程序跑我们的 5 个模型,第一个模型结果将真正的头两个数字输出,并在第二行附上期望的正确值。你应该要看到他们的误差小于0.001。后面四个模型如果你看到 `test:true` 字样,说明模型输出通过了我们在 demo 程序里对其输出的测试。time 代表该测试花费的时间。
diff --git a/lite/CMakeLists.txt b/lite/CMakeLists.txt
deleted file mode 100644
index 937781293a77732fa6c115327b1a4c824c1f0930..0000000000000000000000000000000000000000
--- a/lite/CMakeLists.txt
+++ /dev/null
@@ -1,159 +0,0 @@
-include(lite)
-
-message(WARNING "Lite enabled!")
-message(STATUS "LIGHT_FRAMEWORK:\t${LITE_WITH_LIGHT_WEIGHT_FRAMEWORK}")
-message(STATUS "LITE_WITH_CUDA:\t${LITE_WITH_CUDA}")
-message(STATUS "LITE_WITH_X86:\t${LITE_WITH_X86}")
-message(STATUS "LITE_WITH_ARM:\t${LITE_WITH_ARM}")
-message(STATUS "LITE_WITH_NPU:\t${LITE_WITH_NPU}")
-message(STATUS "LITE_WITH_FPGA:\t${LITE_WITH_FPGA}")
-message(STATUS "LITE_WITH_PROFILE:\t${LITE_WITH_PROFILE}")
-
-set(LITE_MODEL_DIR "${THIRD_PARTY_PATH}/install")
-set(LITE_ON_MOBILE ${LITE_WITH_LIGHT_WEIGHT_FRAMEWORK})
-
-add_subdirectory(utils)
-add_subdirectory(operators)
-add_subdirectory(kernels)
-add_subdirectory(core)
-add_subdirectory(model_parser)
-add_subdirectory(api)
-add_subdirectory(fluid)
-add_subdirectory(backends)
-
-if (NOT LITE_ON_TINY_PUBLISH)
- add_subdirectory(tests)
- add_subdirectory(tools)
-endif()
-if (LITE_WITH_LIGHT_WEIGHT_FRAMEWORK AND NOT LITE_ON_TINY_PUBLISH)
- add_subdirectory(gen_code)
-endif()
-
-if (WITH_TESTING)
- lite_download_and_uncompress(${LITE_MODEL_DIR} ${LITE_URL} "lite_naive_model.tar.gz")
- if(LITE_WITH_LIGHT_WEIGHT_FRAMEWORK)
- lite_download_and_uncompress(${LITE_MODEL_DIR} ${LITE_URL} "mobilenet_v1.tar.gz")
- lite_download_and_uncompress(${LITE_MODEL_DIR} ${LITE_URL} "mobilenet_v2_relu.tar.gz")
- lite_download_and_uncompress(${LITE_MODEL_DIR} ${LITE_URL} "resnet50.tar.gz")
- lite_download_and_uncompress(${LITE_MODEL_DIR} ${LITE_URL} "inception_v4_simple.tar.gz")
- lite_download_and_uncompress(${LITE_MODEL_DIR} ${LITE_URL} "MobileNetV1_quant.tar.gz")
- endif()
- if(NOT LITE_WITH_LIGHT_WEIGHT_FRAMEWORK)
- lite_download_and_uncompress(${LITE_MODEL_DIR} ${LITE_URL} "GoogleNet_inference.tar.gz")
- lite_download_and_uncompress(${LITE_MODEL_DIR} ${LITE_URL} "mobilenet_v1.tar.gz")
- lite_download_and_uncompress(${LITE_MODEL_DIR} ${LITE_URL} "mobilenet_v2_relu.tar.gz")
- lite_download_and_uncompress(${LITE_MODEL_DIR} ${LITE_URL} "resnet50.tar.gz")
- lite_download_and_uncompress(${LITE_MODEL_DIR} ${LITE_URL} "inception_v4_simple.tar.gz")
- endif()
-endif()
-
-if (LITE_WITH_LIGHT_WEIGHT_FRAMEWORK AND LITE_WITH_ARM)
- # for publish
- set(INFER_LITE_PUBLISH_ROOT "${CMAKE_BINARY_DIR}/inference_lite_lib.${ARM_TARGET_OS}.${ARM_TARGET_ARCH_ABI}")
- if (LITE_WITH_OPENCL)
- set(INFER_LITE_PUBLISH_ROOT "${INFER_LITE_PUBLISH_ROOT}.opencl")
- endif(LITE_WITH_OPENCL)
- if (LITE_WITH_NPU)
- set(INFER_LITE_PUBLISH_ROOT "${INFER_LITE_PUBLISH_ROOT}.npu")
- endif(LITE_WITH_NPU)
- message(STATUS "publish inference lib to ${INFER_LITE_PUBLISH_ROOT}")
-
- # The final target for publish lite lib
- add_custom_target(publish_inference)
- if (NOT LITE_ON_TINY_PUBLISH)
- # add cxx lib
- add_custom_target(publish_inference_cxx_lib ${TARGET}
- COMMAND mkdir -p "${INFER_LITE_PUBLISH_ROOT}/cxx/lib"
- COMMAND mkdir -p "${INFER_LITE_PUBLISH_ROOT}/bin"
- COMMAND mkdir -p "${INFER_LITE_PUBLISH_ROOT}/cxx/include"
- COMMAND cp "${CMAKE_SOURCE_DIR}/lite/api/paddle_*.h" "${INFER_LITE_PUBLISH_ROOT}/cxx/include"
- COMMAND cp "${CMAKE_BINARY_DIR}/libpaddle_api_full_bundled.a" "${INFER_LITE_PUBLISH_ROOT}/cxx/lib"
- COMMAND cp "${CMAKE_BINARY_DIR}/libpaddle_api_light_bundled.a" "${INFER_LITE_PUBLISH_ROOT}/cxx/lib"
- #COMMAND cp "${CMAKE_BINARY_DIR}/lite/api/model_optimize_tool" "${INFER_LITE_PUBLISH_ROOT}/bin"
- COMMAND cp "${CMAKE_BINARY_DIR}/lite/gen_code/paddle_code_generator" "${INFER_LITE_PUBLISH_ROOT}/bin"
- COMMAND cp "${CMAKE_BINARY_DIR}/lite/api/test_model_bin" "${INFER_LITE_PUBLISH_ROOT}/bin"
- )
- if(NOT IOS)
- #add_dependencies(publish_inference_cxx_lib model_optimize_tool)
- add_dependencies(publish_inference_cxx_lib paddle_code_generator)
- add_dependencies(publish_inference_cxx_lib bundle_full_api)
- add_dependencies(publish_inference_cxx_lib bundle_light_api)
- add_dependencies(publish_inference_cxx_lib test_model_bin)
- add_dependencies(publish_inference publish_inference_cxx_lib)
- add_custom_command(TARGET publish_inference_cxx_lib POST_BUILD
- COMMAND ${CMAKE_STRIP} "--strip-debug" ${INFER_LITE_PUBLISH_ROOT}/cxx/lib/*.a)
- endif()
- else()
- if (IOS OR (ARM_TARGET_OS STREQUAL "armlinux"))
- add_custom_target(tiny_publish_lib ${TARGET}
- COMMAND mkdir -p "${INFER_LITE_PUBLISH_ROOT}/lib"
- COMMAND mkdir -p "${INFER_LITE_PUBLISH_ROOT}/include"
- COMMAND cp "${CMAKE_SOURCE_DIR}/lite/api/paddle_*.h" "${INFER_LITE_PUBLISH_ROOT}/include"
- COMMAND cp "${CMAKE_BINARY_DIR}/libpaddle_api_light_bundled.a" "${INFER_LITE_PUBLISH_ROOT}/lib"
- )
- add_dependencies(tiny_publish_lib bundle_light_api)
- add_dependencies(publish_inference tiny_publish_lib)
- endif()
- endif()
-
-
- if (LITE_WITH_JAVA)
- # add java lib
- add_custom_target(publish_inference_java_lib ${TARGET}
- COMMAND mkdir -p "${INFER_LITE_PUBLISH_ROOT}/java/so"
- COMMAND mkdir -p "${INFER_LITE_PUBLISH_ROOT}/java/jar"
- COMMAND cp "${CMAKE_BINARY_DIR}/lite/api/android/jni/native/libpaddle_lite_jni.so" "${INFER_LITE_PUBLISH_ROOT}/java/so"
- COMMAND cp "${CMAKE_BINARY_DIR}/lite/api/android/jni/PaddlePredictor.jar" "${INFER_LITE_PUBLISH_ROOT}/java/jar"
- COMMAND cp -r "${CMAKE_SOURCE_DIR}/lite/api/android/jni/src" "${INFER_LITE_PUBLISH_ROOT}/java"
- )
- add_dependencies(publish_inference_java_lib paddle_lite_jni PaddlePredictor)
- add_dependencies(publish_inference publish_inference_java_lib)
- add_custom_command(TARGET publish_inference_java_lib POST_BUILD
- COMMAND ${CMAKE_STRIP} "-s" ${INFER_LITE_PUBLISH_ROOT}/java/so/libpaddle_lite_jni.so)
- endif()
-
- if ((ARM_TARGET_OS STREQUAL "android") AND (NOT LITE_WITH_OPENCL) AND
- ((ARM_TARGET_ARCH_ABI STREQUAL armv7) OR (ARM_TARGET_ARCH_ABI STREQUAL armv8)))
- if (NOT LITE_ON_TINY_PUBLISH)
- # copy
- add_custom_target(publish_inference_android_cxx_demos ${TARGET}
- COMMAND mkdir -p "${INFER_LITE_PUBLISH_ROOT}/third_party"
- COMMAND mkdir -p "${INFER_LITE_PUBLISH_ROOT}/demo/cxx"
- COMMAND mkdir -p "${INFER_LITE_PUBLISH_ROOT}/demo/cxx/include"
- COMMAND cp -r "${CMAKE_BINARY_DIR}/third_party/install/gflags" "${INFER_LITE_PUBLISH_ROOT}/third_party"
- COMMAND cp "${CMAKE_SOURCE_DIR}/lite/demo/cxx/Makefile.def" "${INFER_LITE_PUBLISH_ROOT}/demo/cxx"
- COMMAND cp "${CMAKE_SOURCE_DIR}/lite/demo/cxx/README.md" "${INFER_LITE_PUBLISH_ROOT}/demo/cxx"
- COMMAND cp -r "${CMAKE_SOURCE_DIR}/lite/demo/cxx/mobile_full" "${INFER_LITE_PUBLISH_ROOT}/demo/cxx"
- COMMAND cp "${CMAKE_SOURCE_DIR}/lite/demo/cxx/makefiles/mobile_full/Makefile.${ARM_TARGET_OS}.${ARM_TARGET_ARCH_ABI}" "${INFER_LITE_PUBLISH_ROOT}/demo/cxx/mobile_full/Makefile"
- COMMAND cp -r "${CMAKE_SOURCE_DIR}/lite/demo/cxx/mobile_light" "${INFER_LITE_PUBLISH_ROOT}/demo/cxx"
- COMMAND cp "${CMAKE_SOURCE_DIR}/lite/demo/cxx/makefiles/mobile_light/Makefile.${ARM_TARGET_OS}.${ARM_TARGET_ARCH_ABI}" "${INFER_LITE_PUBLISH_ROOT}/demo/cxx/mobile_light/Makefile"
- )
- add_dependencies(publish_inference_android_cxx_demos logging gflags)
- add_dependencies(publish_inference_cxx_lib publish_inference_android_cxx_demos)
- endif()
-
- if (LITE_WITH_JAVA)
- # copy java mobile_light demo/lib
- add_custom_target(publish_inference_android_java_demo ${TARGET}
- COMMAND mkdir -p "${INFER_LITE_PUBLISH_ROOT}/demo/java"
- COMMAND cp -r "${CMAKE_SOURCE_DIR}/lite/demo/java/android" "${INFER_LITE_PUBLISH_ROOT}/demo/java"
- COMMAND cp "${CMAKE_SOURCE_DIR}/lite/demo/java/README.md" "${INFER_LITE_PUBLISH_ROOT}/demo/java"
- COMMAND mkdir -p "${INFER_LITE_PUBLISH_ROOT}/demo/java/android/PaddlePredictor/app/libs"
- COMMAND mkdir -p "${INFER_LITE_PUBLISH_ROOT}/demo/java/android/PaddlePredictor/app/src/main/jniLibs/arm7"
- COMMAND mkdir -p "${INFER_LITE_PUBLISH_ROOT}/demo/java/android/PaddlePredictor/app/src/main/jniLibs/arm8"
- COMMAND mkdir -p "${INFER_LITE_PUBLISH_ROOT}/demo/java/android/PaddlePredictor/app/src/main/jniLibs/arm64-v8a"
- COMMAND mkdir -p "${INFER_LITE_PUBLISH_ROOT}/demo/java/android/PaddlePredictor/app/src/main/jniLibs/armeabi-v7a"
- COMMAND mkdir -p "${INFER_LITE_PUBLISH_ROOT}/demo/java/android/PaddlePredictor/app/src/main/jniLibs/x86"
- )
- add_dependencies(publish_inference_java_lib publish_inference_android_java_demo)
- endif()
- endif()
-
- if (LITE_WITH_OPENCL)
- add_custom_target(publish_inference_opencl ${TARGET}
- COMMAND mkdir -p "${INFER_LITE_PUBLISH_ROOT}/opencl"
- COMMAND cp -r "${CMAKE_SOURCE_DIR}/lite/backends/opencl/cl_kernel" "${INFER_LITE_PUBLISH_ROOT}/opencl"
- )
- add_dependencies(publish_inference_cxx_lib publish_inference_opencl)
- endif()
-endif()
diff --git a/lite/api/CMakeLists.txt b/lite/api/CMakeLists.txt
deleted file mode 100644
index 7767458b3789eeb6c5775ae0f86da121aee10820..0000000000000000000000000000000000000000
--- a/lite/api/CMakeLists.txt
+++ /dev/null
@@ -1,239 +0,0 @@
-if(LITE_WITH_LIGHT_WEIGHT_FRAMEWORK)
- lite_cc_library(place SRCS paddle_place.cc DEPS logging)
-else()
- lite_cc_library(place SRCS paddle_place.cc DEPS glog)
-endif(LITE_WITH_LIGHT_WEIGHT_FRAMEWORK)
-
-if (WITH_TESTING)
- lite_cc_library(lite_api_test_helper SRCS lite_api_test_helper.cc
- DEPS scope optimizer target_wrapper_host model_parser program
- ${ops} ${host_kernels}
- CUDA_DEPS ${cuda_kernels}
- X86_DEPS ${x86_kernels})
-endif()
-if(LITE_WITH_FPGA)
- set(light_api_deps ${light_api_deps} ${fpga_deps})
- set(cxx_api_deps ${cxx_api_deps} ${fpga_deps})
-endif()
-
-message(STATUS "get ops ${ops}")
-message(STATUS "get X86 kernels ${x86_kernels}")
-message(STATUS "get Host kernels ${host_kernels}")
-message(STATUS "get ARM kernels ${arm_kernels}")
-message(STATUS "get NPU kernels ${npu_kernels}")
-message(STATUS "get FPGA kernels ${fpga_kernels}")
-
-# for full api
-if (NOT LITE_ON_TINY_PUBLISH)
- set(cxx_api_deps
- scope optimizer target_wrapper_host model_parser program)
- lite_cc_library(cxx_api
- SRCS cxx_api.cc
- DEPS ${cxx_api_deps} ${ops} ${host_kernels} program
- X86_DEPS ${x86_kernels}
- ARM_DEPS ${arm_kernels}
- NPU_DEPS ${npu_kernels} ${npu_bridges} npu_pass
- CL_DEPS ${opencl_kenrels}
- FPGA_DEPS ${fpga_kenrels})
-endif()
-
-# for light api
-set(light_api_deps
- scope target_wrapper_host model_parser program)
-if(LITE_WITH_CUDA)
- set(light_api_deps ${light_api_deps} target_wrapper_cuda)
-endif()
-lite_cc_library(light_api SRCS light_api.cc
- DEPS scope target_wrapper_host model_parser
- ${light_api_deps} ${ops} ${host_kernels} program
- CUDA_DEPS ${cuda_kernels}
- X86_DEPS ${x86_kernels}
- ARM_DEPS ${arm_kernels}
- NPU_DEPS ${npu_kernels} ${npu_bridges} npu_pass
- CL_DEPS ${opencl_kenrels}
- FPGA_DEPS ${fpga_kenrels})
-
-include(ExternalProject)
-set(LITE_DEMO_INSTALL_DIR "${THIRD_PARTY_PATH}/inference_demo" CACHE STRING
- "A path setting inference demo download directories.")
-
-if(WITH_TESTING)
- lite_cc_test(test_cxx_api SRCS cxx_api_test.cc
- DEPS cxx_api mir_passes lite_api_test_helper
- ${ops} ${host_kernels}
- X86_DEPS ${x86_kernels}
- ARM_DEPS ${arm_kernels}
- NPU_DEPS ${npu_kernels}
- CL_DEPS ${opencl_kernels}
- FPGA_DEPS ${fpga_kernels}
- EXCLUDE_COMPILE_DEPS "ON"
- ARGS --model_dir=${LITE_MODEL_DIR}/lite_naive_model
- --optimized_model=${LITE_MODEL_DIR}/lite_naive_model_opt SERIAL)
- add_dependencies(test_cxx_api extern_lite_download_lite_naive_model_tar_gz)
- if(NOT LITE_WITH_LIGHT_WEIGHT_FRAMEWORK)
- lite_cc_test(test_googlenet SRCS test_googlenet_lite.cc
- DEPS cxx_api mir_passes lite_api_test_helper
- ${ops} ${host_kernels} ${x86_kernels}
- ARGS --model_dir=${LITE_MODEL_DIR}/googlenet)
- add_dependencies(test_googlenet extern_lite_download_GoogleNet_inference_tar_gz)
- lite_cc_test(test_mobilenetv1_lite_x86 SRCS test_mobilenetv1_lite_x86.cc
- DEPS cxx_api mir_passes lite_api_test_helper
- ${ops} ${host_kernels} ${x86_kernels}
- ARGS --model_dir=${LITE_MODEL_DIR}/mobilenet_v1)
- add_dependencies(test_mobilenetv1_lite_x86 extern_lite_download_mobilenet_v1_tar_gz)
- lite_cc_test(test_mobilenetv2_lite_x86 SRCS test_mobilenetv2_lite_x86.cc
- DEPS cxx_api mir_passes lite_api_test_helper
- ${ops} ${host_kernels} ${x86_kernels}
- ARGS --model_dir=${LITE_MODEL_DIR}/mobilenet_v2_relu)
- add_dependencies(test_mobilenetv2_lite_x86 extern_lite_download_mobilenet_v2_relu_tar_gz)
- lite_cc_test(test_inceptionv4_lite_x86 SRCS test_inceptionv4_lite_x86.cc
- DEPS cxx_api mir_passes lite_api_test_helper
- ${ops} ${host_kernels} ${x86_kernels}
- ARGS --model_dir=${LITE_MODEL_DIR}/inception_v4_simple)
- add_dependencies(test_inceptionv4_lite_x86 extern_lite_download_inception_v4_simple_tar_gz)
- endif()
-endif()
-
-if(LITE_WITH_LIGHT_WEIGHT_FRAMEWORK AND WITH_TESTING)
- set(lite_model_test_DEPS cxx_api mir_passes ${ops} ${host_kernels} ${arm_kernels} ${npu_kernels} ${fpga_kernels})
-
- lite_cc_test(test_mobilenetv1_int8 SRCS mobilenetv1_int8_test.cc
- DEPS ${lite_model_test_DEPS}
- CL_DEPS ${opencl_kernels}
- ARGS --cl_path=${CMAKE_SOURCE_DIR}/lite/backends/opencl
- --model_dir=${LITE_MODEL_DIR}/MobilenetV1_quant SERIAL)
- add_dependencies(test_mobilenetv1_int8 extern_lite_download_MobileNetV1_quant_tar_gz)
-
- lite_cc_test(test_mobilenetv1 SRCS mobilenetv1_test.cc
- DEPS ${lite_model_test_DEPS}
- CL_DEPS ${opencl_kernels}
- NPU_DEPS ${npu_kernels} ${npu_bridges}
- ARGS --cl_path=${CMAKE_SOURCE_DIR}/lite/backends/opencl
- --model_dir=${LITE_MODEL_DIR}/mobilenet_v1 SERIAL)
- add_dependencies(test_mobilenetv1 extern_lite_download_mobilenet_v1_tar_gz)
- set(LINK_FLAGS "-Wl,--version-script ${PADDLE_SOURCE_DIR}/lite/core/lite.map")
- set_target_properties(test_mobilenetv1 PROPERTIES LINK_FLAGS "${LINK_FLAGS}")
-
- lite_cc_test(test_mobilenetv2 SRCS mobilenetv2_test.cc
- DEPS ${lite_model_test_DEPS}
- CL_DEPS ${opencl_kernels}
- ARGS --cl_path=${CMAKE_SOURCE_DIR}/lite/backends/opencl
- --model_dir=${LITE_MODEL_DIR}/mobilenet_v2_relu SERIAL)
- add_dependencies(test_mobilenetv2 extern_lite_download_mobilenet_v2_relu_tar_gz)
- set_target_properties(test_mobilenetv2 PROPERTIES LINK_FLAGS "${LINK_FLAGS}")
-
- lite_cc_test(test_resnet50 SRCS resnet50_test.cc
- DEPS ${lite_model_test_DEPS} paddle_api_light
- CL_DEPS ${opencl_kernels}
- FPGA_DEPS ${fpga_kernels}
- ARGS --cl_path=${CMAKE_SOURCE_DIR}/lite/backends/opencl
- --model_dir=${LITE_MODEL_DIR}/resnet50 SERIAL)
- add_dependencies(test_resnet50 extern_lite_download_resnet50_tar_gz)
-
- lite_cc_test(test_resnet50_fpga SRCS resnet50_test_fpga.cc
- DEPS ${lite_model_test_DEPS}
- CL_DEPS ${opencl_kernels}
- FPGA_DEPS ${fpga_kernels})
-
- lite_cc_test(test_inceptionv4 SRCS inceptionv4_test.cc
- DEPS ${lite_model_test_DEPS}
- CL_DEPS ${opencl_kernels}
- ARGS --cl_path=${CMAKE_SOURCE_DIR}/lite/backends/opencl
- --model_dir=${LITE_MODEL_DIR}/inception_v4 SERIAL)
- add_dependencies(test_inceptionv4 extern_lite_download_inception_v4_simple_tar_gz)
- # lite_cc_test(test_ocr_attention SRCS ocr_attention_test.cc
- # DEPS ${lite_model_test_DEPS})
-
- # lite_cc_test(model_run_test_image SRCS model_run_test_image.cc
- # DEPS ${lite_model_test_DEPS}
- # CL_DEPS ${opencl_kernels}
- # FPGA_DEPS ${fpga_kernels})
-endif()
-
-# These tests needs CLI arguments, and is not supported in ARM CI.
-# TODO(Superjomn) support latter.
-lite_cc_test(test_light_api SRCS light_api_test.cc
- DEPS light_api program mir_passes
- CL_DEPS ${opencl_kernels}
- FPGA_DEPS ${fpga_kernels}
- ARGS --optimized_model=${LITE_MODEL_DIR}/lite_naive_model_opt SERIAL)
-
-lite_cc_test(test_apis SRCS apis_test.cc
- DEPS cxx_api light_api ${ops}
- CL_DEPS ${opencl_kernels}
- X86_DEPS ${x86_kernels}
- FPGA_DEPS ${fpga_kernels}
- ARGS --model_dir=${LITE_MODEL_DIR}/lite_naive_model
- --optimized_model=${LITE_MODEL_DIR}/lite_naive_model_opt SERIAL)
-
-lite_cc_library(paddle_api SRCS paddle_api.cc DEPS op_params tensor)
-
-#-----------------------------------------------------------------------------------------------------
-# The final inference library for both CxxConfig and MobileConfig.
-if (LITE_ON_TINY_PUBLISH)
- lite_cc_library(paddle_api_light SRCS light_api_impl.cc DEPS light_api paddle_api stream)
-else()
- lite_cc_library(paddle_api_light SRCS light_api_impl.cc DEPS light_api paddle_api)
-endif()
-if (NOT LITE_ON_TINY_PUBLISH)
- lite_cc_library(paddle_api_full SRCS cxx_api_impl.cc DEPS cxx_api paddle_api_light
- ${ops}
- ARM_DEPS ${arm_kernels}
- NPU_DEPS ${npu_kernels}
- CL_DEPS ${opencl_kernels}
- FPGA_DEPS ${fpga_kernels})
- # The final inference library for just MobileConfig.
- bundle_static_library(paddle_api_full paddle_api_full_bundled bundle_full_api)
-endif()
-bundle_static_library(paddle_api_light paddle_api_light_bundled bundle_light_api)
-#-----------------------------------------------------------------------------------------------------
-
-if (LITE_WITH_JAVA AND LITE_WITH_ARM)
- add_subdirectory(android)
-endif()
-
-if (LITE_ON_TINY_PUBLISH)
- return()
-endif()
-
-if (LITE_ON_MODEL_OPTIMIZE_TOOL)
- message(STATUS "Compiling model_optimize_tool")
- lite_cc_binary(model_optimize_tool SRCS model_optimize_tool.cc cxx_api_impl.cc paddle_api.cc cxx_api.cc
- DEPS gflags kernel op optimizer mir_passes utils)
- add_dependencies(model_optimize_tool op_list_h kernel_list_h all_kernel_faked_cc)
-endif(LITE_ON_MODEL_OPTIMIZE_TOOL)
-
-lite_cc_test(test_paddle_api SRCS paddle_api_test.cc DEPS paddle_api_full paddle_api_light
- ${ops}
- ARM_DEPS ${arm_kernels}
- NPU_DEPS ${npu_kernels}
- CL_DEPS ${opencl_kernels}
- X86_DEPS ${x86_kernels}
- FPGA_DEPS ${fpga_kernels}
- ARGS --model_dir=${LITE_MODEL_DIR}/lite_naive_model SERIAL)
-if (WITH_TESTING)
- add_dependencies(test_paddle_api extern_lite_download_lite_naive_model_tar_gz)
-endif()
-
-# Some bins
-if(NOT IOS)
- lite_cc_binary(test_model_bin SRCS model_test.cc DEPS paddle_api_full paddle_api_light gflags utils
- ${ops}
- ARM_DEPS ${arm_kernels}
- NPU_DEPS ${npu_kernels}
- CL_DEPS ${opencl_kernels}
- FPGA_DEPS ${fpga_kernels}
- X86_DEPS ${x86_kernels})
- lite_cc_binary(benchmark_bin SRCS benchmark.cc DEPS paddle_api_full paddle_api_light gflags utils
- ${ops}
- ARM_DEPS ${arm_kernels}
- NPU_DEPS ${npu_kernels}
- CL_DEPS ${opencl_kernels}
- FPGA_DEPS ${fpga_kernels}
- X86_DEPS ${x86_kernels})
-endif()
-
-#lite_cc_binary(cxx_api_bin SRCS cxx_api_bin.cc
- #X86_DEPS operator
- #DEPS light_api model_parser target_wrapper_host mir_passes
- #ARM_DEPS ${arm_kernels}) NPU_DEPS ${npu_kernels})
diff --git a/lite/api/_paddle_use_kernels.h b/lite/api/_paddle_use_kernels.h
deleted file mode 100644
index 75756736f40a707fee06b0139f628b13225d04aa..0000000000000000000000000000000000000000
--- a/lite/api/_paddle_use_kernels.h
+++ /dev/null
@@ -1,209 +0,0 @@
-// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
- * ATTENTION this header file can only include in .cc file.
- */
-
-#pragma once
-#include "paddle_lite_factory_helper.h" // NOLINT
-#ifndef LITE_WITH_FPGA
-USE_LITE_KERNEL(feed, kHost, kAny, kAny, def);
-USE_LITE_KERNEL(fetch, kHost, kAny, kAny, def);
-USE_LITE_KERNEL(flatten, kHost, kAny, kAny, def);
-USE_LITE_KERNEL(flatten2, kHost, kAny, kAny, def);
-#else
-USE_LITE_KERNEL(feed, kFPGA, kFP16, kNHWC, def);
-USE_LITE_KERNEL(fetch, kFPGA, kFP16, kNHWC, def);
-#endif
-
-// host kernels
-USE_LITE_KERNEL(reshape, kHost, kAny, kAny, def);
-USE_LITE_KERNEL(reshape2, kHost, kAny, kAny, def);
-USE_LITE_KERNEL(multiclass_nms, kHost, kFloat, kNCHW, def);
-
-#ifdef LITE_WITH_ARM
-USE_LITE_KERNEL(fc, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(mul, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(matmul, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(scale, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(softmax, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(lrn, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(decode_bboxes, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(box_coder, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(conv2d, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(depthwise_conv2d, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(elementwise_add, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(elementwise_sub, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(elementwise_mul, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(elementwise_max, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(elementwise_div, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(fusion_elementwise_div_activation, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(fusion_elementwise_add_activation, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(fusion_elementwise_mul_activation, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(fusion_elementwise_max_activation, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(split, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(dropout, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(concat, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(pool2d, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(relu, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(relu6, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(transpose, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(transpose2, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(batch_norm, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(power, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(shuffle_channel, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(yolo_box, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(argmax, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(axpy, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(leaky_relu, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(relu_clipped, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(prelu, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(sigmoid, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(tanh, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(swish, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(log, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(exp, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(conv2d_transpose, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(pad2d, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(prior_box, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(density_prior_box, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(negative, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(crop, kARM, kFloat, kNCHW, def);
-
-USE_LITE_KERNEL(norm, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(sequence_softmax, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(im2sequence, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(bilinear_interp, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(nearest_interp, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(logical_xor, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(logical_and, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(less_than, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(top_k, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(increment, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(write_to_array, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(read_from_array, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(reduce_max, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(sequence_expand, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(sequence_pool, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(shape, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(fill_constant, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(cast, kARM, kFloat, kNCHW, def)
-USE_LITE_KERNEL(slice, kARM, kFloat, kNCHW, def)
-USE_LITE_KERNEL(affine_channel, kARM, kFloat, kNCHW, def)
-USE_LITE_KERNEL(anchor_generator, kARM, kFloat, kNCHW, def)
-USE_LITE_KERNEL(generate_proposals, kARM, kFloat, kNCHW, def)
-USE_LITE_KERNEL(squeeze, kARM, kFloat, kNCHW, def) // for x2paddle
-USE_LITE_KERNEL(squeeze2, kARM, kFloat, kNCHW, def) // for x2paddle
-USE_LITE_KERNEL(expand, kARM, kFloat, kNCHW, def) // for x2paddle
-USE_LITE_KERNEL(roi_align, kARM, kFloat, kNCHW, def)
-USE_LITE_KERNEL(box_clip, kARM, kFloat, kNCHW, def)
-USE_LITE_KERNEL(reduce_mean, kARM, kFloat, kNCHW, def)
-USE_LITE_KERNEL(stack, kARM, kFloat, kNCHW, def)
-USE_LITE_KERNEL(assign_value, kARM, kFloat, kNCHW, def)
-USE_LITE_KERNEL(hard_sigmoid, kARM, kFloat, kNCHW, def)
-
-USE_LITE_KERNEL(calib, kARM, kInt8, kNCHW, fp32_to_int8);
-USE_LITE_KERNEL(calib, kARM, kInt8, kNCHW, int8_to_fp32);
-USE_LITE_KERNEL(calib_once, kARM, kInt8, kNCHW, fp32_to_int8);
-USE_LITE_KERNEL(calib_once, kARM, kInt8, kNCHW, int8_to_fp32);
-USE_LITE_KERNEL(conv2d, kARM, kInt8, kNCHW, int8_out);
-USE_LITE_KERNEL(conv2d, kARM, kInt8, kNCHW, fp32_out);
-USE_LITE_KERNEL(fc, kARM, kInt8, kNCHW, int8out);
-USE_LITE_KERNEL(fc, kARM, kInt8, kNCHW, fp32out);
-USE_LITE_KERNEL(gru_unit, kARM, kFloat, kNCHW, def)
-USE_LITE_KERNEL(gru, kARM, kFloat, kNCHW, def)
-USE_LITE_KERNEL(beam_search_decode, kARM, kFloat, kNCHW, def)
-USE_LITE_KERNEL(beam_search, kARM, kFloat, kNCHW, def)
-USE_LITE_KERNEL(while, kARM, kFloat, kNCHW, def)
-USE_LITE_KERNEL(lod_reset, kARM, kFloat, kNCHW, def)
-USE_LITE_KERNEL(lookup_table, kARM, kFloat, kNCHW, def)
-USE_LITE_KERNEL(is_empty, kARM, kFloat, kNCHW, def)
-USE_LITE_KERNEL(assign, kARM, kFloat, kNCHW, def);
-#endif
-
-#ifdef LITE_WITH_X86
-// NOTE all the X86 kernels are disabled temporarily for kernel are changed.
-// USE_LITE_KERNEL(relu, kX86, kFloat, kNCHW, def);
-// USE_LITE_KERNEL(mul, kX86, kFloat, kNCHW, def);
-// USE_LITE_KERNEL(fc, kX86, kFloat, kNCHW, def);
-USE_LITE_KERNEL(scale, kX86, kFloat, kNCHW, def);
-USE_LITE_KERNEL(slice, kX86, kFloat, kNCHW, def);
-// USE_LITE_KERNEL(fill_constant, kX86, kFloat, kNCHW, def);
-// USE_LITE_KERNEL(square, kX86, kFloat, kNCHW, def);
-// USE_LITE_KERNEL(elementwise_sub, kX86, kFloat, kNCHW, def);
-// USE_LITE_KERNEL(elementwise_add, kX86, kFloat, kNCHW, def);
-// USE_LITE_KERNEL(softmax, kX86, kFloat, kNCHW, def);
-// USE_LITE_KERNEL(dropout, kX86, kFloat, kNCHW, def);
-// USE_LITE_KERNEL(concat, kX86, kFloat, kNCHW, def);
-// USE_LITE_KERNEL(conv2d, kX86, kFloat, kNCHW, def);
-// USE_LITE_KERNEL(depthwise_conv2d, kX86, kFloat, kNCHW, def);
-// USE_LITE_KERNEL(pool2d, kX86, kFloat, kNCHW, def);
-// USE_LITE_KERNEL(batch_norm, kX86, kFloat, kNCHW, def);
-#endif
-
-#ifdef LITE_WITH_CUDA
-USE_LITE_KERNEL(mul, kCUDA, kFloat, kNCHW, def);
-USE_LITE_KERNEL(io_copy, kCUDA, kAny, kAny, host_to_device);
-USE_LITE_KERNEL(io_copy, kCUDA, kAny, kAny, device_to_host);
-USE_LITE_KERNEL(io_copy_once, kCUDA, kAny, kAny, host_to_device);
-USE_LITE_KERNEL(io_copy_once, kCUDA, kAny, kAny, device_to_host);
-USE_LITE_KERNEL(conv2d, kCUDA, kFloat, kNCHW, def);
-USE_LITE_KERNEL(leaky_relu, kCUDA, kFloat, kNCHW, def);
-USE_LITE_KERNEL(nearest_interp, kCUDA, kFloat, kNCHW, def);
-USE_LITE_KERNEL(yolo_box, kCUDA, kFloat, kNCHW, def);
-USE_LITE_KERNEL(concat, kCUDA, kFloat, kNCHW, def);
-#endif
-
-#ifdef LITE_WITH_OPENCL
-USE_LITE_KERNEL(io_copy, kOpenCL, kAny, kAny, host_to_device);
-USE_LITE_KERNEL(io_copy, kOpenCL, kAny, kAny, device_to_host);
-USE_LITE_KERNEL(io_copy_once, kOpenCL, kAny, kAny, host_to_device);
-USE_LITE_KERNEL(io_copy_once, kOpenCL, kAny, kAny, device_to_host);
-
-USE_LITE_KERNEL(fc, kOpenCL, kFloat, kNCHW, def);
-USE_LITE_KERNEL(mul, kOpenCL, kFloat, kNCHW, def);
-USE_LITE_KERNEL(elementwise_add, kOpenCL, kFloat, kNCHW, def);
-USE_LITE_KERNEL(fusion_elementwise_add_activation, kOpenCL, kFloat, kNCHW, def);
-USE_LITE_KERNEL(pool2d, kOpenCL, kFloat, kNCHW, def);
-USE_LITE_KERNEL(relu, kOpenCL, kFloat, kNCHW, def);
-USE_LITE_KERNEL(depthwise_conv2d, kOpenCL, kFloat, kNCHW, def);
-USE_LITE_KERNEL(conv2d, kOpenCL, kFloat, kNCHW, def);
-#endif
-
-#ifdef LITE_WITH_NPU
-USE_LITE_KERNEL(graph_op, kNPU, kFloat, kNCHW, def);
-#endif
-#ifdef LITE_WITH_FPGA
-USE_LITE_KERNEL(relu, kFPGA, kFP16, kNHWC, def);
-USE_LITE_KERNEL(conv2d, kFPGA, kFP16, kNHWC, def);
-USE_LITE_KERNEL(elementwise_add, kFPGA, kFP16, kNHWC, def);
-USE_LITE_KERNEL(fusion_elementwise_add_activation, kFPGA, kFP16, kNHWC, def);
-USE_LITE_KERNEL(fc, kFPGA, kFP16, kNHWC, def);
-USE_LITE_KERNEL(pool2d, kFPGA, kFP16, kNHWC, def);
-USE_LITE_KERNEL(scale, kFPGA, kFP16, kNHWC, def);
-USE_LITE_KERNEL(softmax, kFPGA, kFP16, kNHWC, def);
-USE_LITE_KERNEL(io_copy, kFPGA, kAny, kAny, host_to_device);
-USE_LITE_KERNEL(io_copy, kFPGA, kAny, kAny, device_to_host);
-USE_LITE_KERNEL(io_copy_once, kFPGA, kAny, kAny, host_to_device_once);
-USE_LITE_KERNEL(io_copy_once, kFPGA, kAny, kAny, device_to_host_once);
-USE_LITE_KERNEL(calib, kFPGA, kFP16, kNHWC, fp32_to_fp16_fpga);
-USE_LITE_KERNEL(calib, kFPGA, kFP16, kNHWC, fp16_to_fp32_fpga);
-USE_LITE_KERNEL(calib_once, kFPGA, kFP16, kNHWC, fp32_to_fp16_fpga);
-USE_LITE_KERNEL(calib_once, kFPGA, kFP16, kNHWC, fp16_to_fp32_fpga);
-USE_LITE_KERNEL(layout, kFPGA, kAny, kNHWC, hwc_to_chw_fpga_fp16);
-USE_LITE_KERNEL(layout, kFPGA, kAny, kNHWC, chw_to_hwc_fpga_fp16);
-USE_LITE_KERNEL(layout_once, kFPGA, kAny, kNHWC, hwc_to_chw_fpga_fp16);
-USE_LITE_KERNEL(layout_once, kFPGA, kAny, kNHWC, chw_to_hwc_fpga_fp16);
-#endif
diff --git a/lite/api/_paddle_use_ops.h b/lite/api/_paddle_use_ops.h
deleted file mode 100644
index 890c57c4aa6de9749ab2e173e124e518950431bd..0000000000000000000000000000000000000000
--- a/lite/api/_paddle_use_ops.h
+++ /dev/null
@@ -1,127 +0,0 @@
-// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#pragma once
-
-// ATTENTION This can only include in a .cc file.
-
-#include "paddle_lite_factory_helper.h" // NOLINT
-
-USE_LITE_OP(mul);
-USE_LITE_OP(matmul);
-USE_LITE_OP(fc);
-USE_LITE_OP(relu);
-USE_LITE_OP(relu6);
-USE_LITE_OP(scale);
-USE_LITE_OP(feed);
-USE_LITE_OP(lrn);
-USE_LITE_OP(decode_bboxes);
-USE_LITE_OP(box_coder);
-USE_LITE_OP(fetch);
-USE_LITE_OP(io_copy);
-USE_LITE_OP(io_copy_once);
-USE_LITE_OP(elementwise_add)
-USE_LITE_OP(elementwise_sub)
-USE_LITE_OP(elementwise_mul)
-USE_LITE_OP(elementwise_max)
-USE_LITE_OP(elementwise_div)
-USE_LITE_OP(fusion_elementwise_add_activation)
-USE_LITE_OP(fusion_elementwise_mul_activation)
-USE_LITE_OP(fusion_elementwise_max_activation)
-USE_LITE_OP(fusion_elementwise_div_activation)
-USE_LITE_OP(square)
-USE_LITE_OP(softmax)
-USE_LITE_OP(dropout)
-USE_LITE_OP(concat)
-USE_LITE_OP(conv2d)
-USE_LITE_OP(depthwise_conv2d)
-USE_LITE_OP(pool2d)
-USE_LITE_OP(batch_norm)
-USE_LITE_OP(fusion_elementwise_sub_activation)
-USE_LITE_OP(transpose)
-USE_LITE_OP(transpose2)
-USE_LITE_OP(arg_max)
-USE_LITE_OP(axpy)
-USE_LITE_OP(leaky_relu)
-USE_LITE_OP(relu_clipped)
-USE_LITE_OP(prelu)
-USE_LITE_OP(sigmoid)
-USE_LITE_OP(tanh)
-USE_LITE_OP(swish)
-USE_LITE_OP(log)
-USE_LITE_OP(exp)
-USE_LITE_OP(conv2d_transpose)
-USE_LITE_OP(negative)
-USE_LITE_OP(pad2d)
-USE_LITE_OP(power)
-USE_LITE_OP(shuffle_channel)
-USE_LITE_OP(yolo_box)
-USE_LITE_OP(bilinear_interp)
-USE_LITE_OP(nearest_interp)
-USE_LITE_OP(reduce_mean)
-USE_LITE_OP(stack)
-
-USE_LITE_OP(assign);
-USE_LITE_OP(crop)
-USE_LITE_OP(prior_box)
-USE_LITE_OP(density_prior_box)
-USE_LITE_OP(reshape)
-USE_LITE_OP(reshape2)
-USE_LITE_OP(flatten)
-USE_LITE_OP(flatten2)
-USE_LITE_OP(split)
-USE_LITE_OP(fake_quantize_moving_average_abs_max);
-USE_LITE_OP(fake_dequantize_max_abs);
-USE_LITE_OP(fake_quantize_range_abs_max);
-USE_LITE_OP(calib);
-USE_LITE_OP(calib_once);
-USE_LITE_OP(norm);
-USE_LITE_OP(layout);
-USE_LITE_OP(layout_once);
-USE_LITE_OP(im2sequence);
-USE_LITE_OP(sequence_softmax);
-USE_LITE_OP(logical_xor);
-USE_LITE_OP(logical_and);
-USE_LITE_OP(less_than);
-USE_LITE_OP(top_k);
-USE_LITE_OP(increment);
-USE_LITE_OP(write_to_array);
-USE_LITE_OP(read_from_array);
-USE_LITE_OP(gru_unit)
-USE_LITE_OP(gru)
-USE_LITE_OP(beam_search_decode)
-USE_LITE_OP(beam_search)
-USE_LITE_OP(fill_constant)
-USE_LITE_OP(while)
-USE_LITE_OP(lod_reset)
-USE_LITE_OP(lookup_table)
-USE_LITE_OP(multiclass_nms)
-USE_LITE_OP(graph_op)
-USE_LITE_OP(sequence_expand)
-USE_LITE_OP(sequence_pool)
-USE_LITE_OP(reduce_max)
-USE_LITE_OP(is_empty)
-USE_LITE_OP(shape)
-USE_LITE_OP(slice)
-USE_LITE_OP(cast)
-USE_LITE_OP(affine_channel)
-USE_LITE_OP(anchor_generator)
-USE_LITE_OP(generate_proposals)
-USE_LITE_OP(squeeze) // for x2paddle
-USE_LITE_OP(squeeze2) // for x2paddle
-USE_LITE_OP(expand) // for x2paddle
-USE_LITE_OP(roi_align)
-USE_LITE_OP(box_clip)
-USE_LITE_OP(assign_value)
-USE_LITE_OP(hard_sigmoid)
diff --git a/lite/api/android/.gitignore b/lite/api/android/.gitignore
deleted file mode 100644
index a1d6334395db70de3c8e089d14af0f3c2a3be430..0000000000000000000000000000000000000000
--- a/lite/api/android/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-/bin/
-.classpath
diff --git a/lite/api/android/CMakeLists.txt b/lite/api/android/CMakeLists.txt
deleted file mode 100644
index 7f31f7e9479580d9e7a47804db2a45b4f6d4e1bb..0000000000000000000000000000000000000000
--- a/lite/api/android/CMakeLists.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-if ((NOT LITE_WITH_JAVA) OR (NOT LITE_WITH_ARM))
- return()
-endif()
-
-add_subdirectory(jni)
diff --git a/lite/api/android/jni/.gitignore b/lite/api/android/jni/.gitignore
deleted file mode 100644
index 1299d2738c0d3321a46024d31e24049bef9ace9a..0000000000000000000000000000000000000000
--- a/lite/api/android/jni/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-/PaddleListTest.class
-/PaddleLite.class
-/bin/
diff --git a/lite/api/android/jni/CMakeLists.txt b/lite/api/android/jni/CMakeLists.txt
deleted file mode 100644
index b2f5671a7b3de6cd4d22b4f1fcfd17f3f5f48310..0000000000000000000000000000000000000000
--- a/lite/api/android/jni/CMakeLists.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-if ((NOT LITE_WITH_ARM) OR (NOT LITE_WITH_JAVA))
- return()
-endif()
-
-include(UseJava)
-find_package(Java REQUIRED)
-
-# We are only interested in finding jni.h: we do not care about extended JVM
-# functionality or the AWT library.
-set(JAVA_AWT_LIBRARY NotNeeded)
-set(JAVA_JVM_LIBRARY NotNeeded)
-set(JAVA_INCLUDE_PATH2 NotNeeded)
-set(JAVA_AWT_INCLUDE_PATH NotNeeded)
-find_package(JNI REQUIRED)
-
-# Generate PaddlePredictor.jar
-include_directories(${JNI_INCLUDE_DIRS})
-add_jar(PaddlePredictor
- src/com/baidu/paddle/lite/ConfigBase.java
- src/com/baidu/paddle/lite/CxxConfig.java
- src/com/baidu/paddle/lite/MobileConfig.java
- src/com/baidu/paddle/lite/PaddleLiteInitializer.java
- src/com/baidu/paddle/lite/PaddlePredictor.java
- src/com/baidu/paddle/lite/PowerMode.java
- src/com/baidu/paddle/lite/Place.java
- src/com/baidu/paddle/lite/Tensor.java)
-get_target_property(_jarFile PaddlePredictor JAR_FILE)
-get_target_property(_classDir PaddlePredictor CLASSDIR)
-set(_stubDir "${CMAKE_CURRENT_BINARY_DIR}")
-
-# Generate native headers
-add_custom_target(
- paddle_lite_jni_header ALL
- COMMAND ${Java_JAVAH_EXECUTABLE} -verbose
- -classpath ${_classDir}
- -o "${CMAKE_BINARY_DIR}/lite/api/android/jni/native/paddle_lite_jni.h"
- -jni
- com.baidu.paddle.lite.PaddlePredictor
- COMMAND ${Java_JAVAH_EXECUTABLE} -verbose
- -classpath ${_classDir}
- -o "${CMAKE_BINARY_DIR}/lite/api/android/jni/native/tensor_jni.h"
- -jni
- com.baidu.paddle.lite.Tensor
- COMMAND ${Java_JAVAH_EXECUTABLE} -verbose
- -classpath ${_classDir}
- -o "${CMAKE_BINARY_DIR}/lite/api/android/jni/native/paddle_init_jni.h"
- -jni
- com.baidu.paddle.lite.PaddleLiteInitializer
- DEPENDS PaddlePredictor
-)
-
-add_subdirectory(native)
diff --git a/lite/api/android/jni/native/CMakeLists.txt b/lite/api/android/jni/native/CMakeLists.txt
deleted file mode 100644
index afe051a437f4de83931bdaa3f2d03427b78d13ad..0000000000000000000000000000000000000000
--- a/lite/api/android/jni/native/CMakeLists.txt
+++ /dev/null
@@ -1,32 +0,0 @@
-# Generate paddle_lite_jni.so
-
-if (LITE_ON_TINY_PUBLISH)
- set(CMAKE_CXX_FLAGS_RELEASE "-Os -DNDEBUG")
- set(CMAKE_C_FLAGS_RELEASE "-Os -DNDEBUG")
- set(lib_DEPS light_api paddle_api paddle_api_light)
-else()
- set(lib_DEPS light_api cxx_api paddle_api_full paddle_api paddle_api_light)
-endif()
-
-include_directories(${JNI_INCLUDE_DIRS} ${_classDir} ${_stubDir})
-if (NOT LITE_ON_TINY_PUBLISH)
- lite_cc_library(paddle_lite_jni MODULE
- SRCS paddle_lite_jni.cc tensor_jni.cc
- DEPS ${lib_DEPS}
- ARM_DEPS ${arm_kernels} NPU_DEPS ${npu_kernels})
- # Unlike static library, module library has to link target to be able to work
- # as a single .so lib.
- target_link_libraries(paddle_lite_jni ${lib_DEPS} ${arm_kernels} ${npu_kernels})
-else()
- add_library(paddle_lite_jni SHARED "")
- target_sources(paddle_lite_jni PUBLIC ${__lite_cc_files} paddle_lite_jni.cc tensor_jni.cc)
- add_dependencies(paddle_lite_jni op_list_h kernel_list_h)
-endif()
-
-if (APPLE)
- # MacOS only accepts JNI lib ends with .jnilib or .dylib
- set_target_properties(paddle_lite_jni PROPERTIES SUFFIX ".jnilib")
-elseif (WIN32)
- # Windows only accepts JNI lib ends with .dll
- set_target_properties(paddle_lite_jni PROPERTIES SUFFIX ".dll")
-endif (APPLE)
diff --git a/lite/api/android/jni/native/convert_util_jni.h b/lite/api/android/jni/native/convert_util_jni.h
deleted file mode 100644
index ae987c330dd0ad415a2da783366483c58789c56e..0000000000000000000000000000000000000000
--- a/lite/api/android/jni/native/convert_util_jni.h
+++ /dev/null
@@ -1,197 +0,0 @@
-/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#pragma once
-
-#include
-#include
-#include
-
-#include "lite/api/light_api.h"
-#include "lite/api/paddle_api.h"
-#include "lite/api/paddle_place.h"
-
-namespace paddle {
-namespace lite_api {
-
-inline std::string jstring_to_cpp_string(JNIEnv *env, jstring jstr) {
- // In java, a unicode char will be encoded using 2 bytes (utf16).
- // so jstring will contain characters utf16. std::string in c++ is
- // essentially a string of bytes, not characters, so if we want to
- // pass jstring from JNI to c++, we have convert utf16 to bytes.
- if (!jstr) {
- return "";
- }
- const jclass stringClass = env->GetObjectClass(jstr);
- const jmethodID getBytes =
- env->GetMethodID(stringClass, "getBytes", "(Ljava/lang/String;)[B");
- const jbyteArray stringJbytes = (jbyteArray)env->CallObjectMethod(
- jstr, getBytes, env->NewStringUTF("UTF-8"));
-
- size_t length = (size_t)env->GetArrayLength(stringJbytes);
- jbyte *pBytes = env->GetByteArrayElements(stringJbytes, NULL);
-
- std::string ret = std::string(reinterpret_cast(pBytes), length);
- env->ReleaseByteArrayElements(stringJbytes, pBytes, JNI_ABORT);
-
- env->DeleteLocalRef(stringJbytes);
- env->DeleteLocalRef(stringClass);
- return ret;
-}
-
-inline jfloatArray cpp_array_to_jfloatarray(JNIEnv *env,
- const float *buf,
- int64_t len) {
- jfloatArray result = env->NewFloatArray(len);
- env->SetFloatArrayRegion(result, 0, len, buf);
- return result;
-}
-
-inline jintArray cpp_array_to_jintarray(JNIEnv *env,
- const int *buf,
- int64_t len) {
- jintArray result = env->NewIntArray(len);
- env->SetIntArrayRegion(result, 0, len, buf);
- return result;
-}
-
-inline jbyteArray cpp_array_to_jbytearray(JNIEnv *env,
- const int8_t *buf,
- int64_t len) {
- jbyteArray result = env->NewByteArray(len);
- env->SetByteArrayRegion(result, 0, len, buf);
- return result;
-}
-
-inline jlongArray int64_vector_to_jlongarray(JNIEnv *env,
- const std::vector &vec) {
- jlongArray result = env->NewLongArray(vec.size());
- jlong *buf = new jlong[vec.size()];
- for (size_t i = 0; i < vec.size(); ++i) {
- buf[i] = (jlong)vec[i];
- }
- env->SetLongArrayRegion(result, 0, vec.size(), buf);
- delete[] buf;
- return result;
-}
-
-inline std::vector jlongarray_to_int64_vector(JNIEnv *env,
- jlongArray dims) {
- int dim_size = env->GetArrayLength(dims);
- jlong *dim_nums = env->GetLongArrayElements(dims, nullptr);
- std::vector dim_vec(dim_nums, dim_nums + dim_size);
- env->ReleaseLongArrayElements(dims, dim_nums, 0);
- return dim_vec;
-}
-
-/**
- * Converts Java com.baidu.paddle.lite.Place to c++ paddle::lite_api::Place.
- */
-inline Place jplace_to_cpp_place(JNIEnv *env, jobject java_place) {
- jclass place_jclazz = env->GetObjectClass(java_place);
-
- jmethodID target_method =
- env->GetMethodID(place_jclazz, "getTargetInt", "()I");
- jmethodID precision_method =
- env->GetMethodID(place_jclazz, "getPrecisionInt", "()I");
- jmethodID data_layout_method =
- env->GetMethodID(place_jclazz, "getDataLayoutInt", "()I");
- jmethodID device_method = env->GetMethodID(place_jclazz, "getDevice", "()I");
-
- int target = env->CallIntMethod(java_place, target_method);
- int precision = env->CallIntMethod(java_place, precision_method);
- int data_layout = env->CallIntMethod(java_place, data_layout_method);
- int device = env->CallIntMethod(java_place, device_method);
-
- return Place(static_cast(target),
- static_cast(precision),
- static_cast(data_layout),
- device);
-}
-
-inline CxxConfig jcxxconfig_to_cpp_cxxconfig(JNIEnv *env, jobject jcxxconfig) {
- jclass cxxconfig_jclazz = env->GetObjectClass(jcxxconfig);
-
- jmethodID model_dir_method =
- env->GetMethodID(cxxconfig_jclazz, "getModelDir", "()Ljava/lang/String;");
- jmethodID preferred_place_method = env->GetMethodID(
- cxxconfig_jclazz, "getPreferredPlace", "()Lcom/baidu/paddle/lite/Place;");
- jmethodID valid_places_method = env->GetMethodID(
- cxxconfig_jclazz, "getValidPlaces", "()[Lcom/baidu/paddle/lite/Place;");
-
- CxxConfig config;
-
- jstring java_model_dir =
- (jstring)env->CallObjectMethod(jcxxconfig, model_dir_method);
- if (java_model_dir != nullptr) {
- std::string cpp_model_dir = jstring_to_cpp_string(env, java_model_dir);
- config.set_model_dir(cpp_model_dir);
- }
-
- jobject java_preferred_place =
- env->CallObjectMethod(jcxxconfig, preferred_place_method);
- if (java_preferred_place != nullptr) {
- Place cpp_preferred_place = jplace_to_cpp_place(env, java_preferred_place);
- config.set_preferred_place(cpp_preferred_place);
- }
-
- jobject object_valid_places =
- env->CallObjectMethod(jcxxconfig, valid_places_method);
- jobjectArray *java_valid_places =
- reinterpret_cast(&object_valid_places);
- if (java_valid_places != nullptr) {
- int valid_place_count = env->GetArrayLength(*java_valid_places);
- std::vector cpp_valid_places;
- for (int i = 0; i < valid_place_count; ++i) {
- jobject jplace = env->GetObjectArrayElement(*java_valid_places, i);
- cpp_valid_places.push_back(jplace_to_cpp_place(env, jplace));
- }
- config.set_valid_places(cpp_valid_places);
- }
-
- return config;
-}
-
-inline MobileConfig jmobileconfig_to_cpp_mobileconfig(JNIEnv *env,
- jobject jmobileconfig) {
- jclass mobileconfig_jclazz = env->GetObjectClass(jmobileconfig);
-
- MobileConfig config;
-
- // set model dir
- jmethodID model_dir_method = env->GetMethodID(
- mobileconfig_jclazz, "getModelDir", "()Ljava/lang/String;");
- jstring java_model_dir =
- (jstring)env->CallObjectMethod(jmobileconfig, model_dir_method);
- if (java_model_dir != nullptr) {
- std::string cpp_model_dir = jstring_to_cpp_string(env, java_model_dir);
- config.set_model_dir(cpp_model_dir);
- }
-
- // set threads
- jmethodID threads_method =
- env->GetMethodID(mobileconfig_jclazz, "getThreads", "()I");
- int threads = env->CallIntMethod(jmobileconfig, threads_method);
- config.set_threads(threads);
-
- // set power mode
- jmethodID power_mode_method =
- env->GetMethodID(mobileconfig_jclazz, "getPowerModeInt", "()I");
- int power_mode = env->CallIntMethod(jmobileconfig, power_mode_method);
- config.set_power_mode(static_cast(power_mode));
-
- return config;
-}
-
-} // namespace lite_api
-} // namespace paddle
diff --git a/lite/api/android/jni/native/paddle_lite_jni.cc b/lite/api/android/jni/native/paddle_lite_jni.cc
deleted file mode 100644
index aa4ece68189f002c9e183a042510021fcb602f75..0000000000000000000000000000000000000000
--- a/lite/api/android/jni/native/paddle_lite_jni.cc
+++ /dev/null
@@ -1,164 +0,0 @@
-/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#include "lite/api/android/jni/native/paddle_lite_jni.h"
-
-#include
-#include
-#include
-#include
-
-#include "lite/api/android/jni/native/convert_util_jni.h"
-#include "lite/api/light_api.h"
-#include "lite/api/paddle_api.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-namespace paddle {
-namespace lite_api {
-
-inline static std::shared_ptr *getPaddlePredictorPointer(
- JNIEnv *env, jobject jpaddle_predictor) {
- jclass jclazz = env->GetObjectClass(jpaddle_predictor);
- jfieldID jfield = env->GetFieldID(jclazz, "cppPaddlePredictorPointer", "J");
- jlong java_pointer = env->GetLongField(jpaddle_predictor, jfield);
- std::shared_ptr *ptr =
- reinterpret_cast *>(java_pointer);
- return ptr;
-}
-
-JNIEXPORT jboolean JNICALL Java_com_baidu_paddle_lite_PaddlePredictor_run(
- JNIEnv *env, jobject jpaddle_predictor) {
- std::shared_ptr *predictor =
- getPaddlePredictorPointer(env, jpaddle_predictor);
- if (predictor == nullptr || (*predictor == nullptr)) {
- return JNI_FALSE;
- }
- (*predictor)->Run();
- return JNI_TRUE;
-}
-
-JNIEXPORT jboolean JNICALL
-Java_com_baidu_paddle_lite_PaddlePredictor_saveOptimizedModel(
- JNIEnv *env, jobject jpaddle_predictor, jstring model_dir) {
- std::shared_ptr *predictor =
- getPaddlePredictorPointer(env, jpaddle_predictor);
- if (predictor == nullptr || (*predictor == nullptr)) {
- return JNI_FALSE;
- }
- (*predictor)->SaveOptimizedModel(jstring_to_cpp_string(env, model_dir));
- return JNI_TRUE;
-}
-
-JNIEXPORT jlong JNICALL
-Java_com_baidu_paddle_lite_PaddlePredictor_getInputCppTensorPointer(
- JNIEnv *env, jobject jpaddle_predictor, jint offset) {
- std::shared_ptr *predictor =
- getPaddlePredictorPointer(env, jpaddle_predictor);
- if (predictor == nullptr || (*predictor == nullptr)) {
- return 0;
- }
- std::unique_ptr tensor =
- (*predictor)->GetInput(static_cast(offset));
- std::unique_ptr *cpp_tensor_pointer =
- new std::unique_ptr(std::move(tensor));
- return reinterpret_cast(cpp_tensor_pointer);
-}
-
-JNIEXPORT jlong JNICALL
-Java_com_baidu_paddle_lite_PaddlePredictor_getOutputCppTensorPointer(
- JNIEnv *env, jobject jpaddle_predictor, jint offset) {
- std::shared_ptr *predictor =
- getPaddlePredictorPointer(env, jpaddle_predictor);
- if (predictor == nullptr || (*predictor == nullptr)) {
- return 0;
- }
- std::unique_ptr tensor =
- (*predictor)->GetOutput(static_cast(offset));
- std::unique_ptr *cpp_tensor_pointer =
- new std::unique_ptr(std::move(tensor));
- return reinterpret_cast(cpp_tensor_pointer);
-}
-
-JNIEXPORT jlong JNICALL
-Java_com_baidu_paddle_lite_PaddlePredictor_getCppTensorPointerByName(
- JNIEnv *env, jobject jpaddle_predictor, jstring name) {
- std::string cpp_name = jstring_to_cpp_string(env, name);
- std::shared_ptr *predictor =
- getPaddlePredictorPointer(env, jpaddle_predictor);
- if (predictor == nullptr || (*predictor == nullptr)) {
- return 0;
- }
- std::unique_ptr tensor = (*predictor)->GetTensor(cpp_name);
- std::unique_ptr *cpp_tensor_pointer =
- new std::unique_ptr(std::move(tensor));
- return reinterpret_cast(cpp_tensor_pointer);
-}
-
-JNIEXPORT jlong JNICALL
-Java_com_baidu_paddle_lite_PaddlePredictor_newCppPaddlePredictor__Lcom_baidu_\
-paddle_lite_CxxConfig_2(JNIEnv *env,
- jobject jpaddle_predictor,
- jobject jcxxconfig) {
-#ifndef LITE_ON_TINY_PUBLISH
- CxxConfig config = jcxxconfig_to_cpp_cxxconfig(env, jcxxconfig);
- std::shared_ptr predictor =
- paddle::lite_api::CreatePaddlePredictor(config);
- if (predictor == nullptr) {
- return 0;
- }
- std::shared_ptr *predictor_pointer =
- new std::shared_ptr(predictor);
- return reinterpret_cast(predictor_pointer);
-#else
- return 0;
-#endif
-}
-
-JNIEXPORT jlong JNICALL
-Java_com_baidu_paddle_lite_PaddlePredictor_newCppPaddlePredictor__Lcom_baidu_\
-paddle_lite_MobileConfig_2(JNIEnv *env,
- jobject jpaddle_predictor,
- jobject jmobileconfig) {
- MobileConfig config = jmobileconfig_to_cpp_mobileconfig(env, jmobileconfig);
- std::shared_ptr predictor =
- paddle::lite_api::CreatePaddlePredictor(config);
- if (predictor == nullptr) {
- return 0;
- }
- std::shared_ptr *predictor_pointer =
- new std::shared_ptr(predictor);
- return reinterpret_cast(predictor_pointer);
-}
-
-JNIEXPORT jboolean JNICALL
-Java_com_baidu_paddle_lite_PaddlePredictor_deleteCppPaddlePredictor(
- JNIEnv *env, jobject jpaddle_predictor, jlong java_pointer) {
- if (java_pointer == 0) {
- return JNI_FALSE;
- }
- std::shared_ptr *ptr =
- reinterpret_cast *>(java_pointer);
- ptr->reset();
- delete ptr;
- return JNI_TRUE;
-}
-
-} // namespace lite_api
-} // namespace paddle
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/lite/api/android/jni/native/paddle_lite_jni.h b/lite/api/android/jni/native/paddle_lite_jni.h
deleted file mode 100644
index 913e9a4c3a87ca3e649b86d020c3a4a8fd458a0b..0000000000000000000000000000000000000000
--- a/lite/api/android/jni/native/paddle_lite_jni.h
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#pragma once
-/* DO NOT EDIT THIS FILE - it is machine generated */
-#include
-/* Header for class com_baidu_paddle_lite_PaddlePredictor */
-#include "lite/api/paddle_lite_factory_helper.h"
-#include "lite/api/paddle_use_kernels.h"
-#include "lite/api/paddle_use_ops.h"
-#ifndef LITE_ON_TINY_PUBLISH
-#include "lite/api/paddle_use_passes.h"
-#endif
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-namespace paddle {
-namespace lite_api {
-
-/*
- * Class: com_baidu_paddle_lite_PaddlePredictor
- * Method: run
- * Signature: ()Z
- */
-JNIEXPORT jboolean JNICALL
-Java_com_baidu_paddle_lite_PaddlePredictor_run(JNIEnv *, jobject);
-
-/*
- * Class: com_baidu_paddle_lite_PaddlePredictor
- * Method: saveOptimizedModel
- * Signature: (Ljava/lang/String;)Z
- */
-JNIEXPORT jboolean JNICALL
-Java_com_baidu_paddle_lite_PaddlePredictor_saveOptimizedModel(JNIEnv *,
- jobject,
- jstring);
-
-/*
- * Class: com_baidu_paddle_lite_PaddlePredictor
- * Method: getInputCppTensorPointer
- * Signature: (I)J
- */
-JNIEXPORT jlong JNICALL
-Java_com_baidu_paddle_lite_PaddlePredictor_getInputCppTensorPointer(JNIEnv *,
- jobject,
- jint);
-
-/*
- * Class: com_baidu_paddle_lite_PaddlePredictor
- * Method: getOutputCppTensorPointer
- * Signature: (I)J
- */
-JNIEXPORT jlong JNICALL
-Java_com_baidu_paddle_lite_PaddlePredictor_getOutputCppTensorPointer(JNIEnv *,
- jobject,
- jint);
-
-/*
- * Class: com_baidu_paddle_lite_PaddlePredictor
- * Method: getCppTensorPointerByName
- * Signature: (Ljava/lang/String;)J
- */
-JNIEXPORT jlong JNICALL
-Java_com_baidu_paddle_lite_PaddlePredictor_getCppTensorPointerByName(JNIEnv *,
- jobject,
- jstring);
-
-/*
- * Class: com_baidu_paddle_lite_PaddlePredictor
- * Method: newCppPaddlePredictor
- * Signature: (Lcom/baidu/paddle/lite/CxxConfig;)J
- */
-JNIEXPORT jlong JNICALL
-Java_com_baidu_paddle_lite_PaddlePredictor_newCppPaddlePredictor__Lcom_baidu_\
-paddle_lite_CxxConfig_2(JNIEnv *, jobject, jobject);
-
-/*
- * Class: com_baidu_paddle_lite_PaddlePredictor
- * Method: newCppPaddlePredictor
- * Signature: (Lcom/baidu/paddle/lite/MobileConfig;)J
- */
-JNIEXPORT jlong JNICALL
-Java_com_baidu_paddle_lite_PaddlePredictor_newCppPaddlePredictor__Lcom_baidu_\
-paddle_lite_MobileConfig_2(JNIEnv *, jobject, jobject);
-
-/*
- * Class: com_baidu_paddle_lite_PaddlePredictor
- * Method: deleteCppPaddlePredictor
- * Signature: (J)Z
- */
-JNIEXPORT jboolean JNICALL
-Java_com_baidu_paddle_lite_PaddlePredictor_deleteCppPaddlePredictor(JNIEnv *,
- jobject,
- jlong);
-
-} // namespace lite_api
-} // namespace paddle
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/lite/api/android/jni/native/tensor_jni.cc b/lite/api/android/jni/native/tensor_jni.cc
deleted file mode 100644
index 59cafa19399c4d265915e2dac8653e9ed7d10851..0000000000000000000000000000000000000000
--- a/lite/api/android/jni/native/tensor_jni.cc
+++ /dev/null
@@ -1,168 +0,0 @@
-/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#include "lite/api/android/jni/native/tensor_jni.h"
-
-#include
-#include
-
-#include "lite/api/android/jni/native/convert_util_jni.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-namespace paddle {
-namespace lite_api {
-
-inline static int64_t product(const std::vector &vec) {
- if (vec.empty()) {
- return 0;
- }
- int64_t result = 1;
- for (int64_t d : vec) {
- result *= d;
- }
- return result;
-}
-
-inline static bool is_const_tensor(JNIEnv *env, jobject jtensor) {
- jclass jclazz = env->GetObjectClass(jtensor);
- jfieldID jfield = env->GetFieldID(jclazz, "readOnly", "Z");
- jboolean read_only = env->GetBooleanField(jtensor, jfield);
- return static_cast(read_only);
-}
-
-inline static std::unique_ptr *get_writable_tensor_pointer(
- JNIEnv *env, jobject jtensor) {
- jclass jclazz = env->GetObjectClass(jtensor);
- jfieldID jfield = env->GetFieldID(jclazz, "cppTensorPointer", "J");
- jlong java_pointer = env->GetLongField(jtensor, jfield);
- std::unique_ptr *ptr =
- reinterpret_cast *>(java_pointer);
- return ptr;
-}
-
-inline static std::unique_ptr *get_read_only_tensor_pointer(
- JNIEnv *env, jobject jtensor) {
- jclass jclazz = env->GetObjectClass(jtensor);
- jfieldID jfield = env->GetFieldID(jclazz, "cppTensorPointer", "J");
- jlong java_pointer = env->GetLongField(jtensor, jfield);
- std::unique_ptr *ptr =
- reinterpret_cast *>(java_pointer);
- return ptr;
-}
-
-JNIEXPORT jboolean JNICALL Java_com_baidu_paddle_lite_Tensor_nativeResize(
- JNIEnv *env, jobject jtensor, jlongArray dims) {
- std::unique_ptr *tensor = get_writable_tensor_pointer(env, jtensor);
- if (tensor == nullptr || (*tensor == nullptr)) {
- return JNI_FALSE;
- }
- std::vector shape = jlongarray_to_int64_vector(env, dims);
- (*tensor)->Resize(shape);
- return JNI_TRUE;
-}
-
-JNIEXPORT jlongArray JNICALL
-Java_com_baidu_paddle_lite_Tensor_shape(JNIEnv *env, jobject jtensor) {
- if (is_const_tensor(env, jtensor)) {
- std::unique_ptr *tensor =
- get_read_only_tensor_pointer(env, jtensor);
- std::vector shape = (*tensor)->shape();
- return int64_vector_to_jlongarray(env, shape);
- } else {
- std::unique_ptr *tensor = get_writable_tensor_pointer(env, jtensor);
- std::vector shape = (*tensor)->shape();
- return int64_vector_to_jlongarray(env, shape);
- }
-}
-
-JNIEXPORT jboolean JNICALL Java_com_baidu_paddle_lite_Tensor_nativeSetData___3F(
- JNIEnv *env, jobject jtensor, jfloatArray buf) {
- std::unique_ptr *tensor = get_writable_tensor_pointer(env, jtensor);
- if (tensor == nullptr || (*tensor == nullptr)) {
- return JNI_FALSE;
- }
- int64_t buf_size = (int64_t)env->GetArrayLength(buf);
- if (buf_size != product((*tensor)->shape())) {
- return JNI_FALSE;
- }
-
- float *input = (*tensor)->mutable_data();
- env->GetFloatArrayRegion(buf, 0, buf_size, input);
- return JNI_TRUE;
-}
-
-JNIEXPORT jboolean JNICALL Java_com_baidu_paddle_lite_Tensor_nativeSetData___3B(
- JNIEnv *env, jobject jtensor, jbyteArray buf) {
- std::unique_ptr *tensor = get_writable_tensor_pointer(env, jtensor);
- if (tensor == nullptr || (*tensor == nullptr)) {
- return JNI_FALSE;
- }
- int64_t buf_size = (int64_t)env->GetArrayLength(buf);
- if (buf_size != product((*tensor)->shape())) {
- return JNI_FALSE;
- }
-
- int8_t *input = (*tensor)->mutable_data();
- env->GetByteArrayRegion(buf, 0, buf_size, input);
- return JNI_TRUE;
-}
-
-JNIEXPORT jfloatArray JNICALL
-Java_com_baidu_paddle_lite_Tensor_getFloatData(JNIEnv *env, jobject jtensor) {
- if (is_const_tensor(env, jtensor)) {
- std::unique_ptr *tensor =
- get_read_only_tensor_pointer(env, jtensor);
- return cpp_array_to_jfloatarray(
- env, (*tensor)->data(), product((*tensor)->shape()));
- } else {
- std::unique_ptr *tensor = get_writable_tensor_pointer(env, jtensor);
- return cpp_array_to_jfloatarray(
- env, (*tensor)->data(), product((*tensor)->shape()));
- }
-}
-
-JNIEXPORT jbyteArray JNICALL
-Java_com_baidu_paddle_lite_Tensor_getByteData(JNIEnv *env, jobject jtensor) {
- if (is_const_tensor(env, jtensor)) {
- std::unique_ptr *tensor =
- get_read_only_tensor_pointer(env, jtensor);
- return cpp_array_to_jbytearray(
- env, (*tensor)->data(), product((*tensor)->shape()));
- } else {
- std::unique_ptr *tensor = get_writable_tensor_pointer(env, jtensor);
- return cpp_array_to_jbytearray(
- env, (*tensor)->data(), product((*tensor)->shape()));
- }
-}
-
-JNIEXPORT jboolean JNICALL Java_com_baidu_paddle_lite_Tensor_deleteCppTensor(
- JNIEnv *env, jobject jtensor, jlong java_pointer) {
- if (java_pointer == 0) {
- return JNI_FALSE;
- }
- std::unique_ptr *ptr =
- reinterpret_cast *>(java_pointer);
- ptr->reset();
- delete ptr;
- return JNI_TRUE;
-}
-
-} // namespace lite_api
-} // namespace paddle
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/lite/api/android/jni/native/tensor_jni.h b/lite/api/android/jni/native/tensor_jni.h
deleted file mode 100644
index 34c35b6a76f777895dbe88dc5eadf48c659ee544..0000000000000000000000000000000000000000
--- a/lite/api/android/jni/native/tensor_jni.h
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/* DO NOT EDIT THIS FILE - it is machine generated */
-#include
-/* Header for class com_baidu_paddle_lite_Tensor */
-
-#ifndef PADDLE_FLUID_LITE_API_ANDROID_JNI_NATIVE_TENSOR_JNI_H_
-#define PADDLE_FLUID_LITE_API_ANDROID_JNI_NATIVE_TENSOR_JNI_H_
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-namespace paddle {
-namespace lite_api {
-
-/*
- * Class: com_baidu_paddle_lite_Tensor
- * Method: shape
- * Signature: ()[J
- */
-JNIEXPORT jlongArray JNICALL Java_com_baidu_paddle_lite_Tensor_shape(JNIEnv *,
- jobject);
-
-/*
- * Class: com_baidu_paddle_lite_Tensor
- * Method: getFloatData
- * Signature: ()[F
- */
-JNIEXPORT jfloatArray JNICALL
-Java_com_baidu_paddle_lite_Tensor_getFloatData(JNIEnv *, jobject);
-
-/*
- * Class: com_baidu_paddle_lite_Tensor
- * Method: getByteData
- * Signature: ()[B
- */
-JNIEXPORT jbyteArray JNICALL
-Java_com_baidu_paddle_lite_Tensor_getByteData(JNIEnv *, jobject);
-
-/*
- * Class: com_baidu_paddle_lite_Tensor
- * Method: nativeResize
- * Signature: ([J)Z
- */
-JNIEXPORT jboolean JNICALL
-Java_com_baidu_paddle_lite_Tensor_nativeResize(JNIEnv *, jobject, jlongArray);
-
-/*
- * Class: com_baidu_paddle_lite_Tensor
- * Method: nativeSetData
- * Signature: ([F)Z
- */
-JNIEXPORT jboolean JNICALL Java_com_baidu_paddle_lite_Tensor_nativeSetData___3F(
- JNIEnv *, jobject, jfloatArray);
-
-/*
- * Class: com_baidu_paddle_lite_Tensor
- * Method: nativeSetData
- * Signature: ([B)Z
- */
-JNIEXPORT jboolean JNICALL Java_com_baidu_paddle_lite_Tensor_nativeSetData___3B(
- JNIEnv *, jobject, jbyteArray);
-
-/*
- * Class: com_baidu_paddle_lite_Tensor
- * Method: deleteCppTensor
- * Signature: (J)Z
- */
-JNIEXPORT jboolean JNICALL
-Java_com_baidu_paddle_lite_Tensor_deleteCppTensor(JNIEnv *, jobject, jlong);
-
-} // namespace lite_api
-} // namespace paddle
-
-#ifdef __cplusplus
-}
-#endif
-#endif // PADDLE_FLUID_LITE_API_ANDROID_JNI_NATIVE_TENSOR_JNI_H_
diff --git a/lite/api/android/jni/src/com/baidu/paddle/lite/.gitignore b/lite/api/android/jni/src/com/baidu/paddle/lite/.gitignore
deleted file mode 100644
index 870ec275e827c663c24ab374bbec8c37c8f3d8b0..0000000000000000000000000000000000000000
--- a/lite/api/android/jni/src/com/baidu/paddle/lite/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-/PaddleLite.class
-/PaddleLiteTest.class
diff --git a/lite/api/android/jni/src/com/baidu/paddle/lite/ConfigBase.java b/lite/api/android/jni/src/com/baidu/paddle/lite/ConfigBase.java
deleted file mode 100644
index 51115b30167352f63b873cec0c8524a6b746916a..0000000000000000000000000000000000000000
--- a/lite/api/android/jni/src/com/baidu/paddle/lite/ConfigBase.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-package com.baidu.paddle.lite;
-
-/**
- * Base class for all configurations.
- */
-public class ConfigBase {
-
- protected String modelDir;
-
- public String getModelDir() {
- return modelDir;
- }
-
- public void setModelDir(String modelDir) {
- this.modelDir = modelDir;
- }
-
-}
diff --git a/lite/api/android/jni/src/com/baidu/paddle/lite/CxxConfig.java b/lite/api/android/jni/src/com/baidu/paddle/lite/CxxConfig.java
deleted file mode 100644
index 906293c92fe379caf7e05c805cbbf9a55f0896bd..0000000000000000000000000000000000000000
--- a/lite/api/android/jni/src/com/baidu/paddle/lite/CxxConfig.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-package com.baidu.paddle.lite;
-
-/**
- * CxxConfig is the configuration for the Full feature predictor.
- */
-public class CxxConfig extends ConfigBase {
-
- protected Place preferredPlace;
- protected Place[] validPlaces;
-
- public Place getPreferredPlace() {
- return preferredPlace;
- }
-
- public void setPreferredPlace(Place preferredPlace) {
- this.preferredPlace = preferredPlace;
- }
-
- public Place[] getValidPlaces() {
- return validPlaces;
- }
-
- public void setValidPlaces(Place[] validPlaces) {
- this.validPlaces = validPlaces;
- }
-}
diff --git a/lite/api/android/jni/src/com/baidu/paddle/lite/MobileConfig.java b/lite/api/android/jni/src/com/baidu/paddle/lite/MobileConfig.java
deleted file mode 100644
index 5c71db0c92b344e44ea2927305580de1be293f75..0000000000000000000000000000000000000000
--- a/lite/api/android/jni/src/com/baidu/paddle/lite/MobileConfig.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-package com.baidu.paddle.lite;
-
-/**
- * MobileConfig is the config for the light weight predictor, it will skip IR
- * optimization or other unnecessary stages.
- */
-public class MobileConfig extends ConfigBase {
-
- /**
- * Set power mode.
- *
- * @return
- */
- public void setPowerMode(PowerMode powerMode) {
- this.powerMode = powerMode;
- }
-
- /**
- * Returns power mode.
- *
- * @return power mode
- */
- public PowerMode getPowerMode() {
- return powerMode;
- }
-
- /**
- * Set threads num.
- *
- * @return
- */
- public void setThreads(int threads) {
- this.threads = threads;
- }
-
- /**
- * Returns threads num.
- *
- * @return threads num
- */
- public int getThreads() {
- return threads;
- }
-
- /**
- * Returns power mode as enum int value.
- *
- * @return power mode as enum int value
- */
- public int getPowerModeInt() {
- return powerMode.value();
- }
-
- private PowerMode powerMode = PowerMode.LITE_POWER_HIGH;
- private int threads = 1;
-}
diff --git a/lite/api/android/jni/src/com/baidu/paddle/lite/PaddleLiteInitializer.java b/lite/api/android/jni/src/com/baidu/paddle/lite/PaddleLiteInitializer.java
deleted file mode 100644
index 876d7cebd4427c7bde2cc040fd271cb652b8ccf5..0000000000000000000000000000000000000000
--- a/lite/api/android/jni/src/com/baidu/paddle/lite/PaddleLiteInitializer.java
+++ /dev/null
@@ -1,23 +0,0 @@
-package com.baidu.paddle.lite;
-
-/**
- * Initializer for PaddleLite. The initialization methods are called by package
- * classes only. Public users don't have to call them. Public users can get
- * PaddleLite information constants such as JNI lib name in this class.
- */
-public class PaddleLiteInitializer {
-
- /** name of C++ JNI lib */
- public final static String JNI_LIB_NAME = "paddle_lite_jni";
-
- /**
- * loads the C++ JNI lib. We only call it in our package, so it shouldn't be
- * visible to public users.
- *
- * @return true if initialize successfully.
- */
- protected static boolean init() {
- System.loadLibrary(JNI_LIB_NAME);
- return true;
- }
-}
diff --git a/lite/api/android/jni/src/com/baidu/paddle/lite/PaddlePredictor.java b/lite/api/android/jni/src/com/baidu/paddle/lite/PaddlePredictor.java
deleted file mode 100644
index d022fd7d61816e3cc0e01dbac227210e1061099e..0000000000000000000000000000000000000000
--- a/lite/api/android/jni/src/com/baidu/paddle/lite/PaddlePredictor.java
+++ /dev/null
@@ -1,192 +0,0 @@
-/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-package com.baidu.paddle.lite;
-
-/** Java Native Interface (JNI) class for Paddle Lite APIs */
-public class PaddlePredictor {
-
- /**
- * Java doesn't have pointer. To maintain the life cycle of underneath C++
- * PaddlePredictor object, we use a long value to maintain it.
- */
- private long cppPaddlePredictorPointer;
-
- /**
- * Constructor of a PaddlePredictor.
- *
- * @param config the input configuration.
- */
- public PaddlePredictor(ConfigBase config) {
- init(config);
- }
-
- /**
- * Creates a PaddlePredictor object.
- *
- * @param config the input configuration.
- * @return the PaddlePredictor object, or null if failed to create it.
- */
- public static PaddlePredictor createPaddlePredictor(ConfigBase config) {
- PaddlePredictor predictor = new PaddlePredictor(config);
- return predictor.cppPaddlePredictorPointer == 0L ? null : predictor;
- }
-
- /**
- * Get offset-th input tensor.
- *
- * @param offset
- * @return the tensor or null if failed to get it.
- */
- public Tensor getInput(int offset) {
- long cppTensorPointer = getInputCppTensorPointer(offset);
- return cppTensorPointer == 0 ? null : new Tensor(cppTensorPointer, /* readOnly = */ false, this);
- }
-
- /**
- * Get offset-th output tensor.
- *
- * @param offset
- * @return the tensor or null if failed to get it.
- */
- public Tensor getOutput(int offset) {
- long cppTensorPointer = getOutputCppTensorPointer(offset);
- return cppTensorPointer == 0 ? null : new Tensor(cppTensorPointer, /* readOnly = */ true, this);
- }
-
- /**
- * Get a tensor by name.
- *
- * @param name the name of the tensor.
- * @return the tensor or null if failed to get it.
- */
- public Tensor getTensor(String name) {
- long cppTensorPointer = getCppTensorPointerByName(name);
- return cppTensorPointer == 0 ? null : new Tensor(cppTensorPointer, /* readOnly = */ true, this);
- }
-
- /**
- * Run the PaddlePredictor.
- *
- * @return true if run successfully.
- */
- public native boolean run();
-
- /**
- * Saves the optimized model. It is available only for {@link CxxConfig}
- *
- * @param modelDir the path to save the optimized model
- * @return true if save successfully. Otherwise returns false.
- */
- public native boolean saveOptimizedModel(String modelDir);
-
- /**
- * Deletes C++ PaddlePredictor pointer when Java PaddlePredictor object is
- * destroyed
- */
- @Override
- protected void finalize() throws Throwable {
- clear();
- super.finalize();
- }
-
- /**
- * Create a C++ PaddlePredictor object based on configuration
- *
- * @param config the input configuration
- * @return true if create successfully
- */
- protected boolean init(ConfigBase config) {
- if (config instanceof CxxConfig) {
- cppPaddlePredictorPointer = newCppPaddlePredictor((CxxConfig) config);
- } else if (config instanceof MobileConfig) {
- cppPaddlePredictorPointer = newCppPaddlePredictor((MobileConfig) config);
- } else {
- throw new IllegalArgumentException("Not supported PaddleLite Config type");
- }
- return cppPaddlePredictorPointer != 0L;
- }
-
- /**
- * Deletes C++ PaddlePredictor pointer
- *
- * @return true if deletion success
- */
- protected boolean clear() {
- boolean result = false;
- if (cppPaddlePredictorPointer != 0L) {
- result = deleteCppPaddlePredictor(cppPaddlePredictorPointer);
- cppPaddlePredictorPointer = 0L;
- }
- return result;
- }
-
- /**
- * Gets offset-th input tensor pointer at C++ side.
- *
- * @param offset
- * @return a long value which is reinterpret_cast of the C++ pointer.
- */
- private native long getInputCppTensorPointer(int offset);
-
- /**
- * Gets offset-th output tensor pointer at C++ side.
- *
- * @param offset
- * @return a long value which is reinterpret_cast of the C++ pointer.
- */
- private native long getOutputCppTensorPointer(int offset);
-
- /**
- * Gets tensor pointer at C++ side by name.
- *
- * @param name the name of the tensor.
- * @return a long value which is reinterpret_cast of the C++ pointer.
- */
- private native long getCppTensorPointerByName(String name);
-
- /**
- * Creates a new C++ PaddlePredcitor object using CxxConfig, returns the
- * reinterpret_cast value of the C++ pointer which points to C++
- * PaddlePredictor.
- *
- * @param config
- * @return a long value which is reinterpret_cast of the C++ pointer.
- */
- private native long newCppPaddlePredictor(CxxConfig config);
-
- /**
- * Creates a new C++ PaddlePredcitor object using Mobile, returns the
- * reinterpret_cast value of the C++ pointer which points to C++
- * PaddlePredictor.
- *
- * @param config
- * @return a long value which is reinterpret_cast of the C++ pointer.
- */
- private native long newCppPaddlePredictor(MobileConfig config);
-
- /**
- * Delete C++ PaddlePredictor object pointed by the input pointer, which is
- * presented by a long value.
- *
- * @param nativePointer a long value which is reinterpret_cast of the C++
- * pointer.
- * @return true if deletion success.
- */
- private native boolean deleteCppPaddlePredictor(long nativePointer);
-
- /* Initializes at the beginning */
- static {
- PaddleLiteInitializer.init();
- }
-}
diff --git a/lite/api/android/jni/src/com/baidu/paddle/lite/Place.java b/lite/api/android/jni/src/com/baidu/paddle/lite/Place.java
deleted file mode 100644
index 98777f3111c65107b414c03b9691cd54a771040c..0000000000000000000000000000000000000000
--- a/lite/api/android/jni/src/com/baidu/paddle/lite/Place.java
+++ /dev/null
@@ -1,148 +0,0 @@
-/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-package com.baidu.paddle.lite;
-
-/**
- * Place specifies the execution context of a Kernel or input/output for a
- * kernel. It is used to make the analysis of the MIR more clear and accurate.
- */
-public class Place {
-
- /** Place hardware target type. */
- public enum TargetType {
- UNKNOWN(0), HOST(1), X86(2), CUDA(3), ARM(4), OPEN_CL(5), FPGA(7), NPU(8), ANY(6);
-
- public final int value;
-
- private TargetType(int value) {
- this.value = value;
- }
- }
-
- /** Place precision type */
- public enum PrecisionType {
- UNKNOWN(0), FLOAT(1), INT8(2), FP16(5), INT32(3), ANY(4), BOOL(6);
-
- public final int value;
-
- private PrecisionType(int value) {
- this.value = value;
- }
- }
-
- /** Place data layout type */
- public enum DataLayoutType {
- UNKNOWN(0), NCHW(1), NHWC(3), ANY(2);
-
- public final int value;
-
- private DataLayoutType(int value) {
- this.value = value;
- }
- }
-
- private TargetType target;
- private PrecisionType precision;
- private DataLayoutType layout;
- private int device;
-
- public Place() {
- target = TargetType.UNKNOWN;
- precision = PrecisionType.UNKNOWN;
- layout = DataLayoutType.UNKNOWN;
- device = 0;
- }
-
- public Place(TargetType target) {
- this(target, PrecisionType.FLOAT);
- }
-
- public Place(TargetType target, PrecisionType precision) {
- this(target, precision, DataLayoutType.NCHW);
- }
-
- public Place(TargetType target, PrecisionType precision, DataLayoutType layout) {
- this(target, precision, layout, 0);
- }
-
- public Place(TargetType target, PrecisionType precision, DataLayoutType layout, int device) {
- this.target = target;
- this.precision = precision;
- this.layout = layout;
- this.device = device;
- }
-
- public boolean isValid() {
- return target != TargetType.UNKNOWN && precision != PrecisionType.UNKNOWN && layout != DataLayoutType.UNKNOWN;
- }
-
- public TargetType getTarget() {
- return target;
- }
-
- public void setTarget(TargetType target) {
- this.target = target;
- }
-
- public PrecisionType getPrecision() {
- return precision;
- }
-
- public void setPrecision(PrecisionType precision) {
- this.precision = precision;
- }
-
- public DataLayoutType getLayout() {
- return layout;
- }
-
- public void setLayout(DataLayoutType layout) {
- this.layout = layout;
- }
-
- public int getDevice() {
- return device;
- }
-
- public void setDevice(int device) {
- this.device = device;
- }
-
- /**
- * Returns hardware target as enum int value.
- *
- * @return hardware target as enum int value
- */
- public int getTargetInt() {
- return target.value;
- }
-
- /**
- * Returns precision target as enum int value.
- *
- * @return precision as enum int value
- */
- public int getPrecisionInt() {
- return precision.value;
- }
-
- /**
- * Returns data layout as enum int value.
- *
- * @return data layout as enum int value
- */
- public int getDataLayoutInt() {
- return layout.value;
- }
-}
diff --git a/lite/api/android/jni/src/com/baidu/paddle/lite/PowerMode.java b/lite/api/android/jni/src/com/baidu/paddle/lite/PowerMode.java
deleted file mode 100644
index 36bd568406946e62d36f57a78c838431fc3e69c9..0000000000000000000000000000000000000000
--- a/lite/api/android/jni/src/com/baidu/paddle/lite/PowerMode.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-package com.baidu.paddle.lite;
-
-/**
- * PowerMode is the cpu running power mode for the light weight predictor.
- */
-public enum PowerMode {
- LITE_POWER_HIGH(0),
- LITE_POWER_LOW(1),
- LITE_POWER_FULL(2),
- LITE_POWER_NO_BIND(3),
- LITE_POWER_RAND_HIGH(4),
- LITE_POWER_RAND_LOW(5);
-
- private PowerMode(int value) {
- this.value = value;
- }
-
- public int value() {
- return this.value;
- }
-
- private final int value;
-}
diff --git a/lite/api/android/jni/src/com/baidu/paddle/lite/Tensor.java b/lite/api/android/jni/src/com/baidu/paddle/lite/Tensor.java
deleted file mode 100644
index ac78800bd2e4903b44332a0a0aefe9c69b75abab..0000000000000000000000000000000000000000
--- a/lite/api/android/jni/src/com/baidu/paddle/lite/Tensor.java
+++ /dev/null
@@ -1,141 +0,0 @@
-/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-package com.baidu.paddle.lite;
-
-/**
- * Tensor class provides the Java APIs that users can get or set the shape or
- * the data of a Tensor.
- */
-public class Tensor {
-
- /**
- * Java doesn't have pointer. To maintain the life cycle of underneath C++
- * PaddlePredictor object, we use a long value to maintain it.
- */
- private long cppTensorPointer;
-
- /**
- * Is this tensor read-only. This field is also used at C++ side to know whether
- * we should interpret the C++ tensor pointer to "Tensor" pointer or "const
- * Tensor" pointer.
- */
- private boolean readOnly;
-
- /**
- * Due to different memory management of Java and C++, at C++, if a user
- * destroys PaddlePredictor object, the tensor's memory will be released and a
- * pointer operating on the released tensor will cause unknown behavior. At C++
- * side, that's users' responsibility to manage memory well. But for our Java
- * code, we have to prevent this case. We make this {@link Tensor} keep a
- * reference to {@link PaddlePredictor} to prevent the {@link PaddlePredictor}
- * object be collected by JVM before {@Tensor}.
- */
- private PaddlePredictor predictor;
-
- /**
- * Accessed by package only to prevent public users to create it wrongly. A
- * Tensor can be created by {@link com.baidu.paddle.lite.PaddlePredictor} only
- */
- protected Tensor(long cppTensorPointer, boolean readOnly, PaddlePredictor predictor) {
- this.cppTensorPointer = cppTensorPointer;
- this.readOnly = readOnly;
- this.predictor = predictor;
- }
-
- /** Deletes C++ Tensor pointer when Java Tensor object is destroyed */
- protected void finalize() throws Throwable {
- if (cppTensorPointer != 0L) {
- deleteCppTensor(cppTensorPointer);
- cppTensorPointer = 0L;
- }
- super.finalize();
- }
-
- /**
- * @return whether this Tensor is read-only.
- */
- public boolean isReadOnly() {
- return readOnly;
- }
-
- /**
- * Resizes the tensor shape.
- *
- * @param dims long array of shape.
- * @return true if resize successfully.
- */
- public boolean resize(long[] dims) {
- if (readOnly) {
- return false;
- }
- return nativeResize(dims);
- }
-
- /**
- * Set the tensor float data.
- *
- * @param buf the float array buffer which will be copied into tensor.
- * @return true if set data successfully.
- */
- public boolean setData(float[] buf) {
- if (readOnly) {
- return false;
- }
- return nativeSetData(buf);
- }
-
- /**
- * Set the tensor byte data.
- *
- * @param buf the byte array buffer which will be copied into tensor.
- * @return true if set data successfully.
- */
- public boolean setData(byte[] buf) {
- if (readOnly) {
- return false;
- }
- return nativeSetData(buf);
- }
-
- /**
- * @return shape of the tensor as long array.
- */
- public native long[] shape();
-
- /**
- * @return the tensor data as float array.
- */
- public native float[] getFloatData();
-
- /**
- * @return the tensor data as byte array.
- */
- public native byte[] getByteData();
-
- private native boolean nativeResize(long[] dims);
-
- private native boolean nativeSetData(float[] buf);
-
- private native boolean nativeSetData(byte[] buf);
-
- /**
- * Delete C++ Tenor object pointed by the input pointer, which is presented by a
- * long value.
- *
- * @param nativePointer a long value which is reinterpret_cast of the C++
- * pointer.
- * @return true if deletion success.
- */
- private native boolean deleteCppTensor(long nativePointer);
-}
\ No newline at end of file
diff --git a/lite/api/android/jni/test/com/baidu/paddle/lite/PaddlePredictorTest.java b/lite/api/android/jni/test/com/baidu/paddle/lite/PaddlePredictorTest.java
deleted file mode 100644
index 0af11efd28f6628bbac6fa95b18170c8d1eed4b1..0000000000000000000000000000000000000000
--- a/lite/api/android/jni/test/com/baidu/paddle/lite/PaddlePredictorTest.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-package com.baidu.paddle.lite;
-
-import org.junit.jupiter.api.Test;
-
-import static org.junit.Assert.assertEquals;
-
-/**
- * Deprecated test. Now we use Android demo's Instrument test.
- *
- * @TODO make this test as Java Unit test. Then we don't have to launch Android
- * demo to test.
- */
-class PaddlePredictorTest {
-
- @Test
- public void run_defaultModel() {
- MobileConfig config = new MobileConfig();
- config.setModelDir("");
- PaddlePredictor predictor = PaddlePredictor.createPaddlePredictor(config);
-
- float[] inputBuffer = new float[10000];
- for (int i = 0; i < 10000; ++i) {
- inputBuffer[i] = i;
- }
- long[] dims = { 100, 100 };
-
- Tensor input = predictor.getInput(0);
- input.resize(dims);
- input.setData(inputBuffer);
-
- predictor.run();
-
- Tensor output = predictor.getOutput(0);
- float[] outputBuffer = output.getFloatData();
-
- assertEquals(outputBuffer.length, 50000);
- assertEquals(outputBuffer[0], 50.2132f, 1e-3f);
- assertEquals(outputBuffer[1], -28.8729f, 1e-3f);
- }
-
-}
diff --git a/lite/api/apis_test.cc b/lite/api/apis_test.cc
deleted file mode 100644
index 3dc02240846ed4fc6dc310e3a27725792463da6e..0000000000000000000000000000000000000000
--- a/lite/api/apis_test.cc
+++ /dev/null
@@ -1,118 +0,0 @@
-// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
- * We test multiple apis here.
- */
-#include
-#include
-#include
-#include
-#include "lite/api/cxx_api.h"
-#include "lite/api/light_api.h"
-#include "lite/api/paddle_use_kernels.h"
-#include "lite/api/paddle_use_ops.h"
-#include "lite/api/paddle_use_passes.h"
-#include "lite/core/mir/pass_registry.h"
-
-DEFINE_string(model_dir, "", "");
-DEFINE_string(optimized_model, "", "");
-
-namespace paddle {
-namespace lite {
-
-void SetConstInput(lite::Tensor* x) {
- x->Resize(DDim(std::vector({100, 100})));
- auto* data = x->mutable_data();
- for (int i = 0; i < 100 * 100; i++) {
- data[i] = i;
- }
-}
-
-bool CompareTensors(const std::string& name,
- const Predictor& cxx_api,
- const LightPredictor& light_api) {
- const auto* a = cxx_api.GetTensor(name);
- const auto* b = light_api.GetTensor(name);
- return TensorCompareWith(*a, *b);
-}
-
-TEST(CXXApi_LightApi, optim_model) {
- lite::Predictor cxx_api;
- std::vector valid_places({
- Place{TARGET(kHost), PRECISION(kFloat)},
- Place{TARGET(kX86), PRECISION(kFloat)},
- Place{TARGET(kARM), PRECISION(kFloat)}, // Both works on X86 and ARM
- });
- // On ARM devices, the preferred X86 target not works, but it can still
- // select ARM kernels.
- cxx_api.Build(FLAGS_model_dir,
- "",
- "",
- Place{TARGET(kX86), PRECISION(kFloat)},
- valid_places);
- cxx_api.SaveModel(FLAGS_optimized_model);
-}
-
-TEST(CXXApi_LightApi, save_and_load_model) {
- lite::Predictor cxx_api;
- lite::LightPredictor light_api(FLAGS_optimized_model);
-
- // CXXAPi
- {
- std::vector valid_places({
- Place{TARGET(kHost), PRECISION(kFloat)},
- Place{TARGET(kX86), PRECISION(kFloat)},
- Place{TARGET(kARM), PRECISION(kFloat)}, // Both works on X86 and ARM
- });
- // On ARM devices, the preferred X86 target not works, but it can still
- // select ARM kernels.
- cxx_api.Build(FLAGS_model_dir,
- "",
- "",
- Place{TARGET(kX86), PRECISION(kFloat)},
- valid_places);
-
- auto* x = cxx_api.GetInput(0);
- SetConstInput(x);
-
- cxx_api.Run();
-
- LOG(INFO) << "Save optimized model to " << FLAGS_optimized_model;
- cxx_api.SaveModel(FLAGS_optimized_model);
- }
-
- // LightApi
- {
- auto* x = light_api.GetInput(0);
- SetConstInput(x);
-
- light_api.Run();
- }
-
- const auto* cxx_out = cxx_api.GetOutput(0);
- const auto* light_out = light_api.GetOutput(0);
- ASSERT_TRUE(TensorCompareWith(*cxx_out, *light_out));
-
- std::vector tensors_with_order({
- "a", "fc_0.w_0", "scale_0.tmp_0",
- });
-
- for (const auto& tensor_name : tensors_with_order) {
- ASSERT_TRUE(CompareTensors(tensor_name, cxx_api, light_api));
- }
-}
-
-} // namespace lite
-} // namespace paddle
diff --git a/lite/api/benchmark.cc b/lite/api/benchmark.cc
deleted file mode 100644
index ca7bfe7fe6cb57a0f10ad6ca0ed1909a5d82eac1..0000000000000000000000000000000000000000
--- a/lite/api/benchmark.cc
+++ /dev/null
@@ -1,190 +0,0 @@
-// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include
-#include
-#include
-#include
-#include
-#include "lite/api/paddle_api.h"
-#include "lite/api/paddle_use_kernels.h"
-#include "lite/api/paddle_use_ops.h"
-#include "lite/api/paddle_use_passes.h"
-#include "lite/api/test_helper.h"
-#include "lite/core/device_info.h"
-#include "lite/utils/cp_logging.h"
-#include "lite/utils/string.h"
-
-DEFINE_string(input_shape,
- "1,3,224,224",
- "input shapes, separated by colon and comma");
-DEFINE_string(result_filename, "", "save test result");
-DEFINE_bool(run_model_optimize,
- false,
- "apply model_optimize_tool to model, use optimized model to test");
-
-namespace paddle {
-namespace lite_api {
-
-void OutputOptModel(const std::string& load_model_dir,
- const std::string& save_optimized_model_dir,
- const std::vector>& input_shapes) {
- lite_api::CxxConfig config;
- config.set_model_dir(load_model_dir);
- config.set_preferred_place(Place{TARGET(kX86), PRECISION(kFloat)});
- config.set_valid_places({
- Place{TARGET(kX86), PRECISION(kFloat)},
- Place{TARGET(kARM), PRECISION(kFloat)},
- });
- auto predictor = lite_api::CreatePaddlePredictor(config);
-
- int ret = system(
- paddle::lite::string_format("rm -rf %s", save_optimized_model_dir.c_str())
- .c_str());
- if (ret == 0) {
- LOG(INFO) << "delete old optimized model " << save_optimized_model_dir;
- }
- predictor->SaveOptimizedModel(save_optimized_model_dir,
- LiteModelType::kNaiveBuffer);
- LOG(INFO) << "Load model from " << load_model_dir;
- LOG(INFO) << "Save optimized model to " << save_optimized_model_dir;
-}
-
-#ifdef LITE_WITH_LIGHT_WEIGHT_FRAMEWORK
-void Run(const std::vector>& input_shapes,
- const std::string& model_dir,
- const int repeat,
- const int thread_num,
- const int warmup_times,
- const std::string model_name) {
- lite_api::MobileConfig config;
- config.set_threads(thread_num);
- if (thread_num == 1) {
- config.set_power_mode(LITE_POWER_HIGH);
- } else {
- config.set_power_mode(LITE_POWER_NO_BIND);
- }
- config.set_model_dir(model_dir);
-
- auto predictor = lite_api::CreatePaddlePredictor(config);
-
- for (int j = 0; j < input_shapes.size(); ++j) {
- auto input_tensor = predictor->GetInput(j);
- input_tensor->Resize(input_shapes[j]);
- auto input_data = input_tensor->mutable_data();
- int input_num = 1;
- for (int i = 0; i < input_shapes[j].size(); ++i) {
- input_num *= input_shapes[j][i];
- }
- for (int i = 0; i < input_num; ++i) {
- input_data[i] = 1.f;
- }
- }
-
- for (int i = 0; i < warmup_times; ++i) {
- predictor->Run();
- }
-
- auto start = lite::GetCurrentUS();
- for (int i = 0; i < repeat; ++i) {
- predictor->Run();
- }
- auto end = lite::GetCurrentUS();
-
- std::FILE* pf = std::fopen(FLAGS_result_filename.c_str(), "a");
- if (nullptr == pf) {
- LOG(INFO) << "create result file error";
- exit(0);
- }
- fprintf(pf,
- "-- %-18s avg = %5.4f ms\n",
- model_name.c_str(),
- (end - start) / repeat / 1000.0);
- std::fclose(pf);
-}
-#endif
-
-} // namespace lite_api
-} // namespace paddle
-
-int main(int argc, char** argv) {
- gflags::ParseCommandLineFlags(&argc, &argv, true);
- if (FLAGS_model_dir == "" || FLAGS_result_filename == "") {
- LOG(INFO) << "usage: "
- << "--model_dir /path/to/your/model --result_filename "
- "/path/to/resultfile";
- exit(0);
- }
-
- std::size_t found = FLAGS_model_dir.find_last_of("/");
- std::string model_name = FLAGS_model_dir.substr(found + 1);
- std::string save_optimized_model_dir = FLAGS_model_dir + "opt2";
-
- auto split_string =
- [](const std::string& str_in) -> std::vector {
- std::vector str_out;
- std::string tmp_str = str_in;
- while (!tmp_str.empty()) {
- size_t next_offset = tmp_str.find(":");
- str_out.push_back(tmp_str.substr(0, next_offset));
- if (next_offset == std::string::npos) {
- break;
- } else {
- tmp_str = tmp_str.substr(next_offset + 1);
- }
- }
- return str_out;
- };
-
- auto get_shape = [](const std::string& str_shape) -> std::vector {
- std::vector shape;
- std::string tmp_str = str_shape;
- while (!tmp_str.empty()) {
- int dim = atoi(tmp_str.data());
- shape.push_back(dim);
- size_t next_offset = tmp_str.find(",");
- if (next_offset == std::string::npos) {
- break;
- } else {
- tmp_str = tmp_str.substr(next_offset + 1);
- }
- }
- return shape;
- };
-
- std::vector str_input_shapes = split_string(FLAGS_input_shape);
- std::vector> input_shapes;
- for (int i = 0; i < str_input_shapes.size(); ++i) {
- input_shapes.push_back(get_shape(str_input_shapes[i]));
- }
-
- // Output optimized model
- if (FLAGS_run_model_optimize) {
- paddle::lite_api::OutputOptModel(
- FLAGS_model_dir, save_optimized_model_dir, input_shapes);
- }
-
-#ifdef LITE_WITH_LIGHT_WEIGHT_FRAMEWORK
- // Run inference using optimized model
- std::string run_model_dir =
- FLAGS_run_model_optimize ? save_optimized_model_dir : FLAGS_model_dir;
- paddle::lite_api::Run(input_shapes,
- run_model_dir,
- FLAGS_repeats,
- FLAGS_threads,
- FLAGS_warmup,
- model_name);
-#endif
- return 0;
-}
diff --git a/lite/api/cxx_api.cc b/lite/api/cxx_api.cc
deleted file mode 100644
index eeba68630146870fd43bac3cd7eeaa1d9c576eac..0000000000000000000000000000000000000000
--- a/lite/api/cxx_api.cc
+++ /dev/null
@@ -1,177 +0,0 @@
-// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "lite/api/cxx_api.h"
-#include
-#include
-#include
-#include
-#include "lite/utils/io.h"
-#ifdef LITE_WITH_NPU
-#include "lite/backends/npu/npu_helper.h"
-#endif
-
-namespace paddle {
-namespace lite {
-
-void Predictor::SaveModel(const std::string &dir,
- lite_api::LiteModelType model_type) {
- if (!program_) {
- GenRuntimeProgram();
- }
- program_->SaveOpInfosToProgram(&program_desc_);
- program_->UpdateVarsOfProgram(&program_desc_);
- switch (model_type) {
- case lite_api::LiteModelType::kProtobuf:
- SaveModelPb(dir, *program_->exec_scope(), program_desc_, true);
- break;
- case lite_api::LiteModelType::kNaiveBuffer:
- SaveModelNaive(dir, *program_->exec_scope(), program_desc_);
- break;
- default:
- LOG(FATAL) << "Unknown model type";
- }
-#ifdef LITE_WITH_NPU
- for (auto name : npu::DeviceInfo::Global().AllClientNames()) {
- // the npu offline model is saved in current dir
- // so just copy to dst dir
- CHECK_EQ(
- system(string_format("cp -r %s %s", name.c_str(), dir.c_str()).c_str()),
- 0)
- << "Failed copy NPU model to " << dir;
- }
-#endif
-}
-
-lite::Tensor *Predictor::GetInput(size_t offset) {
- auto *_feed_list = exec_scope_->FindVar("feed");
- CHECK(_feed_list) << "no feed variable in exec_scope";
- auto *feed_list = _feed_list->GetMutable>();
- if (offset >= feed_list->size()) {
- feed_list->resize(offset + 1);
- }
- return &feed_list->at(offset);
-}
-
-const lite::Tensor *Predictor::GetOutput(size_t offset) const {
- auto *_fetch_list = exec_scope_->FindVar("fetch");
- CHECK(_fetch_list) << "no fatch variable in exec_scope";
- auto &fetch_list = *_fetch_list->GetMutable>();
- CHECK_LT(offset, fetch_list.size()) << "offset " << offset << " overflow";
- return &fetch_list.at(offset);
-}
-
-const std::vector *Predictor::GetOutputs() const {
- auto *_fetch_list = exec_scope_->FindVar("fetch");
- CHECK(_fetch_list) << "no fatch variable in exec_scope";
- auto &fetch_list = *_fetch_list->GetMutable>();
- return &fetch_list;
-}
-
-const cpp::ProgramDesc &Predictor::program_desc() const {
- return program_desc_;
-}
-const RuntimeProgram &Predictor::runtime_program() const { return *program_; }
-
-void Predictor::Build(const lite_api::CxxConfig &config,
- const std::vector &valid_places,
- const std::vector &passes,
- lite_api::LiteModelType model_type) {
- const std::string &model_path = config.model_dir();
- const std::string &model_file = config.model_file();
- const std::string ¶m_file = config.param_file();
- const Place prefer_place = config.preferred_place();
- const bool model_from_memory = config.model_from_memory();
- LOG(INFO) << "load from memory " << model_from_memory;
-
- Build(model_path,
- model_file,
- param_file,
- prefer_place,
- valid_places,
- passes,
- model_type,
- model_from_memory);
-}
-void Predictor::Build(const std::string &model_path,
- const std::string &model_file,
- const std::string ¶m_file,
- const Place &prefer_place,
- const std::vector &valid_places,
- const std::vector &passes,
- lite_api::LiteModelType model_type,
- bool model_from_memory) {
- switch (model_type) {
- case lite_api::LiteModelType::kProtobuf: {
- bool combined_param = false;
- if (!model_file.empty() && !param_file.empty()) {
- combined_param = true;
- }
- LoadModelPb(model_path,
- model_file,
- param_file,
- scope_.get(),
- &program_desc_,
- combined_param,
- model_from_memory);
- } break;
- case lite_api::LiteModelType::kNaiveBuffer:
- CHECK(!model_path.empty())
- << "NaiveBuffer backend only supported combined param";
- LoadModelNaive(model_path, scope_.get(), &program_desc_);
- break;
- default:
- LOG(FATAL) << "Unknown model type";
- }
- Build(program_desc_, prefer_place, valid_places, passes);
-}
-
-void Predictor::Build(const cpp::ProgramDesc &desc,
- const Place &prefer_place,
- const std::vector &valid_places,
- const std::vector &passes) {
- program_desc_ = desc;
- Program program(desc, scope_, valid_places);
- optimizer_.KernelPickPreferPlace(prefer_place);
- core::KernelPickFactor factor;
- factor.ConsiderTarget();
- factor.ConsiderPrecision();
- optimizer_.Run(std::move(program), valid_places, factor, passes);
- exec_scope_ = optimizer_.exec_scope();
-}
-
-void Predictor::GenRuntimeProgram() {
- program_ = optimizer_.GenRuntimeProgram();
- CHECK_EQ(exec_scope_, program_->exec_scope());
- program_generated_ = true;
-}
-
-const lite::Tensor *Predictor::GetTensor(const std::string &name) const {
- auto *var = exec_scope_->FindVar(name);
- return &var->Get();
-}
-
-#ifdef LITE_WITH_TRAIN
-void Predictor::FeedVars(const std::vector &tensors) {
- auto var = scope_->FindVar("feed");
- auto &feed_list = *(var->GetMutable>());
- feed_list.resize(tensors.size());
-
- for (size_t i = 0; i < tensors.size(); ++i)
- feed_list[i].ShareDataWith(tensors[i]);
-}
-#endif
-
-} // namespace lite
-} // namespace paddle
diff --git a/lite/api/cxx_api.h b/lite/api/cxx_api.h
deleted file mode 100644
index 2506ae47b0ddbce683d8f4b12e000bb3ea19d497..0000000000000000000000000000000000000000
--- a/lite/api/cxx_api.h
+++ /dev/null
@@ -1,173 +0,0 @@
-// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#pragma once
-#include
-#include
-#include
-#include
-#include "lite/api/paddle_api.h"
-#include "lite/core/op_lite.h"
-#include "lite/core/optimizer.h"
-#include "lite/core/program.h"
-#include "lite/core/types.h"
-#include "lite/model_parser/model_parser.h"
-
-namespace paddle {
-namespace lite {
-
-/*
- * Predictor for inference, input a model, it will optimize and execute it.
- */
-class LITE_API Predictor {
- public:
- // Create an empty predictor.
- Predictor() { scope_ = std::make_shared(); }
- // Create a predictor with the weight variable scope set.
- explicit Predictor(const std::shared_ptr& root_scope)
- : scope_(root_scope) {}
-
- // Build from a model, with places set for hardware config.
- void Build(
- const lite_api::CxxConfig& config,
- const std::vector& valid_places,
- const std::vector& passes = {},
- lite_api::LiteModelType model_type = lite_api::LiteModelType::kProtobuf);
-
- void Build(
- const std::string& model_path,
- const std::string& model_file_path,
- const std::string& param_file_path,
- const Place& prefer_place,
- const std::vector& valid_places,
- const std::vector& passes = {},
- lite_api::LiteModelType model_type = lite_api::LiteModelType::kProtobuf,
- bool memory_from_memory = false);
-
- void Build(const cpp::ProgramDesc& desc,
- const Place& prefer_place,
- const std::vector& valid_places,
- const std::vector& passes = {});
-
- void GenRuntimeProgram();
-
- // Run the predictor for a single batch of data.
- void Run() {
- if (!program_generated_) {
- GenRuntimeProgram();
- }
- program_->Run();
- LOG(INFO) << "running";
- }
-
- // Get offset-th col of feed inputs.
- lite::Tensor* GetInput(size_t offset);
-
- // Get offset-th col of fetch results.
- const lite::Tensor* GetOutput(size_t offset) const;
- const std::vector* GetOutputs() const;
-
- const cpp::ProgramDesc& program_desc() const;
- const lite::Tensor* GetTensor(const std::string& name) const;
- const RuntimeProgram& runtime_program() const;
-
- // This method is disabled in mobile, for unnecessary dependencies required.
- void SaveModel(
- const std::string& dir,
- lite_api::LiteModelType model_type = lite_api::LiteModelType::kProtobuf);
-
-#ifdef LITE_WITH_TRAIN
- void Run(const std::vector& tensors) {
- FeedVars(tensors);
- program_->Run();
- }
-
- void FeedVars(const std::vector& tensors);
-#endif
-
- private:
- Optimizer optimizer_;
- cpp::ProgramDesc program_desc_;
- std::shared_ptr scope_;
- const Scope* exec_scope_;
- std::unique_ptr program_;
- bool program_generated_{false};
-};
-
-/*
- * An executor for training.
- *
- * Usage:
- *
- * CXXTrainer trainer(...);
- * trainer.RunStartupProgram(...);
- * auto exe = BuildMainProgramExecutor(...);
- *
- * for (auto& epoch : epoches) {
- * auto* tensor0 = exe.GetInput(...);
- * // fill data for tensor0
- * exe.Run();
- * }
-#ifdef LITE_WITH_X86
-class LITE_API CXXTrainer {
- public:
- CXXTrainer(const std::shared_ptr& root_scope,
- const Place& preferred_place,
- const std::vector& valid_places)
- : scope_(root_scope),
- preferred_place_(preferred_place),
- valid_places_(valid_places),
- main_program_executor_(Predictor(scope_)) {}
-
- // Build the RuntimeProgram cache for the main program. The cache will run
- // multiple times for the epoches.
- // NOTE Just support to execute the 0-th block currently.
- Predictor& BuildMainProgramExecutor(const framework::proto::ProgramDesc& desc,
- int block_id = 0) {
- main_program_executor_.Build(desc, preferred_place_, valid_places_);
- return main_program_executor_;
- }
-
-#ifdef LITE_WITH_TRAIN
- Predictor& BuildMainProgramExecutor(framework::ProgramDesc& desc) { // NOLINT
- return BuildMainProgramExecutor(*desc.Proto());
- }
-
- void RunStartupProgram(framework::ProgramDesc& desc) { // NOLINT
- RunStartupProgram(*desc.Proto());
- }
-#endif
-
- // Run the startup program. It just executes once, no cache needed.
- void RunStartupProgram(const framework::proto::ProgramDesc& desc,
- int block_id = 0) {
- Predictor exe(scope_);
- exe.Build(desc, preferred_place_, valid_places_);
- exe.Run();
- }
-
- private:
- std::shared_ptr scope_;
-
- Place preferred_place_;
- std::vector valid_places_;
-
- // The training program.
- Predictor main_program_executor_;
-};
-#endif
-*/
-
-} // namespace lite
-} // namespace paddle
diff --git a/lite/api/cxx_api_bin.cc b/lite/api/cxx_api_bin.cc
deleted file mode 100644
index 000e94307ca4acaa3a57597f4a7b0e44a57e0031..0000000000000000000000000000000000000000
--- a/lite/api/cxx_api_bin.cc
+++ /dev/null
@@ -1,129 +0,0 @@
-// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "lite/api/cxx_api.h"
-#include // NOLINT
-#include "lite/api/paddle_use_passes.h"
-#include "lite/core/op_registry.h"
-
-namespace paddle {
-namespace lite {
-
-using Time = decltype(std::chrono::high_resolution_clock::now());
-Time time() { return std::chrono::high_resolution_clock::now(); }
-double time_diff(Time t1, Time t2) {
- typedef std::chrono::microseconds ms;
- auto diff = t2 - t1;
- ms counter = std::chrono::duration_cast(diff);
- return counter.count() / 1000.0;
-}
-
-void Run(const char* model_dir, int repeat) {
-#ifdef LITE_WITH_ARM
- DeviceInfo::Init();
-#endif
- lite::Predictor predictor;
- std::vector valid_places({
- Place{TARGET(kHost), PRECISION(kFloat)},
- Place{TARGET(kARM), PRECISION(kFloat)},
- Place{TARGET(kARM), PRECISION(kInt8)},
- });
-
- predictor.Build(
- model_dir, "", "", Place{TARGET(kARM), PRECISION(kInt8)}, valid_places);
-
- auto* input_tensor = predictor.GetInput(0);
- input_tensor->Resize(DDim(std::vector({1, 3, 224, 224})));
- auto* data = input_tensor->mutable_data();
- for (int i = 0; i < input_tensor->dims().production(); i++) {
- data[i] = 1;
- }
-
- auto time1 = time();
- for (int i = 0; i < repeat; i++) predictor.Run();
- auto time2 = time();
- std::cout << " predict cost: " << time_diff(time1, time2) / repeat << "ms"
- << std::endl;
-
- auto* out = predictor.GetOutput(0);
- LOG(INFO) << out << " memory size " << out->data_size();
- LOG(INFO) << "out " << out->data()[0];
- LOG(INFO) << "out " << out->data()[1];
- LOG(INFO) << "dims " << out->dims();
- LOG(INFO) << "out data size: " << out->data_size();
-}
-
-} // namespace lite
-} // namespace paddle
-
-int main(int argc, char** argv) {
- CHECK_EQ(argc, 3) << "usage: ./cmd ";
- paddle::lite::Run(argv[1], std::stoi(argv[2]));
-
- return 0;
-}
-
-USE_LITE_OP(mul);
-USE_LITE_OP(fc);
-USE_LITE_OP(scale);
-USE_LITE_OP(feed);
-USE_LITE_OP(fetch);
-USE_LITE_OP(io_copy);
-USE_LITE_OP(io_copy_once);
-
-USE_LITE_OP(conv2d);
-USE_LITE_OP(batch_norm);
-USE_LITE_OP(relu);
-USE_LITE_OP(depthwise_conv2d);
-USE_LITE_OP(pool2d);
-USE_LITE_OP(elementwise_add);
-USE_LITE_OP(softmax);
-USE_LITE_OP(fake_quantize_moving_average_abs_max);
-USE_LITE_OP(fake_dequantize_max_abs);
-
-USE_LITE_KERNEL(feed, kHost, kAny, kAny, def);
-USE_LITE_KERNEL(fetch, kHost, kAny, kAny, def);
-USE_LITE_OP(calib);
-
-#ifdef LITE_WITH_ARM
-USE_LITE_KERNEL(fc, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(fc, kARM, kInt8, kNCHW, int8out);
-USE_LITE_KERNEL(fc, kARM, kInt8, kNCHW, fp32out);
-USE_LITE_KERNEL(mul, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(scale, kARM, kFloat, kNCHW, def);
-
-USE_LITE_KERNEL(conv2d, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(conv2d, kARM, kInt8, kNCHW, int8_out);
-USE_LITE_KERNEL(conv2d, kARM, kInt8, kNCHW, fp32_out);
-USE_LITE_KERNEL(batch_norm, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(relu, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(depthwise_conv2d, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(pool2d, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(elementwise_add, kARM, kFloat, kNCHW, def);
-USE_LITE_KERNEL(softmax, kARM, kFloat, kNCHW, def);
-
-USE_LITE_KERNEL(calib, kARM, kInt8, kNCHW, fp32_to_int8);
-USE_LITE_KERNEL(calib, kARM, kInt8, kNCHW, int8_to_fp32);
-
-// USE_LITE_KERNEL(feed, kARM, kAny, kAny, def);
-// USE_LITE_KERNEL(fetch, kARM, kAny, kAny, def);
-#endif // LITE_WITH_ARM
-
-#ifdef LITE_WITH_CUDA
-USE_LITE_KERNEL(mul, kCUDA, kFloat, kNCHW, def);
-USE_LITE_KERNEL(io_copy, kCUDA, kAny, kAny, host_to_device);
-USE_LITE_KERNEL(io_copy, kCUDA, kAny, kAny, device_to_host);
-USE_LITE_KERNEL(io_copy_once, kCUDA, kAny, kAny, host_to_device);
-USE_LITE_KERNEL(io_copy_once, kCUDA, kAny, kAny, device_to_host);
-#endif
diff --git a/lite/api/cxx_api_impl.cc b/lite/api/cxx_api_impl.cc
deleted file mode 100644
index b8c92a8f96afefa7a2de6b844980f9c0f769f6a9..0000000000000000000000000000000000000000
--- a/lite/api/cxx_api_impl.cc
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "lite/api/cxx_api.h"
-#include "lite/api/paddle_api.h"
-
-namespace paddle {
-namespace lite {
-
-class CxxPaddleApiImpl : public lite_api::PaddlePredictor {
- public:
- CxxPaddleApiImpl();
-
- /// Create a new predictor from a config.
- void Init(const lite_api::CxxConfig &config);
-
- std::unique_ptr GetInput(int i) override;
-
- std::unique_ptr GetOutput(int i) const override;
-
- void Run() override;
-
- std::unique_ptr GetTensor(
- const std::string &name) const override;
-
- void SaveOptimizedModel(const std::string &model_dir,
- lite_api::LiteModelType model_type =
- lite_api::LiteModelType::kProtobuf) override;
-
- private:
- Predictor raw_predictor_;
-};
-
-CxxPaddleApiImpl::CxxPaddleApiImpl() {}
-
-void CxxPaddleApiImpl::Init(const lite_api::CxxConfig &config) {
- auto places = config.valid_places();
- places.emplace_back(TARGET(kHost), PRECISION(kAny), DATALAYOUT(kAny));
- raw_predictor_.Build(config, places);
-}
-
-std::unique_ptr CxxPaddleApiImpl::GetInput(int i) {
- auto *x = raw_predictor_.GetInput(i);
- return std::unique_ptr(new lite_api::Tensor(x));
-}
-
-std::unique_ptr CxxPaddleApiImpl::GetOutput(
- int i) const {
- const auto *x = raw_predictor_.GetOutput(i);
- return std::unique_ptr(new lite_api::Tensor(x));
-}
-
-void CxxPaddleApiImpl::Run() { raw_predictor_.Run(); }
-
-std::unique_ptr CxxPaddleApiImpl::GetTensor(
- const std::string &name) const {
- auto *x = raw_predictor_.GetTensor(name);
- return std::unique_ptr(new lite_api::Tensor(x));
-}
-
-void CxxPaddleApiImpl::SaveOptimizedModel(const std::string &model_dir,
- lite_api::LiteModelType model_type) {
- raw_predictor_.SaveModel(model_dir, model_type);
-}
-
-} // namespace lite
-
-namespace lite_api {
-
-template <>
-std::shared_ptr CreatePaddlePredictor(
- const CxxConfig &config) {
- auto x = std::make_shared