diff --git a/.gitignore b/.gitignore
index 9db2912c07bc2d6abb01c322a25519ac0ff158fa..ed131bdbbad6bd4dad500fa29f40a29fddeb7593 100644
--- a/.gitignore
+++ b/.gitignore
@@ -35,6 +35,7 @@
 build/
 build_fpga/
+docs/_build/
 .idea/
diff --git a/docs/advanced_user_guides/index.rst b/docs/advanced_user_guides/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/docs/api_reference/index.rst b/docs/api_reference/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/docs/benchmark/benchmark.md b/docs/benchmark/benchmark.md
new file mode 100644
index 0000000000000000000000000000000000000000..fc6e10d71ab3c29c98327609d7daa66d2ba0c135
--- /dev/null
+++ b/docs/benchmark/benchmark.md
@@ -0,0 +1,147 @@
+# Benchmark Data
+
+See [benchmark_tools](benchmark_tools) for how these numbers are measured; the **one-click benchmark** is recommended.
+
+## Test Environment
+
+* Models under test
+  * fp32 models
+    * mobilenet_v1
+    * mobilenet_v2
+    * squeezenet_v1.1
+    * mnasnet
+    * shufflenet_v2
+
+  * int8 models
+    * mobilenet_v1
+    * mobilenet_v2
+    * resnet50
+
+* Test devices (Android NDK r17c)
+  * Snapdragon 855
+    * Xiaomi Mi 9, Snapdragon 855
+    * 4 x A76 (1 @ 2.84GHz + 3 @ 2.4GHz) + 4 x A55 @ 1.78GHz
+
+  * Snapdragon 845
+    * Xiaomi Mi 8, Snapdragon 845
+    * big cluster (4 cores) @ 2.8GHz, little cluster (4 cores) @ 1.7GHz
+
+  * Snapdragon 835
+    * Xiaomi Mix 2, Snapdragon 835
+    * big cluster (4 cores) @ 2.45GHz, little cluster (4 cores) @ 1.9GHz
+
+  * Snapdragon 625
+    * OPPO R9s, Snapdragon 625
+    * 8 x A53, big cores @ 2.0GHz
+
+  * Snapdragon 653
+    * 360 N5, Snapdragon 653
+    * 4 x A73 @ 2.0GHz + 4 x A53 @ 1.4GHz
+
+  * Kirin 970
+    * HUAWEI Mate 10
+
+* Test notes
+  * branch: release/2.0.0
+  * warmup=10, repeats=30; the average latency is reported, in ms
+  * with 1 thread, `DeviceInfo::Global().SetRunMode` is set to LITE_POWER_HIGH; otherwise LITE_POWER_NO_BIND is used
+  * the input image shape is {1, 3, 224, 224} and every element is set to 1
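+As a rough illustration of what the settings above mean in code, the sketch below times a single model through the Paddle-Lite C++ API. It is a minimal sketch only: the `set_threads`/`set_power_mode` setters on `MobileConfig` are assumed to be available in this release (the benchmark binary itself drives the equivalent `DeviceInfo::Global().SetRunMode` call), and the model path is a placeholder.
+
+```c++
+#include <chrono>
+#include <iostream>
+#include "paddle_api.h"  // Paddle-Lite C++ API
+
+using namespace paddle::lite_api;
+
+int main() {
+  MobileConfig config;
+  config.set_model_dir("./mobilenet_v1");  // placeholder: an optimized model dir
+  config.set_threads(1);                   // 1 thread
+  config.set_power_mode(LITE_POWER_HIGH);  // otherwise LITE_POWER_NO_BIND
+
+  auto predictor = CreatePaddlePredictor<MobileConfig>(config);
+
+  // Input is {1, 3, 224, 224} with every element set to 1, as described above.
+  auto input = predictor->GetInput(0);
+  input->Resize({1, 3, 224, 224});
+  auto* data = input->mutable_data<float>();
+  for (int i = 0; i < 1 * 3 * 224 * 224; ++i) data[i] = 1.f;
+
+  for (int i = 0; i < 10; ++i) predictor->Run();  // warmup = 10
+
+  const int repeats = 30;
+  auto start = std::chrono::steady_clock::now();
+  for (int i = 0; i < repeats; ++i) predictor->Run();
+  auto end = std::chrono::steady_clock::now();
+
+  double avg_ms =
+      std::chrono::duration<double, std::milli>(end - start).count() / repeats;
+  std::cout << "avg latency: " << avg_ms << " ms" << std::endl;
+  return 0;
+}
+```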
+## Test Data
+
+### fp32 model results
+
+#### PaddlePaddle models
+
+Snapdragon 855|armv7 | | |armv8 | | |
+----| ---- | ---- | ---- | ---- |---- |----
+threads num|1 |2 |4 |1 |2 |4
+mobilenet_v1 |32.19 |18.81 |10.90 |30.92 |18.31 |10.15
+mobilenet_v2 |22.91 |13.75 |8.64 |21.15 |12.79 |7.84
+shufflenet_v2 |4.67 |3.37 |2.65 |4.43 |3.15 |2.66
+squeezenet_v1.1 |25.10 |15.93 |9.68 |23.28 |14.61 |8.71
+mnasnet |21.84 |13.14 |7.96 |19.61 |11.88 |7.55
+
+Snapdragon 835|armv7 | | |armv8 | | |
+----| ---- | ---- | ---- | ---- |---- |----
+threads num|1 |2 |4 |1 |2 |4
+mobilenet_v1 |94.13 |52.17 |30.68 |88.28 |47.58 |26.64
+mobilenet_v2 |61.24 |34.64 |22.36 |56.66 |32.19 |19.63
+shufflenet_v2 |10.87 |6.92 |5.12 |10.41 |6.76 |4.97
+squeezenet_v1.1 |73.61 |42.25 |24.44 |64.87 |38.43 |23.06
+mnasnet |58.22 |33.43 |20.44 |53.43 |30.20 |18.09
+
+Kirin 980|armv7 | | |armv8 | | |
+----| ---- | ---- | ---- | ---- |---- |----
+threads num|1 |2 |4 |1 |2 |4
+mobilenet_v1 |55.11 |28.24 |13.27 |34.24 |17.74 |12.41
+mobilenet_v2 |37.03 |19.80 |51.94 |23.64 |12.98 |9.38
+shufflenet_v2 |7.26 |4.94 |15.06 |5.32 |3.33 |2.82
+squeezenet_v1.1 |42.73 |23.66 |57.39 |26.03 |14.53 |13.66
+mnasnet |36.87 |20.15 |46.04 |21.85 |12.06 |8.68
+
+Kirin 970|armv7 | | |armv8 | | |
+----| ---- | ---- | ---- | ---- |---- |----
+threads num|1 |2 |4 |1 |2 |4
+mobilenet_v1 |97.80 |52.64 |34.46 |94.51 |49.36 |28.43
+mobilenet_v2 |66.55 |38.52 |23.19 |62.89 |34.93 |21.53
+shufflenet_v2 |13.78 |8.11 |5.93 |11.95 |7.90 |5.91
+squeezenet_v1.1 |77.64 |43.67 |25.72 |69.91 |40.66 |24.62
+mnasnet |61.86 |34.62 |22.68 |59.61 |32.79 |19.56
+
+#### Caffe models
+
+Snapdragon 855|armv7 | | |armv8 | | |
+----| ---- | ---- | ---- | ---- |---- |----|
+threads num|1 |2 |4 |1 |2 |4 |
+mobilenet_v1 |32.42 |18.68 |10.86 |30.92 |18.35 |10.07 |
+mobilenet_v2 |29.53 |17.76 |10.89 |27.19 |16.53 |9.75 |
+shufflenet_v2 |4.61 |3.29 |2.61 |4.36 |3.11 |2.51 |
+
+Snapdragon 835|armv7 | | |armv8 | | |
+----| ---- | ---- | ---- | ---- |---- |----|
+threads num|1 |2 |4 |1 |2 |4 |
+mobilenet_v1 |92.52 |52.34 |30.37 |88.31 |49.75 |27.29 |
+mobilenet_v2 |79.50 |45.67 |28.79 |76.13 |44.01 |26.13 |
+shufflenet_v2 |10.94 |7.08 |5.16 |10.64 |6.83 |5.01 |
+
+Kirin 980|armv7 | | |armv8 | | |
+----| ---- | ---- | ---- | ---- |---- |----|
+threads num|1 |2 |4 |1 |2 |4 |
+mobilenet_v1 |55.36 |28.18 |13.31 |34.42 |17.93 |12.52 |
+mobilenet_v2 |49.17 |26.10 |65.49 |30.50 |16.66 |11.72 |
+shufflenet_v2 |8.45 |5.00 |15.65 |4.58 |3.14 |2.83 |
+
+Kirin 970|armv7 | | |armv8 | | |
+----| ---- | ---- | ---- | ---- |---- |----|
+threads num|1 |2 |4 |1 |2 |4 |
+mobilenet_v1 |97.85 |53.38 |33.85 |94.29 |49.42 |28.29 |
+mobilenet_v2 |87.40 |50.25 |31.85 |85.55 |48.11 |28.24 |
+shufflenet_v2 |12.16 |8.39 |6.21 |12.21 |8.33 |6.32 |
+
+### int8 quantized model results
+
+Snapdragon 855|armv7 | | |armv8 | | |
+----| ---- | ---- | ---- | ---- |---- |----|
+threads num|1 |2 |4 |1 |2 |4 |
+mobilenet_v1 |36.80 |21.58 |11.12 |14.01 |8.13 |4.32 |
+mobilenet_v2 |28.72 |19.08 |12.49 |17.24 |11.55 |7.82 |
+
+Snapdragon 835|armv7 | | |armv8 | | |
+----| ---- | ---- | ---- | ---- |---- |----|
+threads num|1 |2 |4 |1 |2 |4 |
+mobilenet_v1 |60.76 |32.25 |16.66 |56.57 |29.84 |15.24 |
+mobilenet_v2 |49.38 |31.10 |22.07 |47.52 |28.18 |19.24 |
+
+Kirin 970|armv7 | | |armv8 | | |
+----| ---- | ---- | ---- | ---- |---- |----|
+threads num|1 |2 |4 |1 |2 |4 |
+mobilenet_v1 |65.95 |34.39 |18.68 |60.86 |30.98 |16.31 |
+mobilenet_v2 |68.87 |39.39 |24.43 |65.57 |37.31 |20.87 |
diff --git a/docs/benchmark/benchmark_tools.md b/docs/benchmark/benchmark_tools.md
new file mode 100644
index 0000000000000000000000000000000000000000..60341762b70772bc46196b836050714b9d43228b
--- /dev/null
+++ b/docs/benchmark/benchmark_tools.md
@@ -0,0 +1,187 @@
+# Benchmark Testing Method
+
+This document describes how to benchmark Paddle-Lite on an Android phone from a terminal, using an **Ubuntu 16.04 cross-compilation environment**. Two approaches are covered:
+
+1. **One-click benchmark**: for users who want quick numbers for the common models, using prebuilt benchmark executables;
+2. **Step-by-step benchmark**: the one-click flow broken down into individual steps.
+
+## Environment Setup
+
+1. Install [adb](https://developer.android.com/studio/command-line/adb) and the other prerequisites:
+```shell
+sudo apt update
+sudo apt install -y wget adb
+```
+2. Check the connection between the phone and the computer. Connect the Android phone over USB, then open Settings -> enable Developer Mode -> enable USB debugging -> allow (authorize) this computer to debug the phone;
+3. Run `adb devices` in a terminal to list the connected devices:
+```shell
+adb devices
+```
+If the command succeeds, the output looks like the following (the serial number will differ):
+```shell
+List of devices attached
+712QSDSEMMS7C device
+```
+
+## I. One-Click Benchmark
+
+Run the following commands to complete the benchmark:
+
+```shell
+wget -c https://paddle-inference-dist.bj.bcebos.com/PaddleLite/benchmark_0/run_benchmark.sh
+sh run_benchmark.sh
+```
+
+The `run_benchmark.sh` script will:
+
+1. download the models (mobilenetv1/v2, shufflenetv2, squeezenetv1.1, mnasnet) and push them to the phone;
+2. download the prebuilt android-armv7 and android-armv8 executables `benchmark_bin_v7` and `benchmark_bin_v8` and push them to the phone;
+3. run the `benchmark.sh` script automatically (if several phones are attached over USB, append the target phone's serial number to the `adb` commands inside `benchmark.sh`, as shown below);
+4. pull the benchmark results `result_armv7.txt` and `result_armv8.txt` from the phone into the current directory and print them.
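+As a concrete example of pinning `adb` to one device (using the serial number from the sample output above; the exact commands inside `benchmark.sh` may differ slightly):
+
+```shell
+# List the serial numbers of all attached devices.
+adb devices
+
+# Append -s <serial> to every adb call so it targets one specific phone,
+# e.g. when pushing the armv8 benchmark binary:
+adb -s 712QSDSEMMS7C push benchmark_bin_v8 /data/local/tmp
+adb -s 712QSDSEMMS7C shell ls /data/local/tmp
+```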
+## II. Step-by-Step Benchmark
+
+### 1. Get the benchmark executable
+
+The `benchmark_bin` executable measures PaddleLite performance. There are two ways to obtain it.
+
+#### Option 1: download the prebuilt benchmark_bin
+
+```shell
+# Download benchmark_bin for android-armv7
+wget -c https://paddle-inference-dist.bj.bcebos.com/PaddleLite/benchmark_0/benchmark_bin_v7
+
+# Download benchmark_bin for android-armv8
+wget -c https://paddle-inference-dist.bj.bcebos.com/PaddleLite/benchmark_0/benchmark_bin_v8
+```
+
+#### Option 2: build benchmark_bin from source
+
+Prepare the build environment as described in [Compile from Source](../source_compile), check out the latest PaddleLite release branch, and run the following from the repository root:
+
+```shell
+###########################################
+#  Build benchmark_bin for android-armv7  #
+###########################################
+./lite/tools/ci_build.sh \
+  --arm_os="android" \
+  --arm_abi="armv7" \
+  --arm_lang="gcc" \
+  build_arm
+
+# benchmark_bin is generated at: build.lite.android.armv7.gcc/lite/api/benchmark_bin
+
+###########################################
+#  Build benchmark_bin for android-armv8  #
+###########################################
+./lite/tools/ci_build.sh \
+  --arm_os="android" \
+  --arm_abi="armv8" \
+  --arm_lang="gcc" \
+  build_arm
+
+# benchmark_bin is generated at: build.lite.android.armv8.gcc/lite/api/benchmark_bin
+```
+
+> **Note**: to avoid the problem of not being able to reach the phone from inside docker, it is recommended to exit the docker container after building benchmark_bin and copy the binary to a temporary directory. Then download the models, copy the scripts, and run the tests from that temporary directory, following the steps below.
+
+### 2. Prepare the models
+
+PaddleLite provides a set of [common benchmark models](https://paddle-inference-dist.bj.bcebos.com/PaddleLite/benchmark_0/benchmark_models.tgz).
+
+Download and extract them:
+
+```shell
+wget -c https://paddle-inference-dist.bj.bcebos.com/PaddleLite/benchmark_0/benchmark_models.tgz
+tar zxvf benchmark_models.tgz
+```
+
+To benchmark other models, place their model files in the `benchmark_models` folder.
+
+### 3. The benchmark.sh script
+
+The benchmark driver script `benchmark.sh` is located at `lite/tools/benchmark.sh` in the PaddleLite source tree. Before testing, copy `benchmark.sh`, `benchmark_bin`, and `benchmark_models` into the same directory.
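+For example (the paths below are illustrative placeholders — adjust them to wherever you built or downloaded the files):
+
+```shell
+# Work from a temporary directory outside docker.
+mkdir -p ~/benchmark_workspace && cd ~/benchmark_workspace
+
+# Gather the script, the executables, and the models in one place.
+cp <paddle-lite-repo>/lite/tools/benchmark.sh .
+cp <path-to>/benchmark_bin_v7 <path-to>/benchmark_bin_v8 .
+cp -r <path-to>/benchmark_models .
+```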
+### 4. Run the test
+
+In a terminal, change into the folder that contains `benchmark.sh`, the executables (`benchmark_bin_v7`, `benchmark_bin_v8`), and the model folder (`benchmark_models`).
+
+If every model in `benchmark_models` has already been converted with `model_optimize_tool`, run the benchmark with:
+
+```shell
+# Benchmark for android-armv7
+sh benchmark.sh ./benchmark_bin_v7 ./benchmark_models result_armv7.txt
+
+# Benchmark for android-armv8
+sh benchmark.sh ./benchmark_bin_v8 ./benchmark_models result_armv8.txt
+```
+
+If the models in `benchmark_models` have not been converted with `model_optimize_tool`, run the commands below instead; `benchmark_bin` will first convert each model and then load it for testing.
+
+```shell
+# Benchmark for android-armv7
+sh benchmark.sh ./benchmark_bin_v7 ./benchmark_models result_armv7.txt true
+
+# Benchmark for android-armv8
+sh benchmark.sh ./benchmark_bin_v8 ./benchmark_models result_armv8.txt true
+```
+
+When the run finishes, the armv7 and armv8 results are saved to `result_armv7.txt` and `result_armv8.txt` in the current directory.
+
+**Viewing the results**
+
+Open `result_armv7.txt` and `result_armv8.txt` in the current directory to inspect the results.
+
+> The numbers differ across phones and Paddle-Lite versions.
+
+```shell
+run benchmark armv7
+--------------------------------------
+PaddleLite Benchmark
+Threads=1 Warmup=10 Repeats=30
+-- mnasnet avg = 159.8427 ms
+-- mobilenet_v1 avg = 235.0072 ms
+-- mobilenet_v2 avg = 173.0387 ms
+-- shufflenet_v2 avg = 76.0040 ms
+-- squeezenet_v11 avg = 164.2957 ms
+
+Threads=2 Warmup=10 Repeats=30
+-- mnasnet avg = 83.1287 ms
+-- mobilenet_v1 avg = 121.6029 ms
+-- mobilenet_v2 avg = 86.6175 ms
+-- shufflenet_v2 avg = 41.5761 ms
+-- squeezenet_v11 avg = 87.8678 ms
+
+Threads=4 Warmup=10 Repeats=30
+-- mnasnet avg = 73.3880 ms
+-- mobilenet_v1 avg = 119.0739 ms
+-- mobilenet_v2 avg = 85.3050 ms
+-- shufflenet_v2 avg = 38.0762 ms
+-- squeezenet_v11 avg = 64.2201 ms
+--------------------------------------
+
+run benchmark armv8
+--------------------------------------
+PaddleLite Benchmark
+Threads=1 Warmup=10 Repeats=30
+-- mnasnet avg = 165.3073 ms
+-- mobilenet_v1 avg = 306.0188 ms
+-- mobilenet_v2 avg = 195.1884 ms
+-- shufflenet_v2 avg = 99.3692 ms
+-- squeezenet_v11 avg = 156.6971 ms
+
+Threads=2 Warmup=10 Repeats=30
+-- mnasnet avg = 90.2290 ms
+-- mobilenet_v1 avg = 157.0007 ms
+-- mobilenet_v2 avg = 118.1607 ms
+-- shufflenet_v2 avg = 68.6804 ms
+-- squeezenet_v11 avg = 91.3090 ms
+
+Threads=4 Warmup=10 Repeats=30
+-- mnasnet avg = 179.9730 ms
+-- mobilenet_v1 avg = 204.0684 ms
+-- mobilenet_v2 avg = 181.6486 ms
+-- shufflenet_v2 avg = 123.2728 ms
+-- squeezenet_v11 avg = 412.9046 ms
+--------------------------------------
+```
diff --git a/docs/benchmark/index.rst b/docs/benchmark/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/docs/conf.py b/docs/conf.py
new file mode 100644
index 0000000000000000000000000000000000000000..bf4d1f6d491b3016817ff413396690e433171eeb
--- /dev/null
+++ b/docs/conf.py
@@ -0,0 +1,174 @@
+# -*- coding: utf-8 -*-
+#
+# Configuration file for the Sphinx documentation builder.
+#
+# This file does only contain a selection of the most common options. For a
+# full list see the documentation:
+# http://www.sphinx-doc.org/en/master/config
+
+# -- Path setup --------------------------------------------------------------
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+# +import os +import sys +#sys.path.insert(0, os.path.abspath('.')) + +import sphinx_rtd_theme +from recommonmark.parser import CommonMarkParser +from recommonmark.transform import AutoStructify + +# -- Project information ----------------------------------------------------- + +project = u'Paddle-Lite' +copyright = u'2020, Paddle-Lite Developer' +author = u'Paddle-Lite Developer' + +# The short X.Y version +version = u'latest' +# The full version, including alpha/beta/rc tags +release = u'' + + +# -- General configuration --------------------------------------------------- + +# If your documentation needs a minimal Sphinx version, state it here. +# +# needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = ['recommonmark'] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# +source_suffix = ['.rst', '.md'] + +# The master toctree document. +master_doc = 'index' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = None + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store'] + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = None + + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'sphinx_rtd_theme' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +# +# html_theme_options = {} + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +# Custom sidebar templates, must be a dictionary that maps document names +# to template names. +# +# The default sidebars (for documents that don't match any pattern) are +# defined by theme itself. Builtin themes are using these templates by +# default: ``['localtoc.html', 'relations.html', 'sourcelink.html', +# 'searchbox.html']``. +# +# html_sidebars = {} + + +# -- Options for HTMLHelp output --------------------------------------------- + +# Output file base name for HTML help builder. +htmlhelp_basename = 'Paddle-Litedoc' + + +# -- Options for LaTeX output ------------------------------------------------ + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # + # 'papersize': 'letterpaper', + + # The font size ('10pt', '11pt' or '12pt'). + # + # 'pointsize': '10pt', + + # Additional stuff for the LaTeX preamble. + # + # 'preamble': '', + + # Latex figure (float) alignment + # + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. 
List of tuples
+# (source start file, target name, title,
+# author, documentclass [howto, manual, or own class]).
+latex_documents = [
+    (master_doc, 'Paddle-Lite.tex', u'Paddle-Lite Documentation',
+     u'Paddle-Lite Developer', 'manual'),
+]
+
+
+# -- Options for manual page output ------------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+    (master_doc, 'paddle-lite', u'Paddle-Lite Documentation',
+     [author], 1)
+]
+
+
+# -- Options for Texinfo output ----------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+#  dir menu entry, description, category)
+texinfo_documents = [
+    (master_doc, 'Paddle-Lite', u'Paddle-Lite Documentation',
+     author, 'Paddle-Lite', 'One line description of project.',
+     'Miscellaneous'),
+]
+
+
+# -- Options for Epub output -------------------------------------------------
+
+# Bibliographic Dublin Core info.
+epub_title = project
+
+# The unique identifier of the text. This can be a ISBN number
+# or the project homepage.
+#
+# epub_identifier = ''
+
+# A unique identification for the text.
+#
+# epub_uid = ''
+
+# A list of files that should not be packed into the epub file.
+epub_exclude_files = ['search.html']
diff --git a/docs/develop_guides/index.rst b/docs/develop_guides/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/docs/images/architecture.png b/docs/images/architecture.png
new file mode 100644
index 0000000000000000000000000000000000000000..35cb336a0640c868d6fc1df738f039a0e7b5884d
Binary files /dev/null and b/docs/images/architecture.png differ
diff --git a/docs/index.rst b/docs/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..901076bccbd9fa9861c7db07e41d655097618ab0
--- /dev/null
+++ b/docs/index.rst
@@ -0,0 +1,57 @@
+.. Paddle-Lite documentation master file, created by
+   sphinx-quickstart on Thu Feb 6 14:11:30 2020.
+   You can adapt this file completely to your liking, but it should at least
+   contain the root `toctree` directive.
+
+Welcome to Paddle-Lite's documentation!
+=======================================
+
+.. toctree::
+   :maxdepth: 1
+   :caption: Introduction
+   :name: sec-introduction
+
+   introduction/tech_highlights
+   introduction/architecture
+
+.. toctree::
+   :maxdepth: 1
+   :caption: Benchmark Data and Method
+   :name: sec-benchmark
+
+   benchmark/benchmark
+   benchmark/benchmark_tools
+
+.. toctree::
+   :maxdepth: 1
+   :caption: Installation
+   :name: sec-install
+
+   installation/source_compile
+
+.. toctree::
+   :maxdepth: 1
+   :caption: User Guides
+   :name: sec-user-guides
+
+.. toctree::
+   :maxdepth: 1
+   :caption: Advanced User Guides
+
+.. toctree::
+   :maxdepth: 1
+   :caption: Developer Documentation
+
+.. toctree::
+   :maxdepth: 1
+   :caption: API Reference
+
+.. toctree::
+   :maxdepth: 1
+   :caption: FAQ
+
+.. toctree::
+   :maxdepth: 1
+   :caption: paddle-mobile
+
+
diff --git a/docs/installation/source_compile.md b/docs/installation/source_compile.md
new file mode 100644
index 0000000000000000000000000000000000000000..c0a86d92b6eba5526992031f36441fb8cc4fb537
--- /dev/null
+++ b/docs/installation/source_compile.md
@@ -0,0 +1,7 @@
+# Compile from Source
+
+Three build environments are currently supported:
+
+1. a Docker container;
+2. Linux (Ubuntu 16.04 recommended);
+3. macOS.
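+As a quick orientation, a typical Linux/Docker build of the ARM libraries might look like the sketch below; this is a minimal sketch only, reusing the `ci_build.sh` invocation shown in the benchmark guide of this documentation set, and the per-environment prerequisites are covered in the sections that follow.
+
+```shell
+# Fetch the source.
+git clone https://github.com/PaddlePaddle/Paddle-Lite.git
+cd Paddle-Lite
+
+# Example: build the Android armv8 targets with gcc from a prepared environment.
+./lite/tools/ci_build.sh \
+  --arm_os="android" \
+  --arm_abi="armv8" \
+  --arm_lang="gcc" \
+  build_arm
+```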
diff --git a/docs/introduction/architecture.md b/docs/introduction/architecture.md
new file mode 100644
index 0000000000000000000000000000000000000000..1a94494af0b44a03988266d341be5788c46f96c2
--- /dev/null
+++ b/docs/introduction/architecture.md
@@ -0,0 +1,94 @@
+# Architecture Design
+
+With this upgrade, Mobile becomes the Lite architecture, which focuses on multi-hardware support and high performance. The main design ideas are:
+
+- introduce a type system to strengthen mixed scheduling across hardware targets, quantization methods, and data layouts
+- isolate hardware details, so support for any hardware can be plugged in or removed freely via build switches
+- introduce MIR (Machine IR) to strengthen optimizations that are aware of the execution environment
+- strictly separate the optimization phase from the execution phase, keeping inference lightweight and efficient
+
+The architecture is shown below.
+
+![Paddle Inference Refactor1.0](https://user-images.githubusercontent.com/52520497/64949619-26e49580-d8ac-11e9-855a-514feb9b75af.png)
+
+## Strict separation of compile time and execution time
+
+- once compile-time optimization finishes, the optimization results can be stored in the model; execution time simply loads and runs it
+- two API sets and corresponding prediction libraries cover different scenarios
+  - `CxxPredictor` packages both `Compile Time` and `Execution Time`, so analysis and optimization can run on the target hardware at runtime for the best result
+  - `MobilePredictor` packages only `Execution Time`, keeping deployment and execution lightweight
+
+## Lightweight design and implementation of `Execution Time`
+
+- executing a batch involves only two steps
+  - `Op.InferShape`
+  - `Kernel.Run`; all kernel parameters are resolved to pointers ahead of time, so there is no lookup or argument-passing cost afterwards
+  - the design goal is that, at execution time, only the kernel computation itself costs anything
+- lightweight `Op` and `Kernel` design avoids framework overhead
+  - an `Op` has only two important responsibilities: `CreateKernels` and `InferShape`
+  - a `Kernel` has only one responsibility: `Run`
+
+## Multiple hardware backends
+
+- common hardware behavior is adapted through the `TargetWrapper` module, which exposes a uniform interface to the upper framework layers
+- upper-level framework strategies such as memory optimization and computation pruning stay hardware independent, so any newly added hardware can reuse them directly
+- the framework covers common hardware behavior and places few constraints on hardware-specific details; each backend implements those itself and plugs into the framework
+- two mainstream computation models are currently supported: non-heterogeneous devices such as X86 and ARM CPUs, and heterogeneous devices such as GPUs or FPGAs (with stream/event-based asynchronous execution and cross-device copies)
+
+---
+## Mixed scheduling across hardware and algorithms
+
+`TensorTy` describes the type of a Tensor:
+
+```c++
+struct TensorTy {
+  TargetType target;
+  PrecisionType precision;
+  DataLayout layout;
+  int deviceid;
+};
+```
+
+```c++
+enum class TargetType { kARM, kX86, kCUDA, kOpenCL };
+enum class PrecisionType { kFP32, kFP16, kInt8, kInt16 };
+enum class DataLayout { kNCHW, kNHWC };
+```
+---
+
+Kernel registration declares the input and output traits of a specific kernel:
+
+```c++
+REGISTER_LITE_KERNEL(
+    mul, kARM, kFloat, kNCHW, arm::MulCompute, def)
+    .BindInput("X", {LiteType::GetTensorTy(kARM, kFloat, kNCHW)})
+    .BindInput("Y", {LiteType::GetTensorTy(kARM, kFloat, kNCHW)})
+    .BindOutput("Out", {LiteType::GetTensorTy(kARM, kFloat, kNCHW)})
+    .Finalize();
+```
+
+---
+
+Different kernels of the same op behave like function overloads.
+
+This is what enables arbitrary mixed scheduling:
+
+1. annotate the Type of every tensor in the model
+2. annotate each kernel's target hardware, execution precision, data layout, and so on (see the sketch below)
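+To illustrate the "overload" idea, the same op can be registered with a second kernel whose traits differ; the int8 compute class and alias below are hypothetical names, used only to show the shape of such a registration.
+
+```c++
+// A second, int8 ARM kernel for the same `mul` op: same op name, different
+// precision traits, so the type system can choose between the two kernels.
+REGISTER_LITE_KERNEL(
+    mul, kARM, kInt8, kNCHW, arm::MulComputeInt8, int8_out)
+    .BindInput("X", {LiteType::GetTensorTy(kARM, kInt8, kNCHW)})
+    .BindInput("Y", {LiteType::GetTensorTy(kARM, kInt8, kNCHW)})
+    .BindOutput("Out", {LiteType::GetTensorTy(kARM, kInt8, kNCHW)})
+    .Finalize();
+```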
+
+A global type-inference pass is then run over the graph; whenever a type conflict is found along a tensor's path, a type cast is applied by inserting an op with the required conversion, so data is propagated correctly.
+
+![lite-7](https://user-images.githubusercontent.com/52520497/64949642-395ecf00-d8ac-11e9-8b69-ced1996abc3b.png)
+
+
+
+---
+
+## MIR for graph analysis and optimization
+
+The graph is an SSA form built on the type system, analyzed and optimized by IR passes:
+
+- type inference over the whole graph; type conflicts are resolved by inserting type cast ops, which enables general mixed scheduling
+- computation pruning, e.g. removing scale(1) and assign ops
+- memory optimization
+- operator fusion (six fusion strategies are already supported, including fc, conv_bn, and ele_add+act)
+- quantization support (int8 inference is already supported)
diff --git a/docs/introduction/tech_highlights.md b/docs/introduction/tech_highlights.md
new file mode 100644
index 0000000000000000000000000000000000000000..83618aaa4bcbd9b7383782d193580e1d3dec7143
--- /dev/null
+++ b/docs/introduction/tech_highlights.md
@@ -0,0 +1,44 @@
+# Technical Highlights
+
+Unlike typical mobile inference engines built on a Caffe-like architecture, the Lite architecture was designed from the start to meet the requirements of both the Paddle server and mobile scenarios: the server side needs full graph analysis and optimization capabilities, while the mobile side needs lightweight deployment. What the two share is the demand for high performance and multi-hardware support.
+
+The Lite architecture implements these capabilities in full; the highlights are described below.
+
+## Multi-hardware support
+
+The Lite architecture has been validated on, and fully supports, a range of hardware from mobile to server, including ARM CPU, ARM GPU, Huawei NPU, Intel X86 CPU, and NV GPU. Thanks to a moderate level of abstraction over the different backends, the framework itself stays clear while still accommodating hardware-specific scheduling needs, striking a good balance between framework clarity and hardware-specific scheduling optimization; for example, the complex stream and event allocation on Nvidia GPUs can be expressed cleanly in Lite.
+
+Kernels for different hardware do not interfere with each other at either the code level or the execution level, so users can freely plug hardware support in or out.
+
+## High performance
+
+High performance comes from two sources: kernel optimization and framework execution.
+
+On the kernel side, the kernels for each hardware target are deeply optimized through instruction-level tuning, operator fusion, algorithm rewrites, and similar techniques.
+
+On the framework side, simplifying the responsibilities of Op and Kernel keeps the execution-time framework overhead extremely low; in addition, the framework is flexible enough to support hardware-specific scheduling optimizations that further improve overall efficiency.
+
+## Quantization support
+
+Lite runs models produced by PaddleSlim's quantization training, fully preserving both the performance of quantized computation and the accuracy of quantization training.
+
+## Strong graph analysis and optimization
+
+For graph analysis and optimization, unlike common mobile inference engines that convert models with Python script tools, Lite has a complete C++ IR and a corresponding set of passes, supporting operator fusion, computation pruning, memory optimization, quantized computation, and other kinds of graph optimization.
+
+Additional optimization strategies can be added in a modular way simply by writing new passes.
+
+## Lightweight deployment
+
+Although the graph optimizations are sophisticated, they do not weigh down mobile deployment: the graph analysis module and the final execution engine can be used separately, so the final deployment carries only a thin kernel layer.
+
+## Mixed scheduling across arbitrary hardware
+
+Lite supports mixed scheduling across any hardware visible to the system. Automatic mixed scheduling of ARM CPU and ARM GPU kernels is already supported, and mixed scheduling between X86 CPU and Nvidia GPU has been validated.
+
+There are two reasons to support mixed scheduling:
+
+1. when several kinds of hardware are available in the same system, mixed scheduling makes full use of all of them
+2. as the number of supported models grows, different backends cover kernels to different degrees, so mixed scheduling is often needed to run a model end to end
+
+By supporting a `Type system` at the lowest level, the Lite architecture models all kinds of mixed execution in a general way and can therefore support mixed scheduling fairly completely.
diff --git a/docs/make.bat b/docs/make.bat
new file mode 100644
index 0000000000000000000000000000000000000000..7893348a1b7dbb588983a48e6991282eae7e1b55
--- /dev/null
+++ b/docs/make.bat
@@ -0,0 +1,35 @@
+@ECHO OFF
+
+pushd %~dp0
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+	set SPHINXBUILD=sphinx-build
+)
+set SOURCEDIR=.
+set BUILDDIR=_build
+
+if "%1" == "" goto help
+
+%SPHINXBUILD% >NUL 2>NUL
+if errorlevel 9009 (
+	echo.
+	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
+	echo.installed, then set the SPHINXBUILD environment variable to point
+	echo.to the full path of the 'sphinx-build' executable. Alternatively you
+	echo.may add the Sphinx directory to PATH.
+	echo.
+ echo.If you don't have Sphinx installed, grab it from + echo.http://sphinx-doc.org/ + exit /b 1 +) + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% + +:end +popd diff --git a/docs/paddle_mobile/index.rst b/docs/paddle_mobile/index.rst new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docs/requirements.txt b/docs/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..f11fa32f6f465f7b002d7fd37cbd78203206d8d7 --- /dev/null +++ b/docs/requirements.txt @@ -0,0 +1,4 @@ +sphinx +recommonmark +sphinx_markdown_tables +sphinx_rtd_theme diff --git a/docs/user_guides/index.rst b/docs/user_guides/index.rst new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391