Unverified commit a81542a0, authored by cuicheng01, committed by GitHub

Merge pull request #1826 from cuicheng01/develop

Add cls G1-G2 model lite tipc
ARM_ABI = arm8
export ARM_ABI
LITE_ROOT=./inference_lite_lib.android.armv8
include ${LITE_ROOT}/demo/cxx/Makefile.def
THIRD_PARTY_DIR=${LITE_ROOT}/third_party
@@ -29,7 +29,7 @@ OPENCV_LIBS = ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/${ARM_PATH}/libs/libopencv_im
${THIRD_PARTY_DIR}/${OPENCV_VERSION}/${ARM_PATH}/3rdparty/libs/libtbb.a \
${THIRD_PARTY_DIR}/${OPENCV_VERSION}/${ARM_PATH}/3rdparty/libs/libcpufeatures.a
OPENCV_INCLUDE = -I${LITE_ROOT}/third_party/${OPENCV_VERSION}/${ARM_PATH}/include
CXX_INCLUDES = $(INCLUDES) ${OPENCV_INCLUDE} -I$(LITE_ROOT)/cxx/include
......
clas_model_file /data/local/tmp/arm_cpu/MobileNetV3_large_x1_0.nb
label_path /data/local/tmp/arm_cpu/imagenet1k_label_list.txt
resize_short_size 256
crop_size 224
visualize 0
num_threads 1
batch_size 1
precision FP32
runtime_device arm_cpu
enable_benchmark 0
tipc_benchmark 0
@@ -21,6 +21,7 @@
#include <opencv2/opencv.hpp>
#include <sys/time.h>
#include <vector>
#include "AutoLog/auto_log/lite_autolog.h"
using namespace paddle::lite_api; // NOLINT
using namespace std;
@@ -149,8 +150,10 @@ cv::Mat CenterCropImg(const cv::Mat &img, const int &crop_size) {
std::vector<RESULT>
RunClasModel(std::shared_ptr<PaddlePredictor> predictor, const cv::Mat &img,
const std::map<std::string, std::string> &config,
const std::vector<std::string> &word_labels, double &cost_time,
std::vector<double> *time_info) {
// Read img
auto preprocess_start = std::chrono::steady_clock::now();
int resize_short_size = stoi(config.at("resize_short_size"));
int crop_size = stoi(config.at("crop_size"));
int visualize = stoi(config.at("visualize"));
@@ -172,8 +175,8 @@ RunClasModel(std::shared_ptr<PaddlePredictor> predictor, const cv::Mat &img,
std::vector<float> scale = {1 / 0.229f, 1 / 0.224f, 1 / 0.225f};
const float *dimg = reinterpret_cast<const float *>(img_fp.data);
NeonMeanScale(dimg, data0, img_fp.rows * img_fp.cols, mean, scale);
auto preprocess_end = std::chrono::steady_clock::now();
auto inference_start = std::chrono::system_clock::now();
// Run predictor
predictor->Run();
@@ -181,9 +184,10 @@ RunClasModel(std::shared_ptr<PaddlePredictor> predictor, const cv::Mat &img,
std::unique_ptr<const Tensor> output_tensor(
std::move(predictor->GetOutput(0)));
auto *output_data = output_tensor->data<float>();
auto inference_end = std::chrono::system_clock::now();
auto postprocess_start = std::chrono::system_clock::now();
auto duration =
std::chrono::duration_cast<std::chrono::microseconds>(inference_end - inference_start);
cost_time = double(duration.count()) *
std::chrono::microseconds::period::num /
std::chrono::microseconds::period::den;
@@ -196,6 +200,13 @@ RunClasModel(std::shared_ptr<PaddlePredictor> predictor, const cv::Mat &img,
cv::Mat output_image;
auto results =
PostProcess(output_data, output_size, word_labels, output_image);
auto postprocess_end = std::chrono::system_clock::now();
std::chrono::duration<float> preprocess_diff = preprocess_end - preprocess_start;
time_info->push_back(double(preprocess_diff.count() * 1000));
std::chrono::duration<float> inference_diff = inference_end - inference_start;
time_info->push_back(double(inference_diff.count() * 1000));
std::chrono::duration<float> postprocess_diff = postprocess_end - postprocess_start;
time_info->push_back(double(postprocess_diff.count() * 1000));
if (visualize) {
std::string output_image_path = "./clas_result.png";
@@ -309,6 +320,12 @@ int main(int argc, char **argv) {
std::string clas_model_file = config.at("clas_model_file");
std::string label_path = config.at("label_path");
std::string crop_size = config.at("crop_size");
int num_threads = stoi(config.at("num_threads"));
int batch_size = stoi(config.at("batch_size"));
std::string precision = config.at("precision");
std::string runtime_device = config.at("runtime_device");
bool tipc_benchmark = bool(stoi(config.at("tipc_benchmark")));
// Load Labels
std::vector<std::string> word_labels = LoadLabels(label_path);
@@ -319,8 +336,9 @@ int main(int argc, char **argv) {
cv::cvtColor(srcimg, srcimg, cv::COLOR_BGR2RGB);
double run_time = 0;
std::vector<double> time_info;
std::vector<RESULT> results =
RunClasModel(clas_predictor, srcimg, config, word_labels, run_time, &time_info);
std::cout << "===clas result for image: " << img_path << "===" << std::endl;
for (int i = 0; i < results.size(); i++) {
@@ -338,6 +356,19 @@ int main(int argc, char **argv) {
} else {
std::cout << "Current time cost: " << run_time << " s." << std::endl;
}
if (tipc_benchmark) {
AutoLogger autolog(clas_model_file,
runtime_device,
num_threads,
batch_size,
crop_size,
precision,
time_info,
1);
std::cout << "=======================TIPC Lite Information=======================" << std::endl;
autolog.report();
}
}
return 0;
......
@@ -25,8 +25,8 @@ Paddle Lite is PaddlePaddle's lightweight inference engine, which provides efficient inference for mobile and IoT devices
1. [Recommended] Download the prebuilt inference library directly; the download links are as follows:
|Platform|Inference Library Download Link|
|-|-|
|Android|[arm7](https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.10/inference_lite_lib.android.armv7.clang.c++_static.with_extra.with_cv.tar.gz) / [arm8](https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.10/inference_lite_lib.android.armv8.clang.c++_static.with_extra.with_cv.tar.gz)|
|iOS|[arm7](https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.10/inference_lite_lib.ios.armv7.with_cv.with_extra.tiny_publish.tar.gz) / [arm8](https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.10/inference_lite_lib.ios.armv8.with_cv.with_extra.tiny_publish.tar.gz)|
**Note**:
1. If the inference library is downloaded from the Paddle-Lite [official documentation](https://paddle-lite.readthedocs.io/zh/latest/quick_start/release_lib.html#android-toolchain-gcc),
@@ -44,11 +44,11 @@ git checkout develop
**Note**: When compiling Paddle-Lite to obtain the inference library, enable both options `--with_cv=ON --with_extra=ON`; `--arch` specifies the `arm` architecture version, which is set to armv8 here. For more compilation options, please refer to the [documentation](https://paddle-lite.readthedocs.io/zh/latest/user_guides/Compile/Android.html#id2).
After downloading and extracting the inference library, you will get the folder `inference_lite_lib.android.armv8.clang.c++_static.with_extra.with_cv/`; the inference library obtained by compiling Paddle-Lite yourself is located under `Paddle-Lite/build.lite.android.armv8.gcc/inference_lite_lib.android.armv8/`.
The directory structure of the inference library is as follows:
```
inference_lite_lib.android.armv8.clang.c++_static.with_extra.with_cv/
|-- cxx                          C++ inference library and header files
|   |-- include                  C++ header files
|   |   |-- paddle_api.h
@@ -86,7 +86,7 @@ Install `paddlelite` with Python; the highest supported version is currently Python 3.7.
**Note**: The version of the `paddlelite` wheel package must match the version of the inference library.
```shell
pip install paddlelite==2.10
```
After that, the `paddle_lite_opt` tool can be used to convert the inference model. Some of the parameters of `paddle_lite_opt` are as follows:
@@ -146,6 +146,24 @@ paddle_lite_opt --model_file=./MobileNetV3_large_x1_0_infer/inference.pdmodel --
**Note**: `--optimize_out` is the save path of the optimized model (no `.nb` suffix is needed); `--model_file` is the path of the model structure file and `--param_file` is the path of the model weights file; please pay attention to the file names.
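For reference, a complete conversion command for the MobileNetV3_large_x1_0 example above looks roughly as follows (the input file names assume the `MobileNetV3_large_x1_0_infer` directory obtained in the previous step):

```shell
paddle_lite_opt \
    --model_file=./MobileNetV3_large_x1_0_infer/inference.pdmodel \
    --param_file=./MobileNetV3_large_x1_0_infer/inference.pdiparams \
    --valid_targets=arm \
    --optimize_out=./MobileNetV3_large_x1_0
```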
<a name="2.1.4"></a>
#### 2.1.4 Compile to get the executable file clas_system
```shell
# Clone the AutoLog repository for automatic benchmark logging
cd PaddleClas_root_path
cd deploy/lite/
git clone https://github.com/LDOUBLEV/AutoLog.git
```
```shell
# Compile
make -j
```
After running the `make` command, the executable file `clas_system` is generated in the current directory; it is used for Lite prediction.
<a name="2.2与手机联调"></a> <a name="2.2与手机联调"></a>
### 2.2 与手机联调 ### 2.2 与手机联调
...@@ -167,7 +185,7 @@ paddle_lite_opt --model_file=./MobileNetV3_large_x1_0_infer/inference.pdmodel -- ...@@ -167,7 +185,7 @@ paddle_lite_opt --model_file=./MobileNetV3_large_x1_0_infer/inference.pdmodel --
win上安装需要去谷歌的安卓平台下载ADB软件包进行安装:[链接](https://developer.android.com/studio) win上安装需要去谷歌的安卓平台下载ADB软件包进行安装:[链接](https://developer.android.com/studio)
4. 手机连接电脑后,开启手机`USB调试`选项,选择`文件传输`模式,在电脑终端中输入: 3. 手机连接电脑后,开启手机`USB调试`选项,选择`文件传输`模式,在电脑终端中输入:
```shell ```shell
adb devices adb devices
...@@ -178,40 +196,18 @@ List of devices attached ...@@ -178,40 +196,18 @@ List of devices attached
744be294 device 744be294 device
``` ```
4. Push the optimized model, the inference library file, the test image, and the category mapping file to the phone.

```shell
adb shell mkdir -p /data/local/tmp/arm_cpu/
adb push clas_system /data/local/tmp/arm_cpu/
adb shell chmod +x /data/local/tmp/arm_cpu/clas_system
adb push inference_lite_lib.android.armv8.clang.c++_static.with_extra.with_cv/cxx/lib/libpaddle_light_api_shared.so /data/local/tmp/arm_cpu/
adb push MobileNetV3_large_x1_0.nb /data/local/tmp/arm_cpu/
adb push config.txt /data/local/tmp/arm_cpu/
adb push ../../ppcls/utils/imagenet1k_label_list.txt /data/local/tmp/arm_cpu/
adb push imgs/tabby_cat.jpg /data/local/tmp/arm_cpu/
```
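To double-check that all files landed on the device before running, a quick listing such as the following can be used (same directory as above):

```shell
adb shell ls -l /data/local/tmp/arm_cpu/
```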
#### Note:
@@ -224,32 +220,22 @@ clas_model_file ./MobileNetV3_large_x1_0.nb # path of the model file
label_path ./imagenet1k_label_list.txt # path of the category mapping file
resize_short_size 256 # short side length after resizing
crop_size 224 # side length used for prediction after cropping
visualize 0 # whether to visualize; if set to 1, an image named clas_result.png is generated in the current directory
num_threads 1 # number of threads, default 1
precision FP32 # precision type, FP32 or INT8, default FP32
runtime_device arm_cpu # device type, default arm_cpu
enable_benchmark 0 # whether to enable benchmark, default 0
tipc_benchmark 0 # whether to enable tipc_benchmark, default 0
```
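If the config file is pushed to the phone, the paths inside it must be the on-device paths. One possible way to script this (GNU sed shown; on macOS `sed -i ''` is needed, and the model and label names below are the ones from this example):

```shell
sed -i "s|clas_model_file .*|clas_model_file /data/local/tmp/arm_cpu/MobileNetV3_large_x1_0.nb|" config.txt
sed -i "s|label_path .*|label_path /data/local/tmp/arm_cpu/imagenet1k_label_list.txt|" config.txt
adb push config.txt /data/local/tmp/arm_cpu/
```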
5. Run the prediction command

Run the following command to complete the prediction on the phone:

```shell
adb shell 'export LD_LIBRARY_PATH=/data/local/tmp/arm_cpu/; /data/local/tmp/arm_cpu/clas_system /data/local/tmp/arm_cpu/config.txt /data/local/tmp/arm_cpu/tabby_cat.jpg'
```
If you modify the code, you need to recompile it and push the new files to the phone again.
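For example, a minimal rebuild-and-push cycle might look like this (device directory as above):

```shell
make -j
adb push clas_system /data/local/tmp/arm_cpu/
adb shell chmod +x /data/local/tmp/arm_cpu/clas_system
```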
The running result is as follows:
<div align="center">
@@ -263,3 +249,4 @@ A1: If the above steps work for you, switching models only requires replacing the `.nb` model …
Q2: How do I test with a different image?
A2: Replace the test image with the one you want to test, and push it to the phone again with ADB.
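For instance, assuming the new test image is `my_test.jpg` in the current directory (file name hypothetical):

```shell
adb push my_test.jpg /data/local/tmp/arm_cpu/
adb shell 'export LD_LIBRARY_PATH=/data/local/tmp/arm_cpu/; /data/local/tmp/arm_cpu/clas_system /data/local/tmp/arm_cpu/config.txt /data/local/tmp/arm_cpu/my_test.jpg'
```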
@@ -18,6 +18,7 @@ If you only want to test speed, please refer to [The tutorial of Paddle-Lite mob
- [2.1.1 [RECOMMEND] Use pip to install Paddle-Lite and optimize model](#2.1.1)
- [2.1.2 Compile Paddle-Lite to generate opt tool](#2.1.2)
- [2.1.3 Demo of get the optimized model](#2.1.3)
- [2.1.4 Compile to get the executable file clas_system](#2.1.4)
- [2.2 Run optimized model on Phone](#2.2)
- [3. FAQ](#3)
@@ -40,8 +41,8 @@ For the detailed compilation directions of different development environments, p
|Platform|Inference Library Download Link|
|-|-|
|Android|[arm7](https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.10/inference_lite_lib.android.armv7.clang.c++_static.with_extra.with_cv.tar.gz) / [arm8](https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.10/inference_lite_lib.android.armv8.clang.c++_static.with_extra.with_cv.tar.gz)|
|iOS|[arm7](https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.10/inference_lite_lib.ios.armv7.with_cv.with_extra.tiny_publish.tar.gz) / [arm8](https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.10/inference_lite_lib.ios.armv8.with_cv.with_extra.tiny_publish.tar.gz)|
**NOTE**:
@@ -53,7 +54,7 @@ For the detailed compilation directions of different development environments, p
The structure of the inference library is as follows:
```
inference_lite_lib.android.armv8.clang.c++_static.with_extra.with_cv/
|-- cxx                          C++ inference library and header files
|   |-- include                  C++ header files
|   |   |-- paddle_api.h
@@ -148,6 +149,23 @@ paddle_lite_opt --model_file=./MobileNetV3_large_x1_0_infer/inference.pdmodel --
```
When the above command has completed, `MobileNetV3_large_x1_0.nb` will appear in the current directory, which is the converted model file.
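For reference, the full conversion command truncated above is along the following lines (file names taken from the MobileNetV3_large_x1_0 example):

```shell
paddle_lite_opt \
    --model_file=./MobileNetV3_large_x1_0_infer/inference.pdmodel \
    --param_file=./MobileNetV3_large_x1_0_infer/inference.pdiparams \
    --valid_targets=arm \
    --optimize_out=./MobileNetV3_large_x1_0
```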
<a name="2.1.4"></a>
#### 2.1.4 Compile to get the executable file clas_system
```shell
# Clone the AutoLog repository for automatic benchmark logging
cd PaddleClas_root_path
cd deploy/lite/
git clone https://github.com/LDOUBLEV/AutoLog.git
```
```shell
# Compile
make -j
```
After executing the `make` command, the `clas_system` executable file is generated in the current directory, which is used for Lite prediction.
<a name="2.2"></a> <a name="2.2"></a>
## 2.2 Run optimized model on Phone ## 2.2 Run optimized model on Phone
...@@ -172,7 +190,7 @@ When the above code command is completed, there will be ``MobileNetV3_large_x1_0 ...@@ -172,7 +190,7 @@ When the above code command is completed, there will be ``MobileNetV3_large_x1_0
* Install ADB for windows * Install ADB for windows
If install ADB fo Windows, you need to download from Google's Android platform: [Download Link](https://developer.android.com/studio). If install ADB fo Windows, you need to download from Google's Android platform: [Download Link](https://developer.android.com/studio).
First, make sure the phone is connected to the computer, turn on the `USB debugging` option of the phone, and select the `file transfer` mode. Verify whether ADB is installed successfully as follows: 3. First, make sure the phone is connected to the computer, turn on the `USB debugging` option of the phone, and select the `file transfer` mode. Verify whether ADB is installed successfully as follows:
```shell ```shell
$ adb devices $ adb devices
...@@ -183,42 +201,22 @@ When the above code command is completed, there will be ``MobileNetV3_large_x1_0 ...@@ -183,42 +201,22 @@ When the above code command is completed, there will be ``MobileNetV3_large_x1_0
If there is `device` output like the above, it means the installation was successful. If there is `device` output like the above, it means the installation was successful.
4. Push the optimized model, prediction library file, test image and class map file to the phone.

```shell
adb shell mkdir -p /data/local/tmp/arm_cpu/
adb push clas_system /data/local/tmp/arm_cpu/
adb shell chmod +x /data/local/tmp/arm_cpu/clas_system
adb push inference_lite_lib.android.armv8.clang.c++_static.with_extra.with_cv/cxx/lib/libpaddle_light_api_shared.so /data/local/tmp/arm_cpu/
adb push MobileNetV3_large_x1_0.nb /data/local/tmp/arm_cpu/
adb push config.txt /data/local/tmp/arm_cpu/
adb push ../../ppcls/utils/imagenet1k_label_list.txt /data/local/tmp/arm_cpu/
adb push imgs/tabby_cat.jpg /data/local/tmp/arm_cpu/
```

You should put the model optimized by `paddle_lite_opt` under the `demo/cxx/clas/debug/` directory. In this example, use the `MobileNetV3_large_x1_0.nb` model file generated in [2.1.3](#2.1.3).
**NOTE**:
* `imagenet1k_label_list.txt` is the category mapping file of the `ImageNet1k` dataset. If you use custom categories, you need to replace this category mapping file.
@@ -229,33 +227,22 @@ clas_model_file ./MobileNetV3_large_x1_0.nb # path of model file
label_path ./imagenet1k_label_list.txt # path of category mapping file
resize_short_size 256 # the short side length after resize
crop_size 224 # side length used for inference after cropping
visualize 0 # whether to visualize. If you set it to 1, an image file named 'clas_result.png' will be generated in the current directory.
num_threads 1 # The number of threads, the default is 1
precision FP32 # Precision type, you can choose FP32 or INT8, the default is FP32
runtime_device arm_cpu # Device type, the default is arm_cpu
enable_benchmark 0 # Whether to enable benchmark, the default is 0
tipc_benchmark 0 # Whether to enable tipc_benchmark, the default is 0
```
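To collect the TIPC benchmark output mentioned above, the corresponding switch can be enabled in `config.txt` before pushing it to the device, for example (GNU sed shown; key name as listed above):

```shell
sed -i "s/tipc_benchmark.*/tipc_benchmark 1/" config.txt
adb push config.txt /data/local/tmp/arm_cpu/
```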
5. Run Model on Phone

Execute the following command to complete the prediction on the mobile phone.

```shell
adb shell 'export LD_LIBRARY_PATH=/data/local/tmp/arm_cpu/; /data/local/tmp/arm_cpu/clas_system /data/local/tmp/arm_cpu/config.txt /data/local/tmp/arm_cpu/tabby_cat.jpg'
```
**NOTE**: If you make changes to the code, you need to recompile and push the new executable and any changed files to the phone again.
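A typical rebuild, push and rerun sequence might be (device paths as used above):

```shell
make -j
adb push clas_system /data/local/tmp/arm_cpu/ && adb shell chmod +x /data/local/tmp/arm_cpu/clas_system
adb shell 'export LD_LIBRARY_PATH=/data/local/tmp/arm_cpu/; /data/local/tmp/arm_cpu/clas_system /data/local/tmp/arm_cpu/config.txt /data/local/tmp/arm_cpu/tabby_cat.jpg'
```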
The result is as follows:
![](../../images/inference_deployment/lite_demo_result.png)
......
@@ -16,6 +16,14 @@ function func_parser_value(){
echo ${tmp}
}
function func_parser_value_lite(){
strs=$1
IFS=$2
array=(${strs})
tmp=${array[1]}
echo ${tmp}
}
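# Example usage (hypothetical input): func_parser_value_lite "clas_model_file:MobileNetV3_large_x1_0" ":"
# splits the string on ":" and prints the second field, i.e. "MobileNetV3_large_x1_0".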
function func_set_params(){
key=$1
value=$2
......
runtime_device:arm_cpu
lite_arm_work_path:/data/local/tmp/arm_cpu/
lite_arm_so_path:inference_lite_lib.android.armv8/cxx/lib/libpaddle_light_api_shared.so
clas_model_file:MobileNetV3_large_x1_0
inference_cmd:clas_system config.txt tabby_cat.jpg
--num_threads_list:1
--batch_size_list:1
--precision_list:FP32
runtime_device:arm_cpu
lite_arm_work_path:/data/local/tmp/arm_cpu/
lite_arm_so_path:inference_lite_lib.android.armv8/cxx/lib/libpaddle_light_api_shared.so
clas_model_file:MobileNetV3_large_x1_0
inference_cmd:clas_system config.txt tabby_cat.jpg
--num_threads_list:1
--batch_size_list:1
--precision_list:FP32
runtime_device:arm_cpu
lite_arm_work_path:/data/local/tmp/arm_cpu/
lite_arm_so_path:inference_lite_lib.android.armv8/cxx/lib/libpaddle_light_api_shared.so
clas_model_file:PPLCNet_x0_25
inference_cmd:clas_system config.txt tabby_cat.jpg
--num_threads_list:1
--batch_size_list:1
--precision_list:FP32
runtime_device:arm_cpu
lite_arm_work_path:/data/local/tmp/arm_cpu/
lite_arm_so_path:inference_lite_lib.android.armv8/cxx/lib/libpaddle_light_api_shared.so
clas_model_file:PPLCNet_x0_5
inference_cmd:clas_system config.txt tabby_cat.jpg
--num_threads_list:1
--batch_size_list:1
--precision_list:FP32
runtime_device:arm_cpu
lite_arm_work_path:/data/local/tmp/arm_cpu/
lite_arm_so_path:inference_lite_lib.android.armv8/cxx/lib/libpaddle_light_api_shared.so
clas_model_file:PPLCNet_x0_75
inference_cmd:clas_system config.txt tabby_cat.jpg
--num_threads_list:1
--batch_size_list:1
--precision_list:FP32
runtime_device:arm_cpu
lite_arm_work_path:/data/local/tmp/arm_cpu/
lite_arm_so_path:inference_lite_lib.android.armv8/cxx/lib/libpaddle_light_api_shared.so
clas_model_file:PPLCNet_x1_0
inference_cmd:clas_system config.txt tabby_cat.jpg
--num_threads_list:1
--batch_size_list:1
--precision_list:FP32
runtime_device:arm_cpu
lite_arm_work_path:/data/local/tmp/arm_cpu/
lite_arm_so_path:inference_lite_lib.android.armv8/cxx/lib/libpaddle_light_api_shared.so
clas_model_file:PPLCNet_x1_5
inference_cmd:clas_system config.txt tabby_cat.jpg
--num_threads_list:1
--batch_size_list:1
--precision_list:FP32
runtime_device:arm_cpu
lite_arm_work_path:/data/local/tmp/arm_cpu/
lite_arm_so_path:inference_lite_lib.android.armv8/cxx/lib/libpaddle_light_api_shared.so
clas_model_file:PPLCNet_x2_0
inference_cmd:clas_system config.txt tabby_cat.jpg
--num_threads_list:1
--batch_size_list:1
--precision_list:FP32
runtime_device:arm_cpu
lite_arm_work_path:/data/local/tmp/arm_cpu/
lite_arm_so_path:inference_lite_lib.android.armv8/cxx/lib/libpaddle_light_api_shared.so
clas_model_file:PPLCNet_x2_5
inference_cmd:clas_system config.txt tabby_cat.jpg
--num_threads_list:1
--batch_size_list:1
--precision_list:FP32
runtime_device:arm_cpu
lite_arm_work_path:/data/local/tmp/arm_cpu/
lite_arm_so_path:inference_lite_lib.android.armv8/cxx/lib/libpaddle_light_api_shared.so
clas_model_file:ResNet50
inference_cmd:clas_system config.txt tabby_cat.jpg
--num_threads_list:1
--batch_size_list:1
--precision_list:FP32
runtime_device:arm_cpu
lite_arm_work_path:/data/local/tmp/arm_cpu/
lite_arm_so_path:inference_lite_lib.android.armv8/cxx/lib/libpaddle_light_api_shared.so
clas_model_file:ResNet50_vd
inference_cmd:clas_system config.txt tabby_cat.jpg
--num_threads_list:1
--batch_size_list:1
--precision_list:FP32
runtime_device:arm_cpu
lite_arm_work_path:/data/local/tmp/arm_cpu/
lite_arm_so_path:inference_lite_lib.android.armv8/cxx/lib/libpaddle_light_api_shared.so
clas_model_file:SwinTransformer_tiny_patch4_window7_224
inference_cmd:clas_system config.txt tabby_cat.jpg
--num_threads_list:1
--batch_size_list:1
--precision_list:FP32
# Lite_arm_cpp_cpu inference function test
The main program of the Lite_arm_cpp_cpu inference function test is `test_lite_arm_cpu_cpp.sh`, which tests model inference based on the Paddle-Lite inference library.
## 1. Test summary
| Model type | Device | Batch size | Precision | Threads |
| :----: | :----: | :----: | :----: | :----: |
| Normal model | arm_cpu | 1 | FP32 | 1 |
## 2. Test procedure
To set up the runtime environment, please refer to the [documentation](https://github.com/PaddlePaddle/models/blob/release/2.2/tutorials/mobilenetv3_prod/Step6/deploy/lite_infer_cpp_arm_cpu/README.md) to configure the TIPC Lite environment.
### 2.1 Function test
First run `prepare_lite_arm_cpu_cpp.sh` to prepare the data and model, then run `test_lite_arm_cpu_cpp.sh` to perform the test (see the command sketch after the block below); log files named `lite_*.log` are finally generated under the `./output` directory.
```shell
bash test_tipc/prepare_lite_arm_cpu_cpp.sh test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0_lite_arm_cpu_cpp.txt
```
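The test step can then be run with the same config file (assuming the MobileNetV3 config used above):

```shell
bash test_tipc/test_lite_arm_cpu_cpp.sh test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0_lite_arm_cpu_cpp.txt
```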
After running the prediction commands, the run logs are automatically saved under the `./output` folder, including the following files:
```shell
test_tipc/output/
|- results.log    # log of the exit status of each executed command
|- lite_MobileNetV3_large_x1_0_runtime_device_arm_cpu_precision_FP32_batchsize_1_threads_1.log    # prediction log on ARM_CPU with FP32 precision, 1 thread, and batch_size=1
......
```
`results.log` records the status of every command. If a command runs successfully, it contains output such as:
```
Run successfully with command - adb shell 'export LD_LIBRARY_PATH=/data/local/tmp/arm_cpu/; /data/local/tmp/arm_cpu/mobilenet_v3 /data/local/tmp/arm_cpu/config.txt /data/local/tmp/arm_cpu/demo.jpg' > ./output/lite_MobileNetV3_large_x1_0_runtime_device_arm_cpu_precision_FP32_batchsize_1_threads_1.log 2>&1!
......
```
If a command fails, it contains output such as:
```
Run failed with command - adb shell 'export LD_LIBRARY_PATH=/data/local/tmp/arm_cpu/; /data/local/tmp/arm_cpu/mobilenet_v3 /data/local/tmp/arm_cpu/config.txt /data/local/tmp/arm_cpu/demo.jpg' > ./output/lite_MobileNetV3_large_x1_0_runtime_device_arm_cpu_precision_FP32_batchsize_1_threads_1.log 2>&1!
......
```
This makes it easy to tell from `results.log` which command failed.
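For example, failed commands can be listed directly from the status log (path as generated above):

```shell
grep "Run failed" test_tipc/output/results.log
```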
## 3. More tutorials
This document is for function testing only. For a more detailed tutorial on Lite inference, please refer to: [Paddle-Lite inference deployment](../../docs/zh_CN/inference_deployment/paddle_lite_deploy.md)
#!/bin/bash
source test_tipc/common_func.sh
BASIC_CONFIG="./config.txt"
CONFIG=$1
# parser tipc config
IFS=$'\n'
TIPC_CONFIG=$1
tipc_dataline=$(cat $TIPC_CONFIG)
tipc_lines=(${tipc_dataline})
runtime_device=$(func_parser_value_lite "${tipc_lines[0]}" ":")
lite_arm_work_path=$(func_parser_value_lite "${tipc_lines[1]}" ":")
lite_arm_so_path=$(func_parser_value_lite "${tipc_lines[2]}" ":")
clas_model_name=$(func_parser_value_lite "${tipc_lines[3]}" ":")
inference_cmd=$(func_parser_value_lite "${tipc_lines[4]}" ":")
num_threads_list=$(func_parser_value_lite "${tipc_lines[5]}" ":")
batch_size_list=$(func_parser_value_lite "${tipc_lines[6]}" ":")
precision_list=$(func_parser_value_lite "${tipc_lines[7]}" ":")
# Prepare config and test.sh
work_path="./deploy/lite"
cp ${CONFIG} ${work_path}
cp test_tipc/test_lite_arm_cpu_cpp.sh ${work_path}
# Prepare model
cd ${work_path}
pip3 install paddlelite==2.10
model_url="https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/${clas_model_name}_infer.tar"
wget --no-proxy ${model_url}
model_tar=$(echo ${model_url} | awk -F "/" '{print $NF}')
tar -xf ${model_tar}
paddle_lite_opt --model_dir=${clas_model_name}_infer --model_file=${clas_model_name}_infer/inference.pdmodel --param_file=${clas_model_name}_infer/inference.pdiparams --valid_targets=arm --optimize_out=${clas_model_name}
rm -rf ${clas_model_name}_infer*
# Prepare paddlelite library
paddlelite_lib_url="https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.10/inference_lite_lib.android.armv8.clang.c++_static.with_extra.with_cv.tar.gz"
wget ${paddlelite_lib_url}
paddlelite_lib_file=$(echo ${paddlelite_lib_url} | awk -F "/" '{print $NF}')
tar -xzf ${paddlelite_lib_file}
mv ${paddlelite_lib_file%*.tar.gz} inference_lite_lib.android.armv8
rm -rf ${paddlelite_lib_file%*.tar.gz}*
# Compile and obtain executable binary file
git clone https://github.com/LDOUBLEV/AutoLog.git
make
# push executable binary, library, lite model, data, etc. to arm device
adb shell mkdir -p ${lite_arm_work_path}
adb push $(echo ${inference_cmd} | awk '{print $1}') ${lite_arm_work_path}
adb shell chmod +x ${lite_arm_work_path}/$(echo ${inference_cmd} | awk '{print $1}')
adb push ${lite_arm_so_path} ${lite_arm_work_path}
adb push ${clas_model_name}.nb ${lite_arm_work_path}
adb push ${BASIC_CONFIG} ${lite_arm_work_path}
adb push ../../ppcls/utils/imagenet1k_label_list.txt ${lite_arm_work_path}
adb push imgs/$(echo ${inference_cmd} | awk '{print $3}') ${lite_arm_work_path}
#!/bin/bash
source ./test_tipc/common_func.sh
FILENAME=$1
dataline=$(cat ${FILENAME})
# parser params
IFS=$'\n'
lines=(${dataline})
IFS=$'\n'
inference_cmd=$(func_parser_value "${lines[1]}")
DEVICE=$(func_parser_value "${lines[2]}")
det_lite_model_list=$(func_parser_value "${lines[3]}")
rec_lite_model_list=$(func_parser_value "${lines[4]}")
cls_lite_model_list=$(func_parser_value "${lines[5]}")
if [[ $inference_cmd =~ "det" ]];then
lite_model_list=${det_lite_model_list}
elif [[ $inference_cmd =~ "rec" ]];then
lite_model_list=(${rec_lite_model_list[*]} ${cls_lite_model_list[*]})
elif [[ $inference_cmd =~ "system" ]];then
lite_model_list=(${det_lite_model_list[*]} ${rec_lite_model_list[*]} ${cls_lite_model_list[*]})
else
echo "inference_cmd is wrong, please check."
exit 1
fi
if [ ${DEVICE} = "ARM_CPU" ];then
valid_targets="arm"
paddlelite_url="https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.10-rc/inference_lite_lib.android.armv8.gcc.c++_shared.with_extra.with_cv.tar.gz"
end_index="66"
elif [ ${DEVICE} = "ARM_GPU_OPENCL" ];then
valid_targets="opencl"
paddlelite_url="https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.10-rc/inference_lite_lib.armv8.clang.with_exception.with_extra.with_cv.opencl.tar.gz"
end_index="71"
else
echo "DEVICE only suport ARM_CPU, ARM_GPU_OPENCL."
exit 2
fi
# prepare lite .nb model
pip install paddlelite==2.10-rc
current_dir=${PWD}
IFS="|"
model_path=./inference_models
for model in ${lite_model_list[*]}; do
if [[ $model =~ "PP-OCRv2" ]];then
inference_model_url=https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/${model}.tar
elif [[ $model =~ "v2.0" ]];then
inference_model_url=https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/${model}.tar
else
echo "Model is wrong, please check."
exit 3
fi
inference_model=${inference_model_url##*/}
wget -nc -P ${model_path} ${inference_model_url}
cd ${model_path} && tar -xf ${inference_model} && cd ../
model_dir=${model_path}/${inference_model%.*}
model_file=${model_dir}/inference.pdmodel
param_file=${model_dir}/inference.pdiparams
paddle_lite_opt --model_dir=${model_dir} --model_file=${model_file} --param_file=${param_file} --valid_targets=${valid_targets} --optimize_out=${model_dir}_opt
done
# prepare test data
data_url=https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015_lite.tar
model_path=./inference_models
inference_model=${inference_model_url##*/}
data_file=${data_url##*/}
wget -nc -P ./inference_models ${inference_model_url}
wget -nc -P ./test_data ${data_url}
cd ./inference_models && tar -xf ${inference_model} && cd ../
cd ./test_data && tar -xf ${data_file} && rm ${data_file} && cd ../
# prepare lite env
paddlelite_zipfile=$(echo $paddlelite_url | awk -F "/" '{print $NF}')
paddlelite_file=${paddlelite_zipfile:0:${end_index}}
wget ${paddlelite_url} && tar -xf ${paddlelite_zipfile}
mkdir -p ${paddlelite_file}/demo/cxx/ocr/test_lite
cp -r ${model_path}/*_opt.nb test_data ${paddlelite_file}/demo/cxx/ocr/test_lite
cp ppocr/utils/ppocr_keys_v1.txt deploy/lite/config.txt ${paddlelite_file}/demo/cxx/ocr/test_lite
cp -r ./deploy/lite/* ${paddlelite_file}/demo/cxx/ocr/
cp ${paddlelite_file}/cxx/lib/libpaddle_light_api_shared.so ${paddlelite_file}/demo/cxx/ocr/test_lite
cp ${FILENAME} test_tipc/test_lite_arm_cpp.sh test_tipc/common_func.sh ${paddlelite_file}/demo/cxx/ocr/test_lite
cd ${paddlelite_file}/demo/cxx/ocr/
git clone https://github.com/cuicheng01/AutoLog.git
# make
make -j
sleep 1
make -j
cp ocr_db_crnn test_lite && cp test_lite/libpaddle_light_api_shared.so test_lite/libc++_shared.so
tar -cf test_lite.tar ./test_lite && cp test_lite.tar ${current_dir} && cd ${current_dir}
rm -rf ${paddlelite_file}* && rm -rf ${model_path}
#!/bin/bash
source ./common_func.sh
export LD_LIBRARY_PATH=${PWD}:$LD_LIBRARY_PATH
FILENAME=$1
dataline=$(cat $FILENAME)
# parser params
IFS=$'\n'
lines=(${dataline})
# parser lite inference
inference_cmd=$(func_parser_value "${lines[1]}")
runtime_device=$(func_parser_value "${lines[2]}")
det_model_list=$(func_parser_value "${lines[3]}")
rec_model_list=$(func_parser_value "${lines[4]}")
cls_model_list=$(func_parser_value "${lines[5]}")
cpu_threads_list=$(func_parser_value "${lines[6]}")
det_batch_size_list=$(func_parser_value "${lines[7]}")
rec_batch_size_list=$(func_parser_value "${lines[8]}")
infer_img_dir_list=$(func_parser_value "${lines[9]}")
config_dir=$(func_parser_value "${lines[10]}")
rec_dict_dir=$(func_parser_value "${lines[11]}")
benchmark_value=$(func_parser_value "${lines[12]}")
if [[ $inference_cmd =~ "det" ]]; then
lite_model_list=${det_model_list}
elif [[ $inference_cmd =~ "rec" ]]; then
lite_model_list=(${rec_model_list[*]} ${cls_model_list[*]})
elif [[ $inference_cmd =~ "system" ]]; then
lite_model_list=(${det_model_list[*]} ${rec_model_list[*]} ${cls_model_list[*]})
else
echo "inference_cmd is wrong, please check."
exit 1
fi
LOG_PATH="./output"
mkdir -p ${LOG_PATH}
status_log="${LOG_PATH}/results.log"
function func_test_det(){
IFS='|'
_script=$1
_det_model=$2
_log_path=$3
_img_dir=$4
_config=$5
if [[ $_det_model =~ "slim" ]]; then
precision="INT8"
else
precision="FP32"
fi
# lite inference
for num_threads in ${cpu_threads_list[*]}; do
for det_batchsize in ${det_batch_size_list[*]}; do
_save_log_path="${_log_path}/lite_${_det_model}_runtime_device_${runtime_device}_precision_${precision}_det_batchsize_${det_batchsize}_threads_${num_threads}.log"
command="${_script} ${_det_model} ${runtime_device} ${precision} ${num_threads} ${det_batchsize} ${_img_dir} ${_config} ${benchmark_value} > ${_save_log_path} 2>&1"
eval ${command}
status_check $? "${command}" "${status_log}"
done
done
}
function func_test_rec(){
IFS='|'
_script=$1
_rec_model=$2
_cls_model=$3
_log_path=$4
_img_dir=$5
_config=$6
_rec_dict_dir=$7
if [[ $_rec_model =~ "slim" ]]; then
_precision="INT8"
else
_precision="FP32"
fi
# lite inference
for num_threads in ${cpu_threads_list[*]}; do
for rec_batchsize in ${rec_batch_size_list[*]}; do
_save_log_path="${_log_path}/lite_${_rec_model}_${cls_model}_runtime_device_${runtime_device}_precision_${_precision}_rec_batchsize_${rec_batchsize}_threads_${num_threads}.log"
command="${_script} ${_rec_model} ${_cls_model} ${runtime_device} ${_precision} ${num_threads} ${rec_batchsize} ${_img_dir} ${_config} ${_rec_dict_dir} ${benchmark_value} > ${_save_log_path} 2>&1"
eval ${command}
status_check $? "${command}" "${status_log}"
done
done
}
function func_test_system(){
IFS='|'
_script=$1
_det_model=$2
_rec_model=$3
_cls_model=$4
_log_path=$5
_img_dir=$6
_config=$7
_rec_dict_dir=$8
if [[ $_det_model =~ "slim" ]]; then
_precision="INT8"
else
_precision="FP32"
fi
# lite inference
for num_threads in ${cpu_threads_list[*]}; do
for det_batchsize in ${det_batch_size_list[*]}; do
for rec_batchsize in ${rec_batch_size_list[*]}; do
_save_log_path="${_log_path}/lite_${_det_model}_${_rec_model}_${_cls_model}_runtime_device_${runtime_device}_precision_${_precision}_det_batchsize_${det_batchsize}_rec_batchsize_${rec_batchsize}_threads_${num_threads}.log"
command="${_script} ${_det_model} ${_rec_model} ${_cls_model} ${runtime_device} ${_precision} ${num_threads} ${det_batchsize} ${_img_dir} ${_config} ${_rec_dict_dir} ${benchmark_value} > ${_save_log_path} 2>&1"
eval ${command}
status_check $? "${command}" "${status_log}"
done
done
done
}
echo "################### run test ###################"
if [[ $inference_cmd =~ "det" ]]; then
IFS="|"
det_model_list=(${det_model_list[*]})
for i in {0..1}; do
#run lite inference
for img_dir in ${infer_img_dir_list[*]}; do
func_test_det "${inference_cmd}" "${det_model_list[i]}_opt.nb" "${LOG_PATH}" "${img_dir}" "${config_dir}"
done
done
elif [[ $inference_cmd =~ "rec" ]]; then
IFS="|"
rec_model_list=(${rec_model_list[*]})
cls_model_list=(${cls_model_list[*]})
for i in {0..1}; do
#run lite inference
for img_dir in ${infer_img_dir_list[*]}; do
func_test_rec "${inference_cmd}" "${rec_model_list[i]}_opt.nb" "${cls_model_list[i]}_opt.nb" "${LOG_PATH}" "${img_dir}" "${config_dir}" "${rec_dict_dir}"
done
done
elif [[ $inference_cmd =~ "system" ]]; then
IFS="|"
det_model_list=(${det_model_list[*]})
rec_model_list=(${rec_model_list[*]})
cls_model_list=(${cls_model_list[*]})
for i in {0..1}; do
#run lite inference
for img_dir in ${infer_img_dir_list[*]}; do
func_test_system "${inference_cmd}" "${det_model_list[i]}_opt.nb" "${rec_model_list[i]}_opt.nb" "${cls_model_list[i]}_opt.nb" "${LOG_PATH}" "${img_dir}" "${config_dir}" "${rec_dict_dir}"
done
done
fi
#!/bin/bash
source test_tipc/common_func.sh
current_path=$PWD
IFS=$'\n'
TIPC_CONFIG=$1
tipc_dataline=$(cat $TIPC_CONFIG)
tipc_lines=(${tipc_dataline})
work_path="./deploy/lite"
cd ${work_path}
BASIC_CONFIG="config.txt"
basic_dataline=$(cat $BASIC_CONFIG)
basic_lines=(${basic_dataline})
# parser basic config
label_path=$(func_parser_value_lite "${basic_lines[1]}" " ")
resize_short_size=$(func_parser_value_lite "${basic_lines[2]}" " ")
crop_size=$(func_parser_value_lite "${basic_lines[3]}" " ")
visualize=$(func_parser_value_lite "${basic_lines[4]}" " ")
enable_benchmark=$(func_parser_value_lite "${basic_lines[9]}" " ")
tipc_benchmark=$(func_parser_value_lite "${basic_lines[10]}" " ")
# parser tipc config
runtime_device=$(func_parser_value_lite "${tipc_lines[0]}" ":")
lite_arm_work_path=$(func_parser_value_lite "${tipc_lines[1]}" ":")
lite_arm_so_path=$(func_parser_value_lite "${tipc_lines[2]}" ":")
clas_model_name=$(func_parser_value_lite "${tipc_lines[3]}" ":")
inference_cmd=$(func_parser_value_lite "${tipc_lines[4]}" ":")
num_threads_list=$(func_parser_value_lite "${tipc_lines[5]}" ":")
batch_size_list=$(func_parser_value_lite "${tipc_lines[6]}" ":")
precision_list=$(func_parser_value_lite "${tipc_lines[7]}" ":")
LOG_PATH=${current_path}"/output"
mkdir -p ${LOG_PATH}
status_log="${LOG_PATH}/results.log"
#run Lite TIPC
function func_test_tipc(){
IFS="|"
_basic_config=$1
_model_name=$2
_log_path=$3
for num_threads in ${num_threads_list[*]}; do
if [ $(uname) = "Darwin" ]; then
sed -i " " "s/num_threads.*/num_threads ${num_threads}/" ${_basic_config}
elif [ $(expr substr $(uname -s) 1 5) = "Linux" ]; then
sed -i "s/num_threads.*/num_threads ${num_threads}/" ${_basic_config}
fi
for batch_size in ${batch_size_list[*]}; do
if [ $(uname) = "Darwin" ]; then
sed -i " " "s/batch_size.*/batch_size ${batch_size}/" ${_basic_config}
elif [ $(expr substr $(uname -s) 1 5) = "Linux" ]; then
sed -i "s/batch_size.*/batch_size ${batch_size}/" ${_basic_config}
fi
for precision in ${precision_list[*]}; do
if [ $(uname) = "Darwin" ]; then
sed -i " " "s/precision.*/precision ${precision}/" ${_basic_config}
elif [ $(expr substr $(uname -s) 1 5) = "Linux" ]; then
sed -i "s/precision.*/precision ${precision}/" ${_basic_config}
fi
_save_log_path="${_log_path}/lite_${_model_name}_runtime_device_${runtime_device}_precision_${precision}_batchsize_${batch_size}_threads_${num_threads}.log"
real_inference_cmd=$(echo ${inference_cmd} | awk -F " " '{print path $1" "path $2" "path $3}' path="$lite_arm_work_path")
command1="adb push ${_basic_config} ${lite_arm_work_path}"
eval ${command1}
command2="adb shell 'export LD_LIBRARY_PATH=${lite_arm_work_path}; ${real_inference_cmd}' > ${_save_log_path} 2>&1"
eval ${command2}
status_check $? "${command2}" "${status_log}"
done
done
done
}
echo "################### run test tipc ###################"
label_map=$(echo ${label_path} | awk -F "/" '{print $NF}')
if [ $(uname) = "Darwin" ]; then
# for Mac
sed -i " " "s/runtime_device.*/runtime_device arm_cpu/" ${BASIC_CONFIG}
escape_lite_arm_work_path=$(echo ${lite_arm_work_path//\//\\\/})
sed -i " " "s/clas_model_file.*/clas_model_file ${escape_lite_arm_work_path}${clas_model_name}.nb/" ${BASIC_CONFIG}
sed -i " " "s/label_path.*/label_path ${escape_lite_arm_work_path}${label_map}/" ${BASIC_CONFIG}
sed -i " " "s/tipc_benchmark.*/tipc_benchmark 1/" ${BASIC_CONFIG}
elif [ $(expr substr $(uname -s) 1 5) = "Linux" ]; then
# for Linux
sed -i "s/runtime_device.*/runtime_device arm_cpu/" ${BASIC_CONFIG}
escape_lite_arm_work_path=$(echo ${lite_arm_work_path//\//\\\/})
sed -i "s/clas_model_file.*/clas_model_file ${escape_lite_arm_work_path}${clas_model_name}/" ${BASIC_CONFIG}
sed -i "s/label_path.*/label_path ${escape_lite_arm_work_path}${label_path}/" ${BASIC_CONFIG}
sed -i "s/tipc_benchmark.*/tipc_benchmark 1/" ${BASIC_CONFIG}
fi
func_test_tipc ${BASIC_CONFIG} ${clas_model_name} ${LOG_PATH}