Unverified commit c8b51f82 authored by yiicy, committed by GitHub

[Demo] add cxx mobilenetv1-ssd detection demo, test=develop (#2541)

* [Demo] add cxx mobilenetv1-ssd detection demo, test=develop

* add makefile to mobile detection demo, test=develop

* [Demo] add cxx mobilenetv1-ssd detection demo, test=develop

* [demo] fix mobile_detection code style, test=develop

* [Demo] fix demo code style, test=develop

* [Demo] fix detection demo makefile dependency, test=develop
Parent 137d7a6d
@@ -216,6 +216,8 @@ if (LITE_WITH_LIGHT_WEIGHT_FRAMEWORK AND LITE_WITH_ARM)
COMMAND cp "${CMAKE_SOURCE_DIR}/lite/demo/cxx/makefiles/mobile_full/Makefile.${ARM_TARGET_OS}.${ARM_TARGET_ARCH_ABI}" "${INFER_LITE_PUBLISH_ROOT}/demo/cxx/mobile_full/Makefile"
COMMAND cp -r "${CMAKE_SOURCE_DIR}/lite/demo/cxx/mobile_light" "${INFER_LITE_PUBLISH_ROOT}/demo/cxx"
COMMAND cp "${CMAKE_SOURCE_DIR}/lite/demo/cxx/makefiles/mobile_light/Makefile.${ARM_TARGET_OS}.${ARM_TARGET_ARCH_ABI}" "${INFER_LITE_PUBLISH_ROOT}/demo/cxx/mobile_light/Makefile"
COMMAND cp -r "${CMAKE_SOURCE_DIR}/lite/demo/cxx/mobile_detection" "${INFER_LITE_PUBLISH_ROOT}/demo/cxx"
COMMAND cp "${CMAKE_SOURCE_DIR}/lite/demo/cxx/makefiles/mobile_detection/Makefile.${ARM_TARGET_OS}.${ARM_TARGET_ARCH_ABI}" "${INFER_LITE_PUBLISH_ROOT}/demo/cxx/mobile_detection/Makefile"
COMMAND cp "${CMAKE_SOURCE_DIR}/lite/api/paddle_*.h" "${INFER_LITE_PUBLISH_ROOT}/demo/cxx/include"
)
add_dependencies(publish_inference_android_cxx_demos logging gflags)
@@ -228,6 +230,9 @@ if (LITE_WITH_LIGHT_WEIGHT_FRAMEWORK AND LITE_WITH_ARM)
COMMAND cp "${CMAKE_SOURCE_DIR}/lite/demo/cxx/README.md" "${INFER_LITE_PUBLISH_ROOT}/demo/cxx"
COMMAND cp -r "${CMAKE_SOURCE_DIR}/lite/demo/cxx/mobile_light" "${INFER_LITE_PUBLISH_ROOT}/demo/cxx"
COMMAND cp "${CMAKE_SOURCE_DIR}/lite/demo/cxx/makefiles/mobile_light/Makefile.${ARM_TARGET_OS}.${ARM_TARGET_ARCH_ABI}" "${INFER_LITE_PUBLISH_ROOT}/demo/cxx/mobile_light/Makefile"
COMMAND cp -r "${CMAKE_SOURCE_DIR}/lite/demo/cxx/mobile_detection" "${INFER_LITE_PUBLISH_ROOT}/demo/cxx"
COMMAND cp "${CMAKE_SOURCE_DIR}/lite/demo/cxx/makefiles/mobile_detection/Makefile.${ARM_TARGET_OS}.${ARM_TARGET_ARCH_ABI}" "${INFER_LITE_PUBLISH_ROOT}/demo/cxx/mobile_detection/Makefile"
)
add_dependencies(tiny_publish_cxx_lib publish_inference_android_cxx_demos)
endif()
......
CXX_DEFINES = -DARM_WITH_OMP -DHPPL_STUB_FUNC -DLITE_WITH_ARM -DLITE_WITH_LIGHT_WEIGHT_FRAMEWORK \
-DLITE_WITH_LINUX -DPADDLE_DISABLE_PROFILER -DPADDLE_NO_PYTHON -DPADDLE_WITH_TESTING
LDFLAGS = -latomic -pthread -ldl -llog
LDFLAGS = -latomic -pthread -ldl -llog -lz
SYSROOT_COMPLILE = --sysroot=/opt/android-ndk-r17c/sysroot
THIRD_PARTY_LIBS = ../../../third_party/gflags/lib/libgflags.a
SYSTEM_INCLUDES = -I/opt/android-ndk-r17c/sources/cxx-stl/llvm-libc++/include \
-I/opt/android-ndk-r17c/sources/cxx-stl/llvm-libc++abi/include \
-I/opt/android-ndk-r17c/sources/android/support/include \
-I/opt/android-ndk-r17c/sysroot/usr/include \
THIRD_PARTY_INCLUDES = -I../../../third_party/gflags/include
ifeq ($(ARM_ABI), arm8)
CC = /opt/android-ndk-r17c/toolchains/aarch64-linux-android-4.9/prebuilt/linux-x86_64/bin/aarch64-linux-android-g++
CXX_FLAGS = -funwind-tables -no-canonical-prefixes -D__ANDROID_API__=23 -fexceptions -frtti -std=c++11 -fopenmp -O3 -DNDEBUG -fPIE
CXXFLAGS_LINK = $(CXX_FLAGS) -pie -Wl,--gc-sections
SYSROOT_LINK = --sysroot=/opt/android-ndk-r17c/platforms/android-24/arch-arm64
SYSTEM_LIBS = /opt/android-ndk-r17c/sources/cxx-stl/llvm-libc++/libs/arm64-v8a/libc++_static.a \
/opt/android-ndk-r17c/sources/cxx-stl/llvm-libc++/libs/arm64-v8a/libc++abi.a
INCLUDES = $(SYSTEM_INCLUDES) -I/opt/android-ndk-r17c/sysroot/usr/include/aarch64-linux-android $(THIRD_PARTY_INCLUDES)
INCLUDES = $(SYSTEM_INCLUDES) -I/opt/android-ndk-r17c/sysroot/usr/include/aarch64-linux-android
else
CC = /opt/android-ndk-r17c/toolchains/arm-linux-androideabi-4.9/prebuilt/linux-x86_64/bin/arm-linux-androideabi-g++
CXX_FLAGS = -march=armv7-a -mthumb -mfpu=neon -mfloat-abi=softfp -funwind-tables -no-canonical-prefixes \
@@ -31,5 +27,5 @@ else
/opt/android-ndk-r17c/sources/cxx-stl/llvm-libc++/libs/armeabi-v7a/libc++abi.a \
/opt/android-ndk-r17c/sources/cxx-stl/llvm-libc++/libs/armeabi-v7a/libandroid_support.a \
/opt/android-ndk-r17c/sources/cxx-stl/llvm-libc++/libs/armeabi-v7a/libunwind.a
INCLUDES = $(SYSTEM_INCLUDES) -I/opt/android-ndk-r17c/sysroot/usr/include/arm-linux-androideabi $(THIRD_PARTY_INCLUDES)
INCLUDES = $(SYSTEM_INCLUDES) -I/opt/android-ndk-r17c/sysroot/usr/include/arm-linux-androideabi
endif
# C++ Demo
1. Use `lite/tools/Dockerfile.mobile` to build the docker image
2. Start and enter the docker container, then download the demo environment with `wget http://paddle-inference-dist.bj.bcebos.com/lite_release/r0.1/inference_lite_lib.android.armv8.tar.gz`. (For the armv7 demo, download `wget http://paddle-inference-dist.bj.bcebos.com/lite_release/r0.1/inference_lite_lib.android.armv7.tar.gz` instead.)
2. Start and enter the docker container, then download the demo environment with `wget http://paddle-inference-dist.bj.bcebos.com/lite_release/v2.1.0/inference_lite_lib.android.armv8.tar.gz`. (For the armv7 demo, download `wget http://paddle-inference-dist.bj.bcebos.com/lite_release/v2.1.0/inference_lite_lib.android.armv7.tar.gz` instead.)
3. Extract the downloaded archive: `tar zxvf inference_lite_lib.android.armv8.tar.gz`
4. Run the following commands to prepare the emulator environment
```shell
@@ -27,8 +27,10 @@ tar zxvf mobilenet_v1.tar.gz
make
adb -s emulator-5554 push mobilenet_v1 /data/local/tmp/
adb -s emulator-5554 push mobilenetv1_full_api /data/local/tmp/
adb -s emulator-5554 push ../../../cxx/lib/libpaddle_full_api_shared.so /data/local/tmp/
adb -s emulator-5554 shell chmod +x /data/local/tmp/mobilenetv1_full_api
adb -s emulator-5554 shell "/data/local/tmp/mobilenetv1_full_api --model_dir=/data/local/tmp/mobilenet_v1 --optimized_model_dir=/data/local/tmp/mobilenet_v1.opt"
adb -s emulator-5554 shell "export LD_LIBRARY_PATH=/data/local/tmp/:$LD_LIBRARY_PATH &&
/data/local/tmp/mobilenetv1_full_api --model_dir=/data/local/tmp/mobilenet_v1 --optimized_model_dir=/data/local/tmp/mobilenet_v1.opt"
```
On success, the predicted probabilities of the top 10 classes are printed to the console.
@@ -37,6 +39,24 @@ adb -s emulator-5554 shell "/data/local/tmp/mobilenetv1_full_api --model_dir=/da
cd ../mobile_light
make
adb -s emulator-5554 push mobilenetv1_light_api /data/local/tmp/
adb -s emulator-5554 push ../../../cxx/lib/libpaddle_light_api_shared.so /data/local/tmp/
adb -s emulator-5554 shell chmod +x /data/local/tmp/mobilenetv1_light_api
adb -s emulator-5554 shell "/data/local/tmp/mobilenetv1_light_api --model_dir=/data/local/tmp/mobilenet_v1.opt"
adb -s emulator-5554 shell "export LD_LIBRARY_PATH=/data/local/tmp/:$LD_LIBRARY_PATH &&
/data/local/tmp/mobilenetv1_light_api /data/local/tmp/mobilenet_v1.opt"
```
7. Build and run the object detection demo
```shell
cd ../mobile_detection
wget https://paddle-inference-dist.bj.bcebos.com/mobilenetv1-ssd.tar.gz
tar zxvf mobilenetv1-ssd.tar.gz
make
adb -s emulator-5554 push mobile_detection /data/local/tmp/
adb -s emulator-5554 push test.jpg /data/local/tmp/
adb -s emulator-5554 push ../../../cxx/lib/libpaddle_light_api_shared.so /data/local/tmp/
adb -s emulator-5554 shell chmod +x /data/local/tmp/mobile_detection
adb -s emulator-5554 shell "export LD_LIBRARY_PATH=/data/local/tmp/:$LD_LIBRARY_PATH &&
/data/local/tmp/mobile_detection /data/local/tmp/mobilenetv1-ssd /data/local/tmp/test.jpg"
adb -s emulator-5554 pull /data/local/tmp/test_detection_result.jpg ./
```
On success, the object detection result image test_detection_result.jpg is generated in the mobile_detection directory.
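
Each detection row produced by the SSD model holds six consecutive floats: `[class_id, score, xmin, ymin, xmax, ymax]`, with box coordinates normalized to `[0, 1]`. Below is a minimal sketch of decoding one row into a pixel-space box, mirroring what `mobile_detection.cc` does; the `Box` struct and `decode_row` name are illustrative, not part of the demo's API:

```cpp
struct Box {
  int class_id;
  float score;
  int x, y, w, h;  // pixel-space box
};

// Decode one detection row: [class_id, score, xmin, ymin, xmax, ymax].
// img_w/img_h are the original image dimensions; the row's coordinates
// are normalized to [0, 1].
Box decode_row(const float* row, int img_w, int img_h) {
  Box b;
  b.class_id = static_cast<int>(row[0]);
  b.score = row[1];
  b.x = static_cast<int>(row[2] * img_w);        // xmin -> pixels
  b.y = static_cast<int>(row[3] * img_h);        // ymin -> pixels
  b.w = static_cast<int>(row[4] * img_w) - b.x;  // width from xmax
  b.h = static_cast<int>(row[5] * img_h) - b.y;  // height from ymax
  return b;
}
```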
ARM_ABI = arm7
export ARM_ABI
include ../Makefile.def
LITE_ROOT=../../../
THIRD_PARTY_DIR=${LITE_ROOT}/third_party
OPENCV_VERSION=opencv4.1.0
OPENCV_LIBS = ../../../third_party/${OPENCV_VERSION}/armeabi-v7a/libs/libopencv_imgcodecs.a \
../../../third_party/${OPENCV_VERSION}/armeabi-v7a/libs/libopencv_imgproc.a \
../../../third_party/${OPENCV_VERSION}/armeabi-v7a/libs/libopencv_core.a \
../../../third_party/${OPENCV_VERSION}/armeabi-v7a/3rdparty/libs/libtegra_hal.a \
../../../third_party/${OPENCV_VERSION}/armeabi-v7a/3rdparty/libs/liblibjpeg-turbo.a \
../../../third_party/${OPENCV_VERSION}/armeabi-v7a/3rdparty/libs/liblibwebp.a \
../../../third_party/${OPENCV_VERSION}/armeabi-v7a/3rdparty/libs/liblibpng.a \
../../../third_party/${OPENCV_VERSION}/armeabi-v7a/3rdparty/libs/liblibjasper.a \
../../../third_party/${OPENCV_VERSION}/armeabi-v7a/3rdparty/libs/liblibtiff.a \
../../../third_party/${OPENCV_VERSION}/armeabi-v7a/3rdparty/libs/libIlmImf.a \
../../../third_party/${OPENCV_VERSION}/armeabi-v7a/3rdparty/libs/libtbb.a \
../../../third_party/${OPENCV_VERSION}/armeabi-v7a/3rdparty/libs/libcpufeatures.a
OPENCV_INCLUDE = -I../../../third_party/${OPENCV_VERSION}/armeabi-v7a/include
CXX_INCLUDES = $(INCLUDES) ${OPENCV_INCLUDE} -I$(LITE_ROOT)/cxx/include
CXX_LIBS = ${OPENCV_LIBS} -L$(LITE_ROOT)/cxx/lib/ -lpaddle_light_api_shared $(SYSTEM_LIBS)
###############################################################
# How to use one of the static libraries:                     #
#  `libpaddle_api_full_bundled.a`                             #
#  `libpaddle_api_light_bundled.a`                            #
###############################################################
# Note: the shared library is used by default.                #
###############################################################
# 1. Comment out the line above, which links `libpaddle_light_api_shared.so`.
# 2. Uncomment the line below, which links `libpaddle_api_light_bundled.a`.
#CXX_LIBS = $(LITE_ROOT)/cxx/lib/libpaddle_api_light_bundled.a $(SYSTEM_LIBS)
mobile_detection: fetch_opencv mobile_detection.o
$(CC) $(SYSROOT_LINK) $(CXXFLAGS_LINK) mobile_detection.o -o mobile_detection $(CXX_LIBS) $(LDFLAGS)
mobile_detection.o: mobile_detection.cc
$(CC) $(SYSROOT_COMPLILE) $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -o mobile_detection.o -c mobile_detection.cc
fetch_opencv:
@ test -d ${THIRD_PARTY_DIR} || mkdir ${THIRD_PARTY_DIR}
@ test -e ${THIRD_PARTY_DIR}/${OPENCV_VERSION}.tar.gz || \
(echo "fetch opencv libs" && \
wget -P ${THIRD_PARTY_DIR} https://paddle-inference-dist.bj.bcebos.com/${OPENCV_VERSION}.tar.gz)
@ test -d ${THIRD_PARTY_DIR}/${OPENCV_VERSION} || \
tar -zxvf ${THIRD_PARTY_DIR}/${OPENCV_VERSION}.tar.gz -C ${THIRD_PARTY_DIR}
.PHONY: clean
clean:
rm -f mobile_detection.o
rm -f mobile_detection
ARM_ABI = arm8
export ARM_ABI
include ../Makefile.def
LITE_ROOT=../../../
THIRD_PARTY_DIR=${LITE_ROOT}/third_party
OPENCV_VERSION=opencv4.1.0
OPENCV_LIBS = ../../../third_party/${OPENCV_VERSION}/arm64-v8a/libs/libopencv_imgcodecs.a \
../../../third_party/${OPENCV_VERSION}/arm64-v8a/libs/libopencv_imgproc.a \
../../../third_party/${OPENCV_VERSION}/arm64-v8a/libs/libopencv_core.a \
../../../third_party/${OPENCV_VERSION}/arm64-v8a/3rdparty/libs/libtegra_hal.a \
../../../third_party/${OPENCV_VERSION}/arm64-v8a/3rdparty/libs/liblibjpeg-turbo.a \
../../../third_party/${OPENCV_VERSION}/arm64-v8a/3rdparty/libs/liblibwebp.a \
../../../third_party/${OPENCV_VERSION}/arm64-v8a/3rdparty/libs/liblibpng.a \
../../../third_party/${OPENCV_VERSION}/arm64-v8a/3rdparty/libs/liblibjasper.a \
../../../third_party/${OPENCV_VERSION}/arm64-v8a/3rdparty/libs/liblibtiff.a \
../../../third_party/${OPENCV_VERSION}/arm64-v8a/3rdparty/libs/libIlmImf.a \
../../../third_party/${OPENCV_VERSION}/arm64-v8a/3rdparty/libs/libtbb.a \
../../../third_party/${OPENCV_VERSION}/arm64-v8a/3rdparty/libs/libcpufeatures.a
OPENCV_INCLUDE = -I../../../third_party/${OPENCV_VERSION}/arm64-v8a/include
CXX_INCLUDES = $(INCLUDES) ${OPENCV_INCLUDE} -I$(LITE_ROOT)/cxx/include
CXX_LIBS = ${OPENCV_LIBS} -L$(LITE_ROOT)/cxx/lib/ -lpaddle_light_api_shared $(SYSTEM_LIBS)
###############################################################
# How to use one of the static libraries:                     #
#  `libpaddle_api_full_bundled.a`                             #
#  `libpaddle_api_light_bundled.a`                            #
###############################################################
# Note: the shared library is used by default.                #
###############################################################
# 1. Comment out the line above, which links `libpaddle_light_api_shared.so`.
# 2. Uncomment the line below, which links `libpaddle_api_light_bundled.a`.
#CXX_LIBS = $(LITE_ROOT)/cxx/lib/libpaddle_api_light_bundled.a $(SYSTEM_LIBS)
mobile_detection: fetch_opencv mobile_detection.o
$(CC) $(SYSROOT_LINK) $(CXXFLAGS_LINK) mobile_detection.o -o mobile_detection $(CXX_LIBS) $(LDFLAGS)
mobile_detection.o: mobile_detection.cc
$(CC) $(SYSROOT_COMPLILE) $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -o mobile_detection.o -c mobile_detection.cc
fetch_opencv:
@ test -d ${THIRD_PARTY_DIR} || mkdir ${THIRD_PARTY_DIR}
@ test -e ${THIRD_PARTY_DIR}/${OPENCV_VERSION}.tar.gz || \
(echo "fetch opencv libs" && \
wget -P ${THIRD_PARTY_DIR} https://paddle-inference-dist.bj.bcebos.com/${OPENCV_VERSION}.tar.gz)
@ test -d ${THIRD_PARTY_DIR}/${OPENCV_VERSION} || \
tar -zxvf ${THIRD_PARTY_DIR}/${OPENCV_VERSION}.tar.gz -C ${THIRD_PARTY_DIR}
.PHONY: clean
clean:
rm -f mobile_detection.o
rm -f mobile_detection
@@ -5,28 +5,25 @@ include ../Makefile.def
LITE_ROOT=../../../
CXX_INCLUDES = $(INCLUDES) -I$(LITE_ROOT)/cxx/include
THIRD_PARTY_INCLUDES = -I../../../third_party/gflags/include
CXX_LIBS = $(THIRD_PARTY_LIBS) $(LITE_ROOT)/cxx/lib/libpaddle_api_full_bundled.a $(SYSTEM_LIBS)
THIRD_PARTY_LIBS = ../../../third_party/gflags/lib/libgflags.a
CXX_INCLUDES = $(INCLUDES) ${THIRD_PARTY_INCLUDES} -I$(LITE_ROOT)/cxx/include
CXX_LIBS = $(THIRD_PARTY_LIBS) -L$(LITE_ROOT)/cxx/lib/ -lpaddle_full_api_shared $(SYSTEM_LIBS)
###############################################################
# How to use one of shared libraries: #
# `libpaddle_full_api_shared.so` #
# `libpaddle_light_api_shared.so` #
# How to use one of the static libraries:                     #
# `libpaddle_api_full_bundled.a` #
# `libpaddle_api_light_bundled.a` #
###############################################################
# Note: default use lite's static library. #
# Note: the shared library is used by default.                #
###############################################################
# 1. Comment above line using `libpaddle_api_full_bundled.a`;
# 2. Undo comment below line and execute
# `export LD_LIBRARY_PATH=<libpaddle_full_api_shared.so dir>`
# in command line before `make`;
# 3. After `adb push` model and mobilenetv1_full_api files to
# android devices, execute
# `export LD_LIBRARY_PATH=<libpaddle_full_api_shared.so android dir>`
# and `mobilenetv1_full_api` in android `adb shell`;
# 4. Get executed result of `mobilenetv1_full_api` in android.
# CXX_LIBS = $(THIRD_PARTY_LIBS) -L$(LITE_ROOT)/cxx/lib/ -lpaddle_full_api_shared $(SYSTEM_LIBS)
# 1. Comment out the line above, which links `libpaddle_full_api_shared.so`.
# 2. Uncomment the line below, which links `libpaddle_api_full_bundled.a`.
#CXX_LIBS = $(THIRD_PARTY_LIBS) $(LITE_ROOT)/cxx/lib/libpaddle_api_full_bundled.a $(SYSTEM_LIBS)
mobilenetv1_full_api: mobilenetv1_full_api.o
$(CC) $(SYSROOT_LINK) $(CXXFLAGS_LINK) mobilenetv1_full_api.o -o mobilenetv1_full_api $(CXX_LIBS) $(LDFLAGS)
......
@@ -5,28 +5,25 @@ include ../Makefile.def
LITE_ROOT=../../../
CXX_INCLUDES = $(INCLUDES) -I$(LITE_ROOT)/cxx/include
THIRD_PARTY_INCLUDES = -I../../../third_party/gflags/include
CXX_LIBS = $(THIRD_PARTY_LIBS) $(LITE_ROOT)/cxx/lib/libpaddle_api_full_bundled.a $(SYSTEM_LIBS)
THIRD_PARTY_LIBS = ../../../third_party/gflags/lib/libgflags.a
CXX_INCLUDES = $(INCLUDES) ${THIRD_PARTY_INCLUDES} -I$(LITE_ROOT)/cxx/include
CXX_LIBS = $(THIRD_PARTY_LIBS) -L$(LITE_ROOT)/cxx/lib/ -lpaddle_full_api_shared $(SYSTEM_LIBS)
###############################################################
# How to use one of shared libraries: #
# `libpaddle_full_api_shared.so` #
# `libpaddle_light_api_shared.so` #
# How to use one of the static libraries:                     #
# `libpaddle_api_full_bundled.a` #
# `libpaddle_api_light_bundled.a` #
###############################################################
# Note: default use lite's static library. #
# Note: the shared library is used by default.                #
###############################################################
# 1. Comment above line using `libpaddle_api_full_bundled.a`;
# 2. Undo comment below line and execute
# `export LD_LIBRARY_PATH=<libpaddle_full_api_shared.so dir>`
# in command line before `make`;
# 3. After `adb push` model and mobilenetv1_full_api files to
# android devices, execute
# `export LD_LIBRARY_PATH=<libpaddle_full_api_shared.so android dir>`
# and `mobilenetv1_full_api` in android `adb shell`;
# 4. Get executed result of `mobilenetv1_full_api` in android.
# CXX_LIBS = $(THIRD_PARTY_LIBS) -L$(LITE_ROOT)/cxx/lib/ -lpaddle_full_api_shared $(SYSTEM_LIBS)
# 1. Comment out the line above, which links `libpaddle_full_api_shared.so`.
# 2. Uncomment the line below, which links `libpaddle_api_full_bundled.a`.
#CXX_LIBS = $(THIRD_PARTY_LIBS) $(LITE_ROOT)/cxx/lib/libpaddle_api_full_bundled.a $(SYSTEM_LIBS)
mobilenetv1_full_api: mobilenetv1_full_api.o
$(CC) $(SYSROOT_LINK) $(CXXFLAGS_LINK) mobilenetv1_full_api.o -o mobilenetv1_full_api $(CXX_LIBS) $(LDFLAGS)
......
@@ -7,26 +7,19 @@ LITE_ROOT=../../../
CXX_INCLUDES = $(INCLUDES) -I$(LITE_ROOT)/cxx/include
CXX_LIBS = $(THIRD_PARTY_LIBS) $(LITE_ROOT)/cxx/lib/libpaddle_api_light_bundled.a $(SYSTEM_LIBS)
CXX_LIBS = -L$(LITE_ROOT)/cxx/lib/ -lpaddle_light_api_shared $(SYSTEM_LIBS)
###############################################################
# How to use one of shared libraries: #
# `libpaddle_light_api_shared.so` #
# `libpaddle_full_api_shared.so` #
# How to use one of the static libraries:                     #
# `libpaddle_api_full_bundled.a` #
# `libpaddle_api_light_bundled.a` #
###############################################################
# Note: default use lite's static library. #
# Note: the shared library is used by default.                #
###############################################################
# 1. Comment above line using `libpaddle_api_light_bundled.a`;
# 2. Undo comment below line and execute
# `export LD_LIBRARY_PATH=<libpaddle_light_api_shared.so dir>`
# in command line before `make`;
# 3. After `adb push` model and mobilenetv1_light_api files to
# android devices, execute
# `export LD_LIBRARY_PATH=<libpaddle_light_api_shared.so android dir>`
# and `mobilenetv1_light_api` in android `adb shell`;
# 4. Get executed result of `mobilenetv1_light_api` in android.
# CXX_LIBS = $(THIRD_PARTY_LIBS) -L$(LITE_ROOT)/cxx/lib/ -lpaddle_light_api_shared $(SYSTEM_LIBS)
# 1. Comment out the line above, which links `libpaddle_light_api_shared.so`.
# 2. Uncomment the line below, which links `libpaddle_api_light_bundled.a`.
#CXX_LIBS = $(LITE_ROOT)/cxx/lib/libpaddle_api_light_bundled.a $(SYSTEM_LIBS)
mobilenetv1_light_api: mobilenetv1_light_api.o
$(CC) $(SYSROOT_LINK) $(CXXFLAGS_LINK) mobilenetv1_light_api.o -o mobilenetv1_light_api $(CXX_LIBS) $(LDFLAGS)
......
@@ -7,26 +7,19 @@ LITE_ROOT=../../../
CXX_INCLUDES = $(INCLUDES) -I$(LITE_ROOT)/cxx/include
CXX_LIBS = $(THIRD_PARTY_LIBS) $(LITE_ROOT)/cxx/lib/libpaddle_api_light_bundled.a $(SYSTEM_LIBS)
CXX_LIBS = -L$(LITE_ROOT)/cxx/lib/ -lpaddle_light_api_shared $(SYSTEM_LIBS)
###############################################################
# How to use one of shared libaray: #
# `libpaddle_light_api_shared.so` #
# `libpaddle_full_api_shared.so` #
# How to use one of the static libraries:                     #
# `libpaddle_api_full_bundled.a` #
# `libpaddle_api_light_bundled.a` #
###############################################################
# Note: default use lite's tatic library. #
# Note: the shared library is used by default.                #
###############################################################
# 1. Comment above line using `libpaddle_api_light_bundled.a`;
# 2. Undo comment below line and execute
# `export LD_LIBRARY_PATH=<libpaddle_light_api_shared dir>`
# in command line before `make`;
# 3. After `adb push` model and mobilenetv1_light_api files to
# android devices, execute
# `export LD_LIBRARY_PATH=<libpaddle_light_api_shared android dir>` and
# `mobilenetv1_light_api` in android `adb shell`;
# 4. Get executed result of `mobilenetv1_light_api` in android.
# CXX_LIBS = $(THIRD_PARTY_LIBS) -L$(LITE_ROOT)/cxx/lib/ -lpaddle_light_api_shared $(SYSTEM_LIBS)
# 1. Comment out the line above, which links `libpaddle_light_api_shared.so`.
# 2. Uncomment the line below, which links `libpaddle_api_light_bundled.a`.
#CXX_LIBS = $(LITE_ROOT)/cxx/lib/libpaddle_api_light_bundled.a $(SYSTEM_LIBS)
mobilenetv1_light_api: mobilenetv1_light_api.o
$(CC) $(SYSROOT_LINK) $(CXXFLAGS_LINK) mobilenetv1_light_api.o -o mobilenetv1_light_api $(CXX_LIBS) $(LDFLAGS)
......
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <iostream>
#include <vector>
#include "opencv2/core.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/imgproc.hpp"
#include "paddle_api.h" // NOLINT
using namespace paddle::lite_api; // NOLINT
struct Object {
int batch_id;
cv::Rect rec;
int class_id;
float prob;
};
int64_t ShapeProduction(const shape_t& shape) {
int64_t res = 1;
for (auto i : shape) res *= i;
return res;
}
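// PASCAL VOC label set: background plus the 20 VOC object classes.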
const char* class_names[] = {
"background", "aeroplane", "bicycle", "bird", "boat",
"bottle", "bus", "car", "cat", "chair",
"cow", "diningtable", "dog", "horse", "motorbike",
"person", "pottedplant", "sheep", "sofa", "train",
"tvmonitor"};
// Fill the tensor with mean/scale normalization and transpose the layout
// from NHWC to NCHW, accelerated with NEON.
void neon_mean_scale(const float* din,
float* dout,
int size,
const std::vector<float> mean,
const std::vector<float> scale) {
if (mean.size() != 3 || scale.size() != 3) {
std::cerr << "[ERROR] mean or scale size must equal to 3\n";
exit(1);
}
float32x4_t vmean0 = vdupq_n_f32(mean[0]);
float32x4_t vmean1 = vdupq_n_f32(mean[1]);
float32x4_t vmean2 = vdupq_n_f32(mean[2]);
float32x4_t vscale0 = vdupq_n_f32(1.f / scale[0]);
float32x4_t vscale1 = vdupq_n_f32(1.f / scale[1]);
float32x4_t vscale2 = vdupq_n_f32(1.f / scale[2]);
float* dout_c0 = dout;
float* dout_c1 = dout + size;
float* dout_c2 = dout + size * 2;
int i = 0;
for (; i < size - 3; i += 4) {
float32x4x3_t vin3 = vld3q_f32(din);
float32x4_t vsub0 = vsubq_f32(vin3.val[0], vmean0);
float32x4_t vsub1 = vsubq_f32(vin3.val[1], vmean1);
float32x4_t vsub2 = vsubq_f32(vin3.val[2], vmean2);
float32x4_t vs0 = vmulq_f32(vsub0, vscale0);
float32x4_t vs1 = vmulq_f32(vsub1, vscale1);
float32x4_t vs2 = vmulq_f32(vsub2, vscale2);
vst1q_f32(dout_c0, vs0);
vst1q_f32(dout_c1, vs1);
vst1q_f32(dout_c2, vs2);
din += 12;
dout_c0 += 4;
dout_c1 += 4;
dout_c2 += 4;
}
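  // Scalar tail: normalize the remaining (size % 4) pixels with the same
  // (x - mean) / scale as the NEON path, one output plane per channel.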
  for (; i < size; i++) {
    *(dout_c0++) = (*(din++) - mean[0]) / scale[0];
    *(dout_c1++) = (*(din++) - mean[1]) / scale[1];
    *(dout_c2++) = (*(din++) - mean[2]) / scale[2];
  }
}
void pre_process(const cv::Mat& img, int width, int height, float* data) {
cv::Mat rgb_img;
cv::cvtColor(img, rgb_img, cv::COLOR_BGR2RGB);
cv::resize(rgb_img, rgb_img, cv::Size(width, height), 0.f, 0.f);
cv::Mat imgf;
rgb_img.convertTo(imgf, CV_32FC3, 1 / 255.f);
std::vector<float> mean = {0.5f, 0.5f, 0.5f};
std::vector<float> scale = {0.5f, 0.5f, 0.5f};
const float* dimg = reinterpret_cast<const float*>(imgf.data);
neon_mean_scale(dimg, data, width * height, mean, scale);
}
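// The SSD output tensor has shape (num_detections x 6); each row is
// [class_id, score, xmin, ymin, xmax, ymax], with box coordinates
// normalized to [0, 1] relative to the original image.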
std::vector<Object> detect_object(const float* data,
int count,
float thresh,
cv::Mat& image) { // NOLINT
if (data == nullptr) {
std::cerr << "[ERROR] data can not be nullptr\n";
exit(1);
}
std::vector<Object> rect_out;
for (int iw = 0; iw < count; iw++) {
int oriw = image.cols;
int orih = image.rows;
if (data[1] > thresh && static_cast<int>(data[0]) > 0) {
Object obj;
int x = static_cast<int>(data[2] * oriw);
int y = static_cast<int>(data[3] * orih);
int w = static_cast<int>(data[4] * oriw) - x;
int h = static_cast<int>(data[5] * orih) - y;
cv::Rect rec_clip =
cv::Rect(x, y, w, h) & cv::Rect(0, 0, image.cols, image.rows);
obj.batch_id = 0;
obj.class_id = static_cast<int>(data[0]);
obj.prob = data[1];
obj.rec = rec_clip;
if (w > 0 && h > 0 && obj.prob <= 1) {
rect_out.push_back(obj);
cv::rectangle(image, rec_clip, cv::Scalar(0, 0, 255), 2, cv::LINE_AA);
std::string str_prob = std::to_string(obj.prob);
std::string text = std::string(class_names[obj.class_id]) + ": " +
str_prob.substr(0, str_prob.find(".") + 4);
int font_face = cv::FONT_HERSHEY_COMPLEX_SMALL;
double font_scale = 1.f;
int thickness = 2;
cv::Size text_size =
cv::getTextSize(text, font_face, font_scale, thickness, nullptr);
float new_font_scale = w * 0.35 * font_scale / text_size.width;
text_size = cv::getTextSize(
text, font_face, new_font_scale, thickness, nullptr);
cv::Point origin;
origin.x = x + 10;
origin.y = y + text_size.height + 10;
cv::putText(image,
text,
origin,
font_face,
new_font_scale,
cv::Scalar(0, 255, 255),
thickness,
cv::LINE_AA);
std::cout << "detection, image size: " << image.cols << ", "
<< image.rows
<< ", detect object: " << class_names[obj.class_id]
<< ", score: " << obj.prob << ", location: x=" << x
<< ", y=" << y << ", width=" << w << ", height=" << h
<< std::endl;
}
}
data += 6;
}
return rect_out;
}
void RunModel(std::string model_dir, std::string img_path) {
// 1. Set MobileConfig
MobileConfig config;
config.set_model_dir(model_dir);
// 2. Create PaddlePredictor by MobileConfig
std::shared_ptr<PaddlePredictor> predictor =
CreatePaddlePredictor<MobileConfig>(config);
// 3. Prepare input data from image
std::unique_ptr<Tensor> input_tensor(std::move(predictor->GetInput(0)));
const int in_width = 300;
const int in_height = 300;
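  // The model expects a 1 x 3 x 300 x 300 input in NCHW layout.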
input_tensor->Resize({1, 3, in_height, in_width});
auto* data = input_tensor->mutable_data<float>();
  cv::Mat img = cv::imread(img_path, cv::IMREAD_COLOR);
pre_process(img, in_width, in_height, data);
// 4. Run predictor
predictor->Run();
// 5. Get output and post process
std::unique_ptr<const Tensor> output_tensor(
std::move(predictor->GetOutput(0)));
auto* outptr = output_tensor->data<float>();
auto shape_out = output_tensor->shape();
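  // Total element count of the output tensor; each detection occupies
  // 6 floats, so the tensor holds cnt / 6 detections.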
int64_t cnt = 1;
for (auto& i : shape_out) {
cnt *= i;
}
auto rec_out = detect_object(outptr, static_cast<int>(cnt / 6), 0.6f, img);
  std::string result_name =
      img_path.substr(0, img_path.rfind(".")) + "_detection_result.jpg";
cv::imwrite(result_name, img);
}
int main(int argc, char** argv) {
if (argc < 3) {
std::cerr << "[ERROR] usage: " << argv[0] << " model_dir image_path\n";
exit(1);
}
std::string model_dir = argv[1];
std::string img_path = argv[2];
RunModel(model_dir, img_path);
return 0;
}
@@ -13,12 +13,10 @@
// limitations under the License.
#include <gflags/gflags.h>
#include <stdio.h>
#include <iostream>
#include <vector>
#include "paddle_api.h" // NOLINT
#include "paddle_use_kernels.h" // NOLINT
#include "paddle_use_ops.h" // NOLINT
#include "paddle_use_passes.h" // NOLINT
#include "paddle_api.h" // NOLINT
#include "paddle_use_passes.h" // NOLINT
using namespace paddle::lite_api; // NOLINT
@@ -32,27 +30,6 @@ int64_t ShapeProduction(const shape_t& shape) {
return res;
}
void CheckInput(char*** argv) {
if (FLAGS_model_dir == "") {
printf(
"Usage: %s --model_dir=<your-model-directory> "
"--optimized_model_dir=<your-optmized-model-directory> "
"--prefer_int8_kernel=[true|false]\n",
*argv[0]);
exit(1);
}
if (FLAGS_optimized_model_dir == "") {
FLAGS_optimized_model_dir = FLAGS_model_dir;
printf(
"[WARN] no `optimized_model_dir` provided. set `optimized_model_dir` "
":= `model_dir`:%s\n",
FLAGS_optimized_model_dir.c_str());
}
printf("[WARN] model_dir:%s\n", FLAGS_model_dir.c_str());
printf("[WARN] optimized_model_dir:%s\n", FLAGS_optimized_model_dir.c_str());
printf("[WARN] prefer_int8_kernel:%s\n", FLAGS_prefer_int8_kernel);
}
// 0. Enable OpenCL, if needed
// Enable the `DEMO_WITH_OPENCL` macro below if you need to run on the GPU (OpenCL)
// #define DEMO_WITH_OPENCL
@@ -99,15 +76,22 @@ void RunModel() {
// 6. Get output
std::unique_ptr<const Tensor> output_tensor(
std::move(predictor->GetOutput(0)));
printf("Output dim: %d\n", output_tensor->shape()[1]);
std::cout << "Output shape " << output_tensor->shape()[1] << std::endl;
for (int i = 0; i < ShapeProduction(output_tensor->shape()); i += 100) {
printf("Output[%d]: %f\n", i, output_tensor->data<float>()[i]);
std::cout << "Output[" << i << "]: " << output_tensor->data<float>()[i]
<< std::endl;
}
}
int main(int argc, char** argv) {
google::ParseCommandLineFlags(&argc, &argv, true);
CheckInput(&argv);
if (FLAGS_model_dir == "" || FLAGS_optimized_model_dir == "") {
std::cerr << "[ERROR] usage: " << argv[0]
<< " --model_dir=<your-model-directory>"
<< " --optimized_model_dir=<your-optmized-model-directory> "
<< " --prefer_int8_kernel=[true|false]\n";
exit(1);
}
RunModel();
return 0;
}
@@ -12,35 +12,22 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gflags/gflags.h>
#include <stdio.h>
#include <iostream>
#include <vector>
#include "paddle_api.h" // NOLINT
#include "paddle_use_kernels.h" // NOLINT
#include "paddle_use_ops.h" // NOLINT
#include "paddle_api.h" // NOLINT
using namespace paddle::lite_api; // NOLINT
DEFINE_string(model_dir, "", "Model dir path.");
int64_t ShapeProduction(const shape_t& shape) {
int64_t res = 1;
for (auto i : shape) res *= i;
return res;
}
void CheckInput(char*** argv) {
if (FLAGS_model_dir == "") {
printf("Usage: %s --model_dir=<your-nb-model-directory>\n", *argv[0]);
exit(1);
}
printf("[WARN] model_dir:%s\n", FLAGS_model_dir.c_str());
}
void RunModel() {
void RunModel(std::string model_dir) {
// 1. Set MobileConfig
MobileConfig config;
config.set_model_dir(FLAGS_model_dir);
config.set_model_dir(model_dir);
// 2. Create PaddlePredictor by MobileConfig
std::shared_ptr<PaddlePredictor> predictor =
@@ -60,15 +47,19 @@ void RunModel() {
// 5. Get output
std::unique_ptr<const Tensor> output_tensor(
std::move(predictor->GetOutput(0)));
printf("Output dim: %d\n", output_tensor->shape()[1]);
std::cout << "Output shape " << output_tensor->shape()[1] << std::endl;
for (int i = 0; i < ShapeProduction(output_tensor->shape()); i += 100) {
printf("Output[%d]: %f\n", i, output_tensor->data<float>()[i]);
std::cout << "Output[" << i << "]: " << output_tensor->data<float>()[i]
<< std::endl;
}
}
int main(int argc, char** argv) {
google::ParseCommandLineFlags(&argc, &argv, true);
CheckInput(&argv);
RunModel();
if (argc < 2) {
std::cerr << "[ERROR] usage: ./" << argv[0] << " naive_buffer_model_dir\n";
exit(1);
}
std::string model_dir = argv[1];
RunModel(model_dir);
return 0;
}