diff --git a/lite/CMakeLists.txt b/lite/CMakeLists.txt index cb6a872e061a51f142bd2301171f0559a1ccb129..bac6f80c4721e0c5de201eebfe7e6a39a0bdc73a 100644 --- a/lite/CMakeLists.txt +++ b/lite/CMakeLists.txt @@ -232,6 +232,8 @@ if (LITE_WITH_LIGHT_WEIGHT_FRAMEWORK AND LITE_WITH_ARM) COMMAND cp "${CMAKE_SOURCE_DIR}/lite/demo/cxx/makefiles/mobile_classify/Makefile.${ARM_TARGET_OS}.${ARM_TARGET_ARCH_ABI}" "${INFER_LITE_PUBLISH_ROOT}/demo/cxx/mobile_classify/Makefile" COMMAND cp -r "${CMAKE_SOURCE_DIR}/lite/demo/cxx/test_cv" "${INFER_LITE_PUBLISH_ROOT}/demo/cxx" COMMAND cp "${CMAKE_SOURCE_DIR}/lite/demo/cxx/makefiles/test_cv/Makefile.${ARM_TARGET_OS}.${ARM_TARGET_ARCH_ABI}" "${INFER_LITE_PUBLISH_ROOT}/demo/cxx/test_cv/Makefile" + COMMAND cp -r "${CMAKE_SOURCE_DIR}/lite/demo/cxx/mask_detection" "${INFER_LITE_PUBLISH_ROOT}/demo/cxx" + COMMAND cp "${CMAKE_SOURCE_DIR}/lite/demo/cxx/makefiles/mask_detection/Makefile.${ARM_TARGET_OS}.${ARM_TARGET_ARCH_ABI}" "${INFER_LITE_PUBLISH_ROOT}/demo/cxx/mask_detection/Makefile" ) add_dependencies(publish_inference_android_cxx_demos logging gflags) add_dependencies(publish_inference_cxx_lib publish_inference_android_cxx_demos) @@ -251,6 +253,8 @@ if (LITE_WITH_LIGHT_WEIGHT_FRAMEWORK AND LITE_WITH_ARM) COMMAND cp "${CMAKE_SOURCE_DIR}/lite/demo/cxx/makefiles/mobile_classify/Makefile.${ARM_TARGET_OS}.${ARM_TARGET_ARCH_ABI}" "${INFER_LITE_PUBLISH_ROOT}/demo/cxx/mobile_classify/Makefile" COMMAND cp -r "${CMAKE_SOURCE_DIR}/lite/demo/cxx/test_cv" "${INFER_LITE_PUBLISH_ROOT}/demo/cxx" COMMAND cp "${CMAKE_SOURCE_DIR}/lite/demo/cxx/makefiles/test_cv/Makefile.${ARM_TARGET_OS}.${ARM_TARGET_ARCH_ABI}" "${INFER_LITE_PUBLISH_ROOT}/demo/cxx/test_cv/Makefile" + COMMAND cp -r "${CMAKE_SOURCE_DIR}/lite/demo/cxx/mask_detection" "${INFER_LITE_PUBLISH_ROOT}/demo/cxx" + COMMAND cp "${CMAKE_SOURCE_DIR}/lite/demo/cxx/makefiles/mask_detection/Makefile.${ARM_TARGET_OS}.${ARM_TARGET_ARCH_ABI}" "${INFER_LITE_PUBLISH_ROOT}/demo/cxx/mask_detection/Makefile" ) 
add_dependencies(tiny_publish_cxx_lib publish_inference_android_cxx_demos) endif() diff --git a/lite/demo/cxx/README.md b/lite/demo/cxx/README.md index 3217a7ed49006325715e22f8aa82d155bc8bf927..7760d5df2b82887b331cb4a536fe9e89a95c0ce7 100644 --- a/lite/demo/cxx/README.md +++ b/lite/demo/cxx/README.md @@ -71,25 +71,25 @@ tar zxvf mobilenet_v1.tar.gz ./model_optimize_tool optimize model make -adb -s emulator-5554 push mobile_classify /data/local/tmp/ -adb -s emulator-5554 push test.jpg /data/local/tmp/ -adb -s emulator-5554 push labels.txt /data/local/tmp/ -adb -s emulator-5554 push ../../../cxx/lib/libpaddle_light_api_shared.so /data/local/tmp/ -adb -s emulator-5554 shell chmod +x /data/local/tmp/mobile_classify -adb -s emulator-5554 shell "export LD_LIBRARY_PATH=/data/local/tmp/:$LD_LIBRARY_PATH && +adb push mobile_classify /data/local/tmp/ +adb push test.jpg /data/local/tmp/ +adb push labels.txt /data/local/tmp/ +adb push ../../../cxx/lib/libpaddle_light_api_shared.so /data/local/tmp/ +adb shell chmod +x /data/local/tmp/mobile_classify +adb shell "export LD_LIBRARY_PATH=/data/local/tmp/:$LD_LIBRARY_PATH && /data/local/tmp/mobile_classify /data/local/tmp/mobilenetv1opt2 /data/local/tmp/test.jpg /data/local/tmp/labels.txt" ``` 运行成功将在控制台输出预测结果的前5个类别的预测概率 - 如若想看前10个类别的预测概率,在运行命令输入topk的值即可 eg: ```shell - adb -s emulator-5554 shell "export LD_LIBRARY_PATH=/data/local/tmp/:$LD_LIBRARY_PATH && + adb shell "export LD_LIBRARY_PATH=/data/local/tmp/:$LD_LIBRARY_PATH && /data/local/tmp/mobile_classify /data/local/tmp/mobilenetv1opt2/ /data/local/tmp/test.jpg /data/local/tmp/labels.txt 10" ``` - 如若想看其他模型的分类结果, 在运行命令输入model_dir 及其model的输入大小即可 eg: ```shell - adb -s emulator-5554 shell "export LD_LIBRARY_PATH=/data/local/tmp/:$LD_LIBRARY_PATH && + adb shell "export LD_LIBRARY_PATH=/data/local/tmp/:$LD_LIBRARY_PATH && /data/local/tmp/mobile_classify /data/local/tmp/mobilenetv2opt2/ /data/local/tmp/test.jpg /data/local/tmp/labels.txt 10 224 224" ``` @@ -100,12 +100,34 @@ wget 
http://paddle-inference-dist.bj.bcebos.com/mobilenet_v1.tar.gz tar zxvf mobilenet_v1.tar.gz ./model_optimize_tool optimize model make -adb -s emulator-5554 push test_model_cv /data/local/tmp/ -adb -s emulator-5554 push test.jpg /data/local/tmp/ -adb -s emulator-5554 push labels.txt /data/local/tmp/ -adb -s emulator-5554 push ../../../cxx/lib/libpaddle_full_api_shared.so /data/local/tmp/ -adb -s emulator-5554 shell chmod +x /data/local/tmp/test_model_cv -adb -s emulator-5554 shell "export LD_LIBRARY_PATH=/data/local/tmp/:$LD_LIBRARY_PATH && +adb push test_model_cv /data/local/tmp/ +adb push test.jpg /data/local/tmp/ +adb push labels.txt /data/local/tmp/ +adb push ../../../cxx/lib/libpaddle_full_api_shared.so /data/local/tmp/ +adb shell chmod +x /data/local/tmp/test_model_cv +adb shell "export LD_LIBRARY_PATH=/data/local/tmp/:$LD_LIBRARY_PATH && /data/local/tmp/test_model_cv /data/local/tmp/mobilenetv1opt2 /data/local/tmp/test.jpg /data/local/tmp/labels.txt" ``` 运行成功将在控制台输出预测结果的前10个类别的预测概率 + +10. 
编译并运行mask_detection口罩检测的demo + +注:运行该demo所需的libpaddle_light_api_shared.so,编译选项需使用build_extra=ON + +```shell +cd ../mask_detection +wget https://paddle-inference-dist.bj.bcebos.com/mask_detection.tar.gz +tar zxvf mask_detection.tar.gz +make +adb push mask_detection /data/local/tmp/ +adb push test.jpg /data/local/tmp/ +adb push face_detection /data/local/tmp +adb push mask_classification /data/local/tmp +adb push ../../../cxx/lib/libpaddle_light_api_shared.so /data/local/tmp/ +adb shell chmod +x /data/local/tmp/mask_detection +adb shell "export LD_LIBRARY_PATH=/data/local/tmp/:$LD_LIBRARY_PATH && +/data/local/tmp/mask_detection /data/local/tmp/face_detection \ +/data/local/tmp/mask_classification /data/local/tmp/test.jpg" +adb pull /data/local/tmp/test_mask_detection_result.jpg ./ +``` +运行成功将在mask_detection目录下看到生成的口罩检测结果图像: test_mask_detection_result.jpg diff --git a/lite/demo/cxx/makefiles/mask_detection/Makefile.android.armv7 b/lite/demo/cxx/makefiles/mask_detection/Makefile.android.armv7 new file mode 100644 index 0000000000000000000000000000000000000000..dd6d4b0960160e140e2f051b78814d2fee08d5e0 --- /dev/null +++ b/lite/demo/cxx/makefiles/mask_detection/Makefile.android.armv7 @@ -0,0 +1,61 @@ +ARM_ABI = arm7 +export ARM_ABI + +include ../Makefile.def + +LITE_ROOT=../../../ + +THIRD_PARTY_DIR=${LITE_ROOT}/third_party + +OPENCV_VERSION=opencv4.1.0 + +OPENCV_LIBS = ../../../third_party/${OPENCV_VERSION}/armeabi-v7a/libs/libopencv_imgcodecs.a \ + ../../../third_party/${OPENCV_VERSION}/armeabi-v7a/libs/libopencv_imgproc.a \ + ../../../third_party/${OPENCV_VERSION}/armeabi-v7a/libs/libopencv_core.a \ + ../../../third_party/${OPENCV_VERSION}/armeabi-v7a/3rdparty/libs/libtegra_hal.a \ + ../../../third_party/${OPENCV_VERSION}/armeabi-v7a/3rdparty/libs/liblibjpeg-turbo.a \ + ../../../third_party/${OPENCV_VERSION}/armeabi-v7a/3rdparty/libs/liblibwebp.a \ + ../../../third_party/${OPENCV_VERSION}/armeabi-v7a/3rdparty/libs/liblibpng.a \ + 
../../../third_party/${OPENCV_VERSION}/armeabi-v7a/3rdparty/libs/liblibjasper.a \ + ../../../third_party/${OPENCV_VERSION}/armeabi-v7a/3rdparty/libs/liblibtiff.a \ + ../../../third_party/${OPENCV_VERSION}/armeabi-v7a/3rdparty/libs/libIlmImf.a \ + ../../../third_party/${OPENCV_VERSION}/armeabi-v7a/3rdparty/libs/libtbb.a \ + ../../../third_party/${OPENCV_VERSION}/armeabi-v7a/3rdparty/libs/libcpufeatures.a + +OPENCV_INCLUDE = -I../../../third_party/${OPENCV_VERSION}/armeabi-v7a/include + +CXX_INCLUDES = $(INCLUDES) ${OPENCV_INCLUDE} -I$(LITE_ROOT)/cxx/include + +CXX_LIBS = ${OPENCV_LIBS} -L$(LITE_ROOT)/cxx/lib/ -lpaddle_light_api_shared $(SYSTEM_LIBS) + +############################################################### +# How to use one of static libaray: # +# `libpaddle_api_full_bundled.a` # +# `libpaddle_api_light_bundled.a` # +############################################################### +# Note: default use lite's shared library. # +############################################################### +# 1. Comment above line using `libpaddle_light_api_shared.so` +# 2. 
Undo comment below line using `libpaddle_api_light_bundled.a` + +#CXX_LIBS = $(LITE_ROOT)/cxx/lib/libpaddle_api_light_bundled.a $(SYSTEM_LIBS) + +mask_detection: fetch_opencv mask_detection.o + $(CC) $(SYSROOT_LINK) $(CXXFLAGS_LINK) mask_detection.o -o mask_detection $(CXX_LIBS) $(LDFLAGS) + +mask_detection.o: mask_detection.cc + $(CC) $(SYSROOT_COMPLILE) $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -o mask_detection.o -c mask_detection.cc + +fetch_opencv: + @ test -d ${THIRD_PARTY_DIR} || mkdir ${THIRD_PARTY_DIR} + @ test -e ${THIRD_PARTY_DIR}/${OPENCV_VERSION}.tar.gz || \ + (echo "fetch opencv libs" && \ + wget -P ${THIRD_PARTY_DIR} https://paddle-inference-dist.bj.bcebos.com/${OPENCV_VERSION}.tar.gz) + @ test -d ${THIRD_PARTY_DIR}/${OPENCV_VERSION} || \ + tar -zxvf ${THIRD_PARTY_DIR}/${OPENCV_VERSION}.tar.gz -C ${THIRD_PARTY_DIR} + + +.PHONY: clean +clean: + rm -f mask_detection.o + rm -f mask_detection diff --git a/lite/demo/cxx/makefiles/mask_detection/Makefile.android.armv8 b/lite/demo/cxx/makefiles/mask_detection/Makefile.android.armv8 new file mode 100644 index 0000000000000000000000000000000000000000..c2f601ed2f68c342b47c5add451f84c537f978de --- /dev/null +++ b/lite/demo/cxx/makefiles/mask_detection/Makefile.android.armv8 @@ -0,0 +1,61 @@ +ARM_ABI = arm8 +export ARM_ABI + +include ../Makefile.def + +LITE_ROOT=../../../ + +THIRD_PARTY_DIR=${LITE_ROOT}/third_party + +OPENCV_VERSION=opencv4.1.0 + +OPENCV_LIBS = ../../../third_party/${OPENCV_VERSION}/arm64-v8a/libs/libopencv_imgcodecs.a \ + ../../../third_party/${OPENCV_VERSION}/arm64-v8a/libs/libopencv_imgproc.a \ + ../../../third_party/${OPENCV_VERSION}/arm64-v8a/libs/libopencv_core.a \ + ../../../third_party/${OPENCV_VERSION}/arm64-v8a/3rdparty/libs/libtegra_hal.a \ + ../../../third_party/${OPENCV_VERSION}/arm64-v8a/3rdparty/libs/liblibjpeg-turbo.a \ + ../../../third_party/${OPENCV_VERSION}/arm64-v8a/3rdparty/libs/liblibwebp.a \ + ../../../third_party/${OPENCV_VERSION}/arm64-v8a/3rdparty/libs/liblibpng.a \ + 
../../../third_party/${OPENCV_VERSION}/arm64-v8a/3rdparty/libs/liblibjasper.a \ + ../../../third_party/${OPENCV_VERSION}/arm64-v8a/3rdparty/libs/liblibtiff.a \ + ../../../third_party/${OPENCV_VERSION}/arm64-v8a/3rdparty/libs/libIlmImf.a \ + ../../../third_party/${OPENCV_VERSION}/arm64-v8a/3rdparty/libs/libtbb.a \ + ../../../third_party/${OPENCV_VERSION}/arm64-v8a/3rdparty/libs/libcpufeatures.a + +OPENCV_INCLUDE = -I../../../third_party/${OPENCV_VERSION}/arm64-v8a/include + +CXX_INCLUDES = $(INCLUDES) ${OPENCV_INCLUDE} -I$(LITE_ROOT)/cxx/include + +CXX_LIBS = ${OPENCV_LIBS} -L$(LITE_ROOT)/cxx/lib/ -lpaddle_light_api_shared $(SYSTEM_LIBS) + +############################################################### +# How to use one of static libaray: # +# `libpaddle_api_full_bundled.a` # +# `libpaddle_api_light_bundled.a` # +############################################################### +# Note: default use lite's shared library. # +############################################################### +# 1. Comment above line using `libpaddle_light_api_shared.so` +# 2. 
Undo comment below line using `libpaddle_api_light_bundled.a` + +#CXX_LIBS = $(LITE_ROOT)/cxx/lib/libpaddle_api_light_bundled.a $(SYSTEM_LIBS) + +mask_detection: fetch_opencv mask_detection.o + $(CC) $(SYSROOT_LINK) $(CXXFLAGS_LINK) mask_detection.o -o mask_detection $(CXX_LIBS) $(LDFLAGS) + +mask_detection.o: mask_detection.cc + $(CC) $(SYSROOT_COMPLILE) $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -o mask_detection.o -c mask_detection.cc + +fetch_opencv: + @ test -d ${THIRD_PARTY_DIR} || mkdir ${THIRD_PARTY_DIR} + @ test -e ${THIRD_PARTY_DIR}/${OPENCV_VERSION}.tar.gz || \ + (echo "fetch opencv libs" && \ + wget -P ${THIRD_PARTY_DIR} https://paddle-inference-dist.bj.bcebos.com/${OPENCV_VERSION}.tar.gz) + @ test -d ${THIRD_PARTY_DIR}/${OPENCV_VERSION} || \ + tar -zxvf ${THIRD_PARTY_DIR}/${OPENCV_VERSION}.tar.gz -C ${THIRD_PARTY_DIR} + + +.PHONY: clean +clean: + rm -f mask_detection.o + rm -f mask_detection diff --git a/lite/demo/cxx/mask_detection/mask_detection.cc b/lite/demo/cxx/mask_detection/mask_detection.cc new file mode 100644 index 0000000000000000000000000000000000000000..748b84365fc70aa59171a6bf8847f554308fdc8c --- /dev/null +++ b/lite/demo/cxx/mask_detection/mask_detection.cc @@ -0,0 +1,246 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+#include <arm_neon.h>
+#include <iostream>
+#include <vector>
+#include "opencv2/core.hpp"
+#include "opencv2/imgcodecs.hpp"
+#include "opencv2/imgproc.hpp"
+#include "paddle_api.h"  // NOLINT
+
+using namespace paddle::lite_api;  // NOLINT
+
+struct Object {
+  int batch_id;
+  cv::Rect rec;
+  int class_id;
+  float prob;
+};
+
+int64_t ShapeProduction(const shape_t& shape) {
+  int64_t res = 1;
+  for (auto i : shape) res *= i;
+  return res;
+}
+
+// fill tensor with mean and scale and trans layout: nhwc -> nchw, neon speed up
+void neon_mean_scale(const float* din,
+                     float* dout,
+                     int size,
+                     const std::vector<float> mean,
+                     const std::vector<float> scale) {
+  if (mean.size() != 3 || scale.size() != 3) {
+    std::cerr << "[ERROR] mean or scale size must equal to 3\n";
+    exit(1);
+  }
+  float32x4_t vmean0 = vdupq_n_f32(mean[0]);
+  float32x4_t vmean1 = vdupq_n_f32(mean[1]);
+  float32x4_t vmean2 = vdupq_n_f32(mean[2]);
+  float32x4_t vscale0 = vdupq_n_f32(scale[0]);
+  float32x4_t vscale1 = vdupq_n_f32(scale[1]);
+  float32x4_t vscale2 = vdupq_n_f32(scale[2]);
+
+  float* dout_c0 = dout;
+  float* dout_c1 = dout + size;
+  float* dout_c2 = dout + size * 2;
+
+  int i = 0;
+  for (; i < size - 3; i += 4) {
+    float32x4x3_t vin3 = vld3q_f32(din);
+    float32x4_t vsub0 = vsubq_f32(vin3.val[0], vmean0);
+    float32x4_t vsub1 = vsubq_f32(vin3.val[1], vmean1);
+    float32x4_t vsub2 = vsubq_f32(vin3.val[2], vmean2);
+    float32x4_t vs0 = vmulq_f32(vsub0, vscale0);
+    float32x4_t vs1 = vmulq_f32(vsub1, vscale1);
+    float32x4_t vs2 = vmulq_f32(vsub2, vscale2);
+    vst1q_f32(dout_c0, vs0);
+    vst1q_f32(dout_c1, vs1);
+    vst1q_f32(dout_c2, vs2);
+
+    din += 12;
+    dout_c0 += 4;
+    dout_c1 += 4;
+    dout_c2 += 4;
+  }
+  for (; i < size; i++) {
+    *(dout_c0++) = (*(din++) - mean[0]) * scale[0];
+    *(dout_c1++) = (*(din++) - mean[1]) * scale[1];
+    *(dout_c2++) = (*(din++) - mean[2]) * scale[2];
+  }
+}
+
+void pre_process(const cv::Mat& img,
+                 int width,
+                 int height,
+                 const std::vector<float>& mean,
+                 const std::vector<float>& scale,
+                 float* data,
+                 bool is_scale = false) {
+  cv::Mat resized_img;
+  cv::resize(
+      img, resized_img, cv::Size(width, height), 0.f, 0.f, cv::INTER_CUBIC);
+  cv::Mat imgf;
+  float scale_factor = is_scale ? 1.f / 256 : 1.f;
+  resized_img.convertTo(imgf, CV_32FC3, scale_factor);
+  const float* dimg = reinterpret_cast<const float*>(imgf.data);
+  neon_mean_scale(dimg, data, width * height, mean, scale);
+}
+
+void RunModel(std::string det_model_dir,
+              std::string class_model_dir,
+              std::string img_path) {
+  // Prepare
+  cv::Mat img = imread(img_path, cv::IMREAD_COLOR);
+  float shrink = 0.2;
+  int width = img.cols;
+  int height = img.rows;
+  int s_width = static_cast<int>(width * shrink);
+  int s_height = static_cast<int>(height * shrink);
+
+  // Detection
+  MobileConfig config;
+  config.set_model_dir(det_model_dir);
+
+  // Create Predictor For Detection Model
+  std::shared_ptr<PaddlePredictor> predictor =
+      CreatePaddlePredictor<MobileConfig>(config);
+
+  // Get Input Tensor
+  std::unique_ptr<Tensor> input_tensor0(std::move(predictor->GetInput(0)));
+  input_tensor0->Resize({1, 3, s_height, s_width});
+  auto* data = input_tensor0->mutable_data<float>();
+
+  // Do PreProcess
+  std::vector<float> detect_mean = {104.f, 117.f, 123.f};
+  std::vector<float> detect_scale = {0.007843, 0.007843, 0.007843};
+  pre_process(img, s_width, s_height, detect_mean, detect_scale, data, false);
+
+  // Detection Model Run
+  predictor->Run();
+
+  // Get Output Tensor
+  std::unique_ptr<const Tensor> output_tensor0(
+      std::move(predictor->GetOutput(0)));
+  auto* outptr = output_tensor0->data<float>();
+  auto shape_out = output_tensor0->shape();
+  int64_t out_len = ShapeProduction(shape_out);
+
+  // Filter Out Detection Box
+  float detect_threshold = 0.3;
+  std::vector<Object> detect_result;
+  for (int i = 0; i < out_len / 6; ++i) {
+    if (outptr[1] >= detect_threshold) {
+      Object obj;
+      int xmin = static_cast<int>(width * outptr[2]);
+      int ymin = static_cast<int>(height * outptr[3]);
+      int xmax = static_cast<int>(width * outptr[4]);
+      int ymax = static_cast<int>(height * outptr[5]);
+      int w = xmax - xmin;
+      int h = ymax - ymin;
+      cv::Rect rec_clip =
+          cv::Rect(xmin, ymin, w, h) & cv::Rect(0, 0, width, height);
+      obj.rec = rec_clip;
+      detect_result.push_back(obj);
+    }
+    outptr += 6;
+  }
+
+  // Classification
+  config.set_model_dir(class_model_dir);
+
+  // Create Predictor For Classification Model
+  predictor = CreatePaddlePredictor<MobileConfig>(config);
+
+  // Get Input Tensor
+  std::unique_ptr<Tensor> input_tensor1(std::move(predictor->GetInput(0)));
+  int classify_w = 128;
+  int classify_h = 128;
+  input_tensor1->Resize({1, 3, classify_h, classify_w});
+  auto* input_data = input_tensor1->mutable_data<float>();
+  int detect_num = detect_result.size();
+  std::vector<float> classify_mean = {0.5f, 0.5f, 0.5f};
+  std::vector<float> classify_scale = {1.f, 1.f, 1.f};
+  float classify_threshold = 0.5;
+  for (int i = 0; i < detect_num; ++i) {
+    cv::Rect rec_clip = detect_result[i].rec;
+    cv::Mat roi = img(rec_clip);
+
+    // Do PreProcess
+    pre_process(roi,
+                classify_w,
+                classify_h,
+                classify_mean,
+                classify_scale,
+                input_data,
+                true);
+
+    // Classification Model Run
+    predictor->Run();
+
+    // Get Output Tensor
+    std::unique_ptr<const Tensor> output_tensor1(
+        std::move(predictor->GetOutput(1)));
+    auto* outptr = output_tensor1->data<float>();
+
+    // Draw Detection and Classification Results
+    cv::rectangle(img, rec_clip, cv::Scalar(0, 0, 255), 2, cv::LINE_AA);
+    std::string text = outptr[1] > classify_threshold ? "wear mask" : "no mask";
+    int font_face = cv::FONT_HERSHEY_COMPLEX_SMALL;
+    double font_scale = 1.f;
+    int thickness = 1;
+    cv::Size text_size =
+        cv::getTextSize(text, font_face, font_scale, thickness, nullptr);
+    float new_font_scale = rec_clip.width * 0.7 * font_scale / text_size.width;
+    text_size =
+        cv::getTextSize(text, font_face, new_font_scale, thickness, nullptr);
+    cv::Point origin;
+    origin.x = rec_clip.x + 5;
+    origin.y = rec_clip.y + text_size.height + 5;
+    cv::putText(img,
+                text,
+                origin,
+                font_face,
+                new_font_scale,
+                cv::Scalar(0, 255, 255),
+                thickness,
+                cv::LINE_AA);
+
+    std::cout << "detect face, location: x=" << rec_clip.x
+              << ", y=" << rec_clip.y << ", width=" << rec_clip.width
+              << ", height=" << rec_clip.height
+              << ", wear mask: " << (outptr[1] > classify_threshold)
+              << std::endl;
+  }
+
+  // Write Result to Image File
+  int start = img_path.find_last_of("/");
+  int end = img_path.find_last_of(".");
+  std::string img_name = img_path.substr(start + 1, end - start - 1);
+  std::string result_name = img_name + "_mask_detection_result.jpg";
+  cv::imwrite(result_name, img);
+}
+
+int main(int argc, char** argv) {
+  if (argc < 3) {
+    std::cerr << "[ERROR] usage: " << argv[0]
+              << " detction_model_dir classification_model_dir image_path\n";
+    exit(1);
+  }
+  std::string detect_model_dir = argv[1];
+  std::string classify_model_dir = argv[2];
+  std::string img_path = argv[3];
+  RunModel(detect_model_dir, classify_model_dir, img_path);
+  return 0;
+}
diff --git a/lite/demo/cxx/ssd_detection/ssd_detection.cc b/lite/demo/cxx/ssd_detection/ssd_detection.cc
index 011733eb87f551141c52ab8e23d9625c93c742fc..2408afcbf64a24924eca119a9d9481dc030250c9 100644
--- a/lite/demo/cxx/ssd_detection/ssd_detection.cc
+++ b/lite/demo/cxx/ssd_detection/ssd_detection.cc
@@ -82,8 +82,8 @@ void neon_mean_scale(const float* din,
   }
   for (; i < size; i++) {
     *(dout_c0++) = (*(din++) - mean[0]) * scale[0];
-    *(dout_c0++) = (*(din++) - mean[1]) * scale[1];
-    *(dout_c0++) = (*(din++) - mean[2]) * scale[2];
+    *(dout_c1++) = (*(din++) - mean[1]) * scale[1];
+    *(dout_c2++) = (*(din++) - mean[2]) * scale[2];
   }
 }
 
@@ -188,13 +188,12 @@ void RunModel(std::string model_dir, std::string img_path) {
       std::move(predictor->GetOutput(0)));
   auto* outptr = output_tensor->data<float>();
   auto shape_out = output_tensor->shape();
-  int64_t cnt = 1;
-  for (auto& i : shape_out) {
-    cnt *= i;
-  }
+  int64_t cnt = ShapeProduction(shape_out);
   auto rec_out = detect_object(outptr, static_cast<int>(cnt / 6), 0.6f, img);
-  std::string result_name =
-      img_path.substr(0, img_path.find(".")) + "_ssd_detection_result.jpg";
+  int start = img_path.find_last_of("/");
+  int end = img_path.find_last_of(".");
+  std::string img_name = img_path.substr(start + 1, end - start - 1);
+  std::string result_name = img_name + "_ssd_detection_result.jpg";
   cv::imwrite(result_name, img);
 }