Commit 050ada10 authored by: S sibo2rr

Merge branch 'develop' of https://github.com/PaddlePaddle/PaddleClas into develop

......@@ -11,3 +11,4 @@ _build/
build/
log/
nohup.out
.DS_Store
#!/bin/bash
dataset_url=$1
package_check_list=(imageio tqdm Cython pycocotools tb_paddle scipy pandas wget h5py sklearn opencv-python visualdl)
for package in ${package_check_list[@]}; do
if python -c "import ${package}" >/dev/null 2>&1; then
echo "${package} have already installed"
else
echo "${package} NOT FOUND"
pip install ${package}
echo "${package} installed"
fi
done
cd dataset
rm -rf ILSVRC2012
wget -nc ${dataset_url}
......
......@@ -5,20 +5,25 @@
# pip install ...
# 2 Copy the data and pretrained models required by the model
# 3 Run in batch (if batch running is inconvenient, steps 1 and 2 need to go into each individual model script)
model_mode_list=(MobileNetV1 MobileNetV2 MobileNetV3_large_x1_0 EfficientNetB0 ShuffleNetV2_x1_0 DenseNet121 HRNet_W48_C SwinTransformer_tiny_patch4_window7_224 alt_gvt_base)
model_mode_list=(MobileNetV1 MobileNetV2 MobileNetV3_large_x1_0 ShuffleNetV2_x1_0 HRNet_W48_C SwinTransformer_tiny_patch4_window7_224 alt_gvt_base) # models monitored by the benchmark
#model_mode_list=(MobileNetV1 MobileNetV2 MobileNetV3_large_x1_0 EfficientNetB0 ShuffleNetV2_x1_0 DenseNet121 HRNet_W48_C SwinTransformer_tiny_patch4_window7_224 alt_gvt_base) # models supported by this script
fp_item_list=(fp32)
bs_list=(32 64 96 128)
#bs_list=(32 64 96 128)
for model_mode in ${model_mode_list[@]}; do
for fp_item in ${fp_item_list[@]}; do
if [ ${model_mode} = MobileNetV3_large_x1_0 ] || [ ${model_mode} = ShuffleNetV2_x1_0 ]; then
bs_list=(256)
else
bs_list=(64)
fi
for bs_item in ${bs_list[@]};do
echo "index is speed, 1gpus, begin, ${model_name}"
run_mode=sp
CUDA_VISIBLE_DEVICES=0 bash benchmark/run_benchmark.sh ${run_mode} ${bs_item} ${fp_item} 10 ${model_mode} # (5min)
CUDA_VISIBLE_DEVICES=0 bash benchmark/run_benchmark.sh ${run_mode} ${bs_item} ${fp_item} 1 ${model_mode} | tee ${log_path}/clas_${model_mode}_${run_mode}_bs${bs_item}_${fp_item}_1gpus 2>&1 # (5min)
sleep 10
echo "index is speed, 8gpus, run_mode is multi_process, begin, ${model_name}"
run_mode=mp
CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 bash benchmark/run_benchmark.sh ${run_mode} ${bs_item} ${fp_item} 10 ${model_mode}
CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 bash benchmark/run_benchmark.sh ${run_mode} ${bs_item} ${fp_item} 1 ${model_mode} | tee ${log_path}/clas_${model_mode}_${run_mode}_bs${bs_item}_${fp_item}_8gpus8p 2>&1
sleep 10
done
done
......
......@@ -6,10 +6,19 @@ function _set_params(){
run_mode=${1:-"sp"} # 单卡sp|多卡mp
batch_size=${2:-"64"}
fp_item=${3:-"fp32"} # fp32|fp16
epochs=${4:-"10"} # 可选,如果需要修改代码提前中断
epochs=${4:-"2"} # 可选,如果需要修改代码提前中断
model_name=${5:-"model_name"}
run_log_path="${TRAIN_LOG_DIR:-$(pwd)}/benchmark" # TRAIN_LOG_DIR will be set by QA later
index=1
mission_name="图像分类" # task name of this model ("图像分类" = image classification), see scripts/config.ini (required)
direction_id=0 # task direction: 0 = CV, 1 = NLP, 2 = Rec (required)
skip_steps=8 # number of leading steps to skip when parsing logs, since the first few steps of some models are slow (required)
keyword="ips:" # keyword that marks the data lines when parsing logs (required)
keyword_loss="loss:" # optional
model_mode=-1 # log parsing mode, see scripts/analysis.py (required)
ips_unit="images/s"
base_batch_size=$batch_size
# no need to modify anything below
device=${CUDA_VISIBLE_DEVICES//,/ }
arr=(${device})
......@@ -26,7 +35,7 @@ function _train(){
model_config=`find ppcls/configs/ImageNet -name ${model_name}_fp16.yaml`
fi
train_cmd="-c ${model_config} -o DataLoader.Train.sampler.batch_size=${batch_size} -o Global.epochs=${epochs}"
train_cmd="-c ${model_config} -o DataLoader.Train.sampler.batch_size=${batch_size} -o Global.epochs=${epochs} -o Global.eval_during_train=False -o Global.print_batch_step=2"
case ${run_mode} in
sp) train_cmd="python -u tools/train.py ${train_cmd}" ;;
mp)
......@@ -36,7 +45,7 @@ function _train(){
esac
rm -rf mylog
# no need to modify anything below
timeout 15m ${train_cmd} > ${log_file} 2>&1
timeout 5m ${train_cmd} > ${log_file} 2>&1
if [ $? -ne 0 ];then
echo -e "${model_name}, FAIL"
export job_fail_flag=1
......@@ -51,6 +60,8 @@ function _train(){
cp mylog/workerlog.0 ${log_file}
fi
}
source ${BENCHMARK_ROOT}/scripts/run_model.sh # this script parses performance data from benchmark-compliant logs with analysis.py; for integration testing it can be downloaded from the benchmark repo: https://github.com/PaddlePaddle/benchmark/blob/master/scripts/run_model.sh; if you only want to produce training logs without integration, comment out this line, but it must be enabled when submitting
_set_params $@
_train
_run
#_train
cmake_minimum_required(VERSION 3.14)
project(pp_shitu CXX C)
option(WITH_MKL "Compile demo with MKL/OpenBlas support, default use MKL." ON)
option(WITH_GPU "Compile demo with GPU/CPU, default use CPU." OFF)
option(WITH_STATIC_LIB "Compile demo with static/shared library, default use static." ON)
option(WITH_TENSORRT "Compile demo with TensorRT." OFF)
option(FAISS_WITH_MKL "Faiss Compile demo with MKL." OFF)
SET(PADDLE_LIB "" CACHE PATH "Location of libraries")
SET(OPENCV_DIR "" CACHE PATH "Location of libraries")
SET(CUDA_LIB "" CACHE PATH "Location of libraries")
SET(CUDNN_LIB "" CACHE PATH "Location of libraries")
SET(TENSORRT_DIR "" CACHE PATH "Compile demo with TensorRT")
SET(FAISS_DIR "" CACHE PATH "Location of libraries")
set(DEMO_NAME "pp_shitu")
if (FAISS_WITH_MKL)
SET(BLAS_NAME "mklml_intel")
else()
SET(BLAS_NAME "openblas")
endif()
include(external-cmake/yaml-cpp.cmake)
include_directories("${CMAKE_SOURCE_DIR}/")
include_directories("${CMAKE_CURRENT_BINARY_DIR}/ext/yaml-cpp/src/ext-yaml-cpp/include")
link_directories("${CMAKE_CURRENT_BINARY_DIR}/ext/yaml-cpp/lib")
macro(safe_set_static_flag)
foreach(flag_var
CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE
CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO)
if(${flag_var} MATCHES "/MD")
string(REGEX REPLACE "/MD" "/MT" ${flag_var} "${${flag_var}}")
endif(${flag_var} MATCHES "/MD")
endforeach(flag_var)
endmacro()
if (WITH_MKL)
ADD_DEFINITIONS(-DUSE_MKL)
endif()
if(NOT DEFINED FAISS_DIR)
message(FATAL_ERROR "please set FAISS_DIR with -DFAISS_DIR=/path/faiss")
endif()
if(NOT DEFINED PADDLE_LIB)
message(FATAL_ERROR "please set PADDLE_LIB with -DPADDLE_LIB=/path/paddle/lib")
endif()
if(NOT DEFINED OPENCV_DIR)
message(FATAL_ERROR "please set OPENCV_DIR with -DOPENCV_DIR=/path/opencv")
endif()
if (WIN32)
include_directories("${FAISS_DIR}/include")
link_directories("${FAISS_DIR}/lib")
find_package(faiss REQUIRED PATHS ${FAISS_DIR}/share/faiss/ NO_DEFAULT_PATH)
else()
find_package(faiss REQUIRED PATHS ${FAISS_DIR}/share/faiss NO_DEFAULT_PATH)
include_directories("${FAISS_DIR}/include")
link_directories("${FAISS_DIR}/lib")
endif()
if (WIN32)
include_directories("${PADDLE_LIB}/paddle/fluid/inference")
include_directories("${PADDLE_LIB}/paddle/include")
link_directories("${PADDLE_LIB}/paddle/fluid/inference")
find_package(OpenCV REQUIRED PATHS ${OPENCV_DIR}/build/ NO_DEFAULT_PATH)
else ()
find_package(OpenCV REQUIRED PATHS ${OPENCV_DIR}/share/OpenCV NO_DEFAULT_PATH)
include_directories("${PADDLE_LIB}/paddle/include")
link_directories("${PADDLE_LIB}/paddle/lib")
endif ()
include_directories(${OpenCV_INCLUDE_DIRS})
if (WIN32)
add_definitions("/DGOOGLE_GLOG_DLL_DECL=")
set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} /bigobj /MTd")
set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} /bigobj /MT")
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /bigobj /MTd")
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /bigobj /MT")
if (WITH_STATIC_LIB)
safe_set_static_flag()
add_definitions(-DSTATIC_LIB)
endif()
else()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g -O3 -std=c++11")
set(CMAKE_STATIC_LIBRARY_PREFIX "")
endif()
message("flags" ${CMAKE_CXX_FLAGS})
if (WITH_GPU)
if (NOT DEFINED CUDA_LIB OR ${CUDA_LIB} STREQUAL "")
message(FATAL_ERROR "please set CUDA_LIB with -DCUDA_LIB=/path/cuda-8.0/lib64")
endif()
if (NOT WIN32)
if (NOT DEFINED CUDNN_LIB)
message(FATAL_ERROR "please set CUDNN_LIB with -DCUDNN_LIB=/path/cudnn_v7.4/cuda/lib64")
endif()
endif(NOT WIN32)
endif()
include_directories("${PADDLE_LIB}/third_party/install/protobuf/include")
include_directories("${PADDLE_LIB}/third_party/install/glog/include")
include_directories("${PADDLE_LIB}/third_party/install/gflags/include")
include_directories("${PADDLE_LIB}/third_party/install/xxhash/include")
include_directories("${PADDLE_LIB}/third_party/install/zlib/include")
include_directories("${PADDLE_LIB}/third_party/boost")
include_directories("${PADDLE_LIB}/third_party/eigen3")
include_directories("${CMAKE_SOURCE_DIR}/")
if (NOT WIN32)
if (WITH_TENSORRT AND WITH_GPU)
include_directories("${TENSORRT_DIR}/include")
link_directories("${TENSORRT_DIR}/lib")
endif()
endif(NOT WIN32)
link_directories("${PADDLE_LIB}/third_party/install/zlib/lib")
link_directories("${PADDLE_LIB}/third_party/install/protobuf/lib")
link_directories("${PADDLE_LIB}/third_party/install/glog/lib")
link_directories("${PADDLE_LIB}/third_party/install/gflags/lib")
link_directories("${PADDLE_LIB}/third_party/install/xxhash/lib")
link_directories("${PADDLE_LIB}/paddle/lib")
if(WITH_MKL)
include_directories("${PADDLE_LIB}/third_party/install/mklml/include")
if (WIN32)
set(MATH_LIB ${PADDLE_LIB}/third_party/install/mklml/lib/mklml.lib
${PADDLE_LIB}/third_party/install/mklml/lib/libiomp5md.lib)
else ()
set(MATH_LIB ${PADDLE_LIB}/third_party/install/mklml/lib/libmklml_intel${CMAKE_SHARED_LIBRARY_SUFFIX}
${PADDLE_LIB}/third_party/install/mklml/lib/libiomp5${CMAKE_SHARED_LIBRARY_SUFFIX})
execute_process(COMMAND cp -r ${PADDLE_LIB}/third_party/install/mklml/lib/libmklml_intel${CMAKE_SHARED_LIBRARY_SUFFIX} /usr/lib)
endif ()
set(MKLDNN_PATH "${PADDLE_LIB}/third_party/install/mkldnn")
if(EXISTS ${MKLDNN_PATH})
include_directories("${MKLDNN_PATH}/include")
if (WIN32)
set(MKLDNN_LIB ${MKLDNN_PATH}/lib/mkldnn.lib)
else ()
set(MKLDNN_LIB ${MKLDNN_PATH}/lib/libmkldnn.so.0)
endif ()
endif()
else()
if (WIN32)
set(MATH_LIB ${PADDLE_LIB}/third_party/install/openblas/lib/openblas${CMAKE_STATIC_LIBRARY_SUFFIX})
else ()
set(MATH_LIB ${PADDLE_LIB}/third_party/install/openblas/lib/libopenblas${CMAKE_STATIC_LIBRARY_SUFFIX})
endif ()
endif()
# Note: libpaddle_inference_api.so/a must put before libpaddle_fluid.so/a
if(WITH_STATIC_LIB)
if(WIN32)
set(DEPS
${PADDLE_LIB}/paddle/lib/paddle_inference${CMAKE_STATIC_LIBRARY_SUFFIX})
else()
set(DEPS
${PADDLE_LIB}/paddle/lib/libpaddle_inference${CMAKE_STATIC_LIBRARY_SUFFIX})
endif()
else()
if(WIN32)
set(DEPS
${PADDLE_LIB}/paddle/lib/paddle_inference${CMAKE_SHARED_LIBRARY_SUFFIX})
else()
set(DEPS
${PADDLE_LIB}/paddle/lib/libpaddle_inference${CMAKE_SHARED_LIBRARY_SUFFIX})
endif()
endif(WITH_STATIC_LIB)
if (NOT WIN32)
set(DEPS ${DEPS}
${MATH_LIB} ${MKLDNN_LIB}
glog gflags protobuf z xxhash yaml-cpp
)
if(EXISTS "${PADDLE_LIB}/third_party/install/snappystream/lib")
set(DEPS ${DEPS} snappystream)
endif()
if (EXISTS "${PADDLE_LIB}/third_party/install/snappy/lib")
set(DEPS ${DEPS} snappy)
endif()
else()
set(DEPS ${DEPS}
${MATH_LIB} ${MKLDNN_LIB}
glog gflags_static libprotobuf xxhash libyaml-cppmt)
set(DEPS ${DEPS} libcmt shlwapi)
if (EXISTS "${PADDLE_LIB}/third_party/install/snappy/lib")
set(DEPS ${DEPS} snappy)
endif()
if(EXISTS "${PADDLE_LIB}/third_party/install/snappystream/lib")
set(DEPS ${DEPS} snappystream)
endif()
endif(NOT WIN32)
if(WITH_GPU)
if(NOT WIN32)
if (WITH_TENSORRT)
set(DEPS ${DEPS} ${TENSORRT_DIR}/lib/libnvinfer${CMAKE_SHARED_LIBRARY_SUFFIX})
set(DEPS ${DEPS} ${TENSORRT_DIR}/lib/libnvinfer_plugin${CMAKE_SHARED_LIBRARY_SUFFIX})
endif()
set(DEPS ${DEPS} ${CUDA_LIB}/libcudart${CMAKE_SHARED_LIBRARY_SUFFIX})
set(DEPS ${DEPS} ${CUDNN_LIB}/libcudnn${CMAKE_SHARED_LIBRARY_SUFFIX})
else()
set(DEPS ${DEPS} ${CUDA_LIB}/cudart${CMAKE_STATIC_LIBRARY_SUFFIX} )
set(DEPS ${DEPS} ${CUDA_LIB}/cublas${CMAKE_STATIC_LIBRARY_SUFFIX} )
set(DEPS ${DEPS} ${CUDNN_LIB}/cudnn${CMAKE_STATIC_LIBRARY_SUFFIX})
endif()
endif()
if (NOT WIN32)
set(EXTERNAL_LIB "-ldl -lrt -lgomp -lz -lm -lpthread")
set(DEPS ${DEPS} ${EXTERNAL_LIB})
endif()
set(DEPS ${DEPS} ${OpenCV_LIBS})
include(FetchContent)
include(external-cmake/auto-log.cmake)
include_directories(${FETCHCONTENT_BASE_DIR}/extern_autolog-src)
AUX_SOURCE_DIRECTORY(./src SRCS)
add_executable(${DEMO_NAME} ${SRCS})
ADD_DEPENDENCIES(${DEMO_NAME} ext-yaml-cpp)
target_link_libraries(${DEMO_NAME} ${DEPS})
target_link_libraries(${DEMO_NAME} ${FAISS_DIR}/lib/libfaiss${CMAKE_STATIC_LIBRARY_SUFFIX})
target_link_libraries(${DEMO_NAME} ${BLAS_NAME}${CMAKE_SHARED_LIBRARY_SUFFIX})
if (WIN32 AND WITH_MKL)
add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_LIB}/third_party/install/mklml/lib/mklml.dll ./mklml.dll
COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_LIB}/third_party/install/mklml/lib/libiomp5md.dll ./libiomp5md.dll
COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_LIB}/third_party/install/mkldnn/lib/mkldnn.dll ./mkldnn.dll
COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_LIB}/third_party/install/mklml/lib/mklml.dll ./release/mklml.dll
COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_LIB}/third_party/install/mklml/lib/libiomp5md.dll ./release/libiomp5md.dll
COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_LIB}/third_party/install/mkldnn/lib/mkldnn.dll ./release/mkldnn.dll
)
endif()
find_package(Git REQUIRED)
include(FetchContent)
set(FETCHCONTENT_BASE_DIR "${CMAKE_CURRENT_BINARY_DIR}/third-party")
FetchContent_Declare(
extern_Autolog
PREFIX autolog
GIT_REPOSITORY https://github.com/LDOUBLEV/AutoLog.git
GIT_TAG main
)
FetchContent_MakeAvailable(extern_Autolog)
find_package(Git REQUIRED)
include(ExternalProject)
message("${CMAKE_BUILD_TYPE}")
ExternalProject_Add(
ext-yaml-cpp
URL https://bj.bcebos.com/paddlex/deploy/deps/yaml-cpp.zip
URL_MD5 9542d6de397d1fbd649ed468cb5850e6
CMAKE_ARGS
-DYAML_CPP_BUILD_TESTS=OFF
-DYAML_CPP_BUILD_TOOLS=OFF
-DYAML_CPP_INSTALL=OFF
-DYAML_CPP_BUILD_CONTRIB=OFF
-DMSVC_SHARED_RT=OFF
-DBUILD_SHARED_LIBS=OFF
-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
-DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
-DCMAKE_CXX_FLAGS_DEBUG=${CMAKE_CXX_FLAGS_DEBUG}
-DCMAKE_CXX_FLAGS_RELEASE=${CMAKE_CXX_FLAGS_RELEASE}
-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=${CMAKE_BINARY_DIR}/ext/yaml-cpp/lib
-DCMAKE_ARCHIVE_OUTPUT_DIRECTORY=${CMAKE_BINARY_DIR}/ext/yaml-cpp/lib
PREFIX "${CMAKE_BINARY_DIR}/ext/yaml-cpp"
# Disable install step
INSTALL_COMMAND ""
LOG_DOWNLOAD ON
LOG_BUILD 1
)
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "opencv2/core.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/imgproc.hpp"
#include "paddle_inference_api.h"
#include <chrono>
#include <iomanip>
#include <iostream>
#include <ostream>
#include <vector>
#include <cstring>
#include <fstream>
#include <numeric>
#include <include/preprocess_op.h>
#include <include/yaml_config.h>
using namespace paddle_infer;
namespace Feature {
class FeatureExtracter {
public:
explicit FeatureExtracter(const YAML::Node &config_file) {
this->use_gpu_ = config_file["Global"]["use_gpu"].as<bool>();
if (config_file["Global"]["gpu_id"].IsDefined())
this->gpu_id_ = config_file["Global"]["gpu_id"].as<int>();
else
this->gpu_id_ = 0;
this->gpu_mem_ = config_file["Global"]["gpu_mem"].as<int>();
this->cpu_math_library_num_threads_ =
config_file["Global"]["cpu_num_threads"].as<int>();
this->use_mkldnn_ = config_file["Global"]["enable_mkldnn"].as<bool>();
this->use_tensorrt_ = config_file["Global"]["use_tensorrt"].as<bool>();
this->use_fp16_ = config_file["Global"]["use_fp16"].as<bool>();
this->cls_model_path_ =
config_file["Global"]["rec_inference_model_dir"].as<std::string>() +
OS_PATH_SEP + "inference.pdmodel";
this->cls_params_path_ =
config_file["Global"]["rec_inference_model_dir"].as<std::string>() +
OS_PATH_SEP + "inference.pdiparams";
this->resize_size_ =
config_file["RecPreProcess"]["transform_ops"][0]["ResizeImage"]["size"]
.as<int>();
this->scale_ = config_file["RecPreProcess"]["transform_ops"][1]["NormalizeImage"]["scale"].as<float>();
this->mean_ = config_file["RecPreProcess"]["transform_ops"][1]
["NormalizeImage"]["mean"]
.as < std::vector < float >> ();
this->std_ = config_file["RecPreProcess"]["transform_ops"][1]
["NormalizeImage"]["std"]
.as < std::vector < float >> ();
if (config_file["Global"]["rec_feature_normlize"].IsDefined())
this->feature_norm =
config_file["Global"]["rec_feature_normlize"].as<bool>();
LoadModel(cls_model_path_, cls_params_path_);
}
// Load Paddle inference model
void LoadModel(const std::string &model_path, const std::string &params_path);
// Run predictor
void Run(cv::Mat &img, std::vector<float> &out_data,
std::vector<double> &times);
void FeatureNorm(std::vector<float> &feature);
std::shared_ptr <Predictor> predictor_;
private:
bool use_gpu_ = false;
int gpu_id_ = 0;
int gpu_mem_ = 4000;
int cpu_math_library_num_threads_ = 4;
bool use_mkldnn_ = false;
bool use_tensorrt_ = false;
bool feature_norm = true;
bool use_fp16_ = false;
std::vector<float> mean_ = {0.485f, 0.456f, 0.406f};
std::vector<float> std_ = {0.229f, 0.224f, 0.225f};
float scale_ = 0.00392157;
int resize_size_ = 224;
int resize_short_ = 224;
std::string cls_model_path_;
std::string cls_params_path_;
// pre-process
ResizeImg resize_op_;
Normalize normalize_op_;
Permute permute_op_;
};
} // namespace Feature
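// Illustrative usage sketch (kept as a comment, not compiled): build the
// extracter from the demo YAML config and extract one embedding. The config
// and image paths below are placeholders, not files added by this commit.
//
//   YAML::Node config = YamlConfig::ReadYamlConfig("inference_drink.yaml");
//   Feature::FeatureExtracter extracter(config);
//   cv::Mat img = cv::imread("demo.jpg", cv::IMREAD_COLOR);
//   cv::cvtColor(img, img, cv::COLOR_BGR2RGB);  // the pipeline feeds RGB images
//   std::vector<float> feature;                 // output embedding
//   std::vector<double> times = {0, 0, 0};      // preprocess / inference / postprocess
//   extracter.Run(img, feature, times);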
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// This code is adapted from opencv (https://github.com/opencv/opencv)
#include <algorithm>
#include <include/object_detector.h>
template<typename T>
static inline bool SortScorePairDescend(const std::pair<float, T> &pair1,
const std::pair<float, T> &pair2) {
return pair1.first > pair2.first;
}
// Intersection-over-union (IoU) of two boxes stored as [xmin, ymin, xmax, ymax]
float RectOverlap(const Detection::ObjectResult &a,
const Detection::ObjectResult &b) {
float Aa = (a.rect[2] - a.rect[0] + 1) * (a.rect[3] - a.rect[1] + 1);
float Ab = (b.rect[2] - b.rect[0] + 1) * (b.rect[3] - b.rect[1] + 1);
int iou_w = std::max(std::min(a.rect[2], b.rect[2]) - std::max(a.rect[0], b.rect[0]) + 1, 0);
int iou_h = std::max(std::min(a.rect[3], b.rect[3]) - std::max(a.rect[1], b.rect[1]) + 1, 0);
float Aab = iou_w * iou_h;
return Aab / (Aa + Ab - Aab);
}
// Get max scores with corresponding indices.
// scores: a set of scores.
// threshold: only consider scores higher than the threshold.
// top_k: if -1, keep all; otherwise, keep at most top_k.
// score_index_vec: store the sorted (score, index) pair.
inline void
GetMaxScoreIndex(const std::vector <Detection::ObjectResult> &det_result,
const float threshold,
std::vector <std::pair<float, int>> &score_index_vec) {
// Generate index score pairs.
for (size_t i = 0; i < det_result.size(); ++i) {
if (det_result[i].confidence > threshold) {
score_index_vec.push_back(std::make_pair(det_result[i].confidence, i));
}
}
// Sort the score pair according to the scores in descending order
std::stable_sort(score_index_vec.begin(), score_index_vec.end(),
SortScorePairDescend<int>);
// // Keep top_k scores if needed.
// if (top_k > 0 && top_k < (int)score_index_vec.size())
// {
// score_index_vec.resize(top_k);
// }
}
void NMSBoxes(const std::vector <Detection::ObjectResult> det_result,
const float score_threshold, const float nms_threshold,
std::vector<int> &indices) {
int a = 1;
// Get top_k scores (with corresponding indices).
std::vector <std::pair<float, int>> score_index_vec;
GetMaxScoreIndex(det_result, score_threshold, score_index_vec);
// Do nms
indices.clear();
for (size_t i = 0; i < score_index_vec.size(); ++i) {
const int idx = score_index_vec[i].second;
bool keep = true;
for (int k = 0; k < (int) indices.size() && keep; ++k) {
const int kept_idx = indices[k];
float overlap = RectOverlap(det_result[idx], det_result[kept_idx]);
keep = overlap <= nms_threshold;
}
if (keep)
indices.push_back(idx);
}
}
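// Illustrative usage sketch (comment only): the demo main program calls the NMS
// above on the detection results, with both thresholds taken from the YAML
// config; det_result is assumed to be filled by Detection::ObjectDetector.
//
//   std::vector<int> indices;
//   NMSBoxes(det_result, searcher.GetThreshold(), rec_nms_thresold, indices);
//   // `indices` now holds the kept boxes, ordered by descending confidence.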
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <ctime>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include "paddle_inference_api.h" // NOLINT
#include "include/preprocess_op_det.h"
#include "include/yaml_config.h"
using namespace paddle_infer;
namespace Detection {
// Object Detection Result
struct ObjectResult {
// Rectangle coordinates of the detected object: xmin, ymin, xmax, ymax
std::vector<int> rect;
// Class id of detected object
int class_id;
// Confidence of detected object
float confidence;
};
// Generate visualization colormap for each class
std::vector<int> GenerateColorMap(int num_class);
// Visualization of detection results
cv::Mat VisualizeResult(const cv::Mat &img,
const std::vector <ObjectResult> &results,
const std::vector <std::string> &lables,
const std::vector<int> &colormap, const bool is_rbox);
class ObjectDetector {
public:
explicit ObjectDetector(const YAML::Node &config_file) {
this->use_gpu_ = config_file["Global"]["use_gpu"].as<bool>();
if (config_file["Global"]["gpu_id"].IsDefined())
this->gpu_id_ = config_file["Global"]["gpu_id"].as<int>();
this->gpu_mem_ = config_file["Global"]["gpu_mem"].as<int>();
this->cpu_math_library_num_threads_ =
config_file["Global"]["cpu_num_threads"].as<int>();
this->use_mkldnn_ = config_file["Global"]["enable_mkldnn"].as<bool>();
this->use_tensorrt_ = config_file["Global"]["use_tensorrt"].as<bool>();
this->use_fp16_ = config_file["Global"]["use_fp16"].as<bool>();
this->model_dir_ =
config_file["Global"]["det_inference_model_dir"].as<std::string>();
this->threshold_ = config_file["Global"]["threshold"].as<float>();
this->max_det_results_ = config_file["Global"]["max_det_results"].as<int>();
this->image_shape_ =
config_file["Global"]["image_shape"].as < std::vector < int >> ();
this->label_list_ =
config_file["Global"]["labe_list"].as < std::vector < std::string >> ();
this->ir_optim_ = config_file["Global"]["ir_optim"].as<bool>();
this->batch_size_ = config_file["Global"]["batch_size"].as<int>();
preprocessor_.Init(config_file["DetPreProcess"]["transform_ops"]);
LoadModel(model_dir_, batch_size_, run_mode);
}
// Load Paddle inference model
void LoadModel(const std::string &model_dir, const int batch_size = 1,
const std::string &run_mode = "fluid");
// Run predictor
void Predict(const std::vector <cv::Mat> imgs, const int warmup = 0,
const int repeats = 1,
std::vector <ObjectResult> *result = nullptr,
std::vector<int> *bbox_num = nullptr,
std::vector<double> *times = nullptr);
const std::vector <std::string> &GetLabelList() const {
return this->label_list_;
}
const float &GetThreshold() const { return this->threshold_; }
private:
bool use_gpu_ = true;
int gpu_id_ = 0;
int gpu_mem_ = 800;
int cpu_math_library_num_threads_ = 6;
std::string run_mode = "fluid";
bool use_mkldnn_ = false;
bool use_tensorrt_ = false;
int batch_size_ = 1;
bool use_fp16_ = false;
std::string model_dir_;
float threshold_ = 0.5;
float max_det_results_ = 5;
std::vector<int> image_shape_ = {3, 640, 640};
std::vector <std::string> label_list_;
bool ir_optim_ = true;
bool det_permute_ = true;
bool det_postprocess_ = true;
int min_subgraph_size_ = 30;
bool use_dynamic_shape_ = false;
int trt_min_shape_ = 1;
int trt_max_shape_ = 1280;
int trt_opt_shape_ = 640;
bool trt_calib_mode_ = false;
// Preprocess image and copy data to input buffer
void Preprocess(const cv::Mat &image_mat);
// Postprocess result
void Postprocess(const std::vector <cv::Mat> mats,
std::vector <ObjectResult> *result, std::vector<int> bbox_num,
bool is_rbox);
std::shared_ptr <Predictor> predictor_;
Preprocessor preprocessor_;
ImageBlob inputs_;
std::vector<float> output_data_;
std::vector<int> out_bbox_num_data_;
};
} // namespace Detection
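// Illustrative usage sketch (comment only): construct the detector from the
// demo YAML config and run it on a single image; both paths are placeholders.
//
//   YAML::Node config = YamlConfig::ReadYamlConfig("inference_drink.yaml");
//   Detection::ObjectDetector detector(config);
//   cv::Mat img = cv::imread("demo.jpg", cv::IMREAD_COLOR);
//   std::vector<Detection::ObjectResult> results;
//   std::vector<int> bbox_num;
//   std::vector<double> times = {0, 0, 0};
//   detector.Predict({img}, /*warmup=*/0, /*repeats=*/1, &results, &bbox_num, &times);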
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "opencv2/core.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/imgproc.hpp"
#include <chrono>
#include <iomanip>
#include <iostream>
#include <ostream>
#include <vector>
#include <cstring>
#include <fstream>
#include <numeric>
using namespace std;
namespace Feature {
class Normalize {
public:
virtual void Run(cv::Mat *im, const std::vector<float> &mean,
const std::vector<float> &std, float scale);
};
// RGB -> CHW
class Permute {
public:
virtual void Run(const cv::Mat *im, float *data);
};
class CenterCropImg {
public:
virtual void Run(cv::Mat &im, const int crop_size = 224);
};
class ResizeImg {
public:
virtual void Run(const cv::Mat &img, cv::Mat &resize_img, int max_size_len,
int size = 0);
};
} // namespace Feature
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <glog/logging.h>
#include <yaml-cpp/yaml.h>
#include <iostream>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
namespace Detection {
// Object for storing all preprocessed data
class ImageBlob {
public:
// image width and height
std::vector<float> im_shape_;
// Buffer for image data after preprocessing
std::vector<float> im_data_;
// in net data shape(after pad)
std::vector<float> in_net_shape_;
// Evaluation image width and height
// std::vector<float> eval_im_size_f_;
// Scale factor for image size to origin image size
std::vector<float> scale_factor_;
};
// Abstraction of a preprocessing operation class
class PreprocessOp {
public:
virtual void Init(const YAML::Node &item) = 0;
virtual void Run(cv::Mat *im, ImageBlob *data) = 0;
};
class InitInfo : public PreprocessOp {
public:
virtual void Init(const YAML::Node &item) {}
virtual void Run(cv::Mat *im, ImageBlob *data);
};
class NormalizeImage : public PreprocessOp {
public:
virtual void Init(const YAML::Node &item) {
mean_ = item["mean"].as < std::vector < float >> ();
scale_ = item["std"].as < std::vector < float >> ();
is_scale_ = item["is_scale"].as<bool>();
}
virtual void Run(cv::Mat *im, ImageBlob *data);
private:
// CHW or HWC
std::vector<float> mean_;
std::vector<float> scale_;
bool is_scale_;
};
class Permute : public PreprocessOp {
public:
virtual void Init(const YAML::Node &item) {}
virtual void Run(cv::Mat *im, ImageBlob *data);
};
class Resize : public PreprocessOp {
public:
virtual void Init(const YAML::Node &item) {
interp_ = item["interp"].as<int>();
// max_size_ = item["target_size"].as<int>();
keep_ratio_ = item["keep_ratio"].as<bool>();
target_size_ = item["target_size"].as < std::vector < int >> ();
}
// Compute best resize scale for x-dimension, y-dimension
std::pair<double, double> GenerateScale(const cv::Mat &im);
virtual void Run(cv::Mat *im, ImageBlob *data);
private:
int interp_ = 2;
bool keep_ratio_;
std::vector<int> target_size_;
std::vector<int> in_net_shape_;
};
// Models with FPN need input shape % stride == 0
class PadStride : public PreprocessOp {
public:
virtual void Init(const YAML::Node &item) {
stride_ = item["stride"].as<int>();
}
virtual void Run(cv::Mat *im, ImageBlob *data);
private:
int stride_;
};
class Preprocessor {
public:
void Init(const YAML::Node &config_node) {
// initialize image info at first
ops_["InitInfo"] = std::make_shared<InitInfo>();
for (int i = 0; i < config_node.size(); ++i) {
if (config_node[i]["DetResize"].IsDefined()) {
ops_["Resize"] = std::make_shared<Resize>();
ops_["Resize"]->Init(config_node[i]["DetResize"]);
}
if (config_node[i]["DetNormalizeImage"].IsDefined()) {
ops_["NormalizeImage"] = std::make_shared<NormalizeImage>();
ops_["NormalizeImage"]->Init(config_node[i]["DetNormalizeImage"]);
}
if (config_node[i]["DetPermute"].IsDefined()) {
ops_["Permute"] = std::make_shared<Permute>();
ops_["Permute"]->Init(config_node[i]["DetPermute"]);
}
if (config_node[i]["DetPadStrid"].IsDefined()) {
ops_["PadStride"] = std::make_shared<PadStride>();
ops_["PadStride"]->Init(config_node[i]["DetPadStrid"]);
}
}
}
void Run(cv::Mat *im, ImageBlob *data);
public:
static const std::vector <std::string> RUN_ORDER;
private:
std::unordered_map <std::string, std::shared_ptr<PreprocessOp>> ops_;
};
} // namespace Detection
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#ifdef WIN32
#define OS_PATH_SEP "\\"
#else
#define OS_PATH_SEP "/"
#endif
#include "yaml-cpp/yaml.h"
#include <cstring>
#include <faiss/Index.h>
#include <faiss/index_io.h>
#include <map>
struct SearchResult {
std::vector <faiss::Index::idx_t> I;
std::vector<float> D;
int return_k;
};
class VectorSearch {
public:
explicit VectorSearch(const YAML::Node &config_file) {
// IndexProcess
this->index_dir =
config_file["IndexProcess"]["index_dir"].as<std::string>();
this->return_k = config_file["IndexProcess"]["return_k"].as<int>();
this->score_thres = config_file["IndexProcess"]["score_thres"].as<float>();
this->max_query_number =
config_file["Global"]["max_det_results"].as<int>() + 1;
LoadIdMap();
LoadIndexFile();
this->I.resize(this->return_k * this->max_query_number);
this->D.resize(this->return_k * this->max_query_number);
};
void LoadIdMap();
void LoadIndexFile();
const SearchResult &Search(float *feature, int query_number);
const std::string &GetLabel(faiss::Index::idx_t ind);
const float &GetThreshold() { return this->score_thres; }
private:
std::string index_dir;
int return_k = 5;
float score_thres = 0.5;
std::map<long int, std::string> id_map;
faiss::Index *index;
int max_query_number = 6;
std::vector<float> D;
std::vector <faiss::Index::idx_t> I;
SearchResult sr;
};
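// Illustrative usage sketch (comment only): query the faiss index with one
// embedding produced by Feature::FeatureExtracter; the config path is a
// placeholder and `feature` is assumed to hold a single query vector.
//
//   YAML::Node config = YamlConfig::ReadYamlConfig("inference_drink.yaml");
//   VectorSearch searcher(config);
//   const SearchResult &res = searcher.Search(feature.data(), /*query_number=*/1);
//   std::string top1_label = searcher.GetLabel(res.I[0]);  // label of best match
//   float top1_score = res.D[0];                           // its similarity score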
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#ifdef WIN32
#define OS_PATH_SEP "\\"
#else
#define OS_PATH_SEP "/"
#endif
#include <chrono>
#include <iomanip>
#include <iostream>
#include <ostream>
#include <stdlib.h>
#include <vector>
#include <algorithm>
#include <cstring>
#include <fstream>
#include <map>
#include <numeric>
#include <regex>
#include "opencv2/core.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/imgproc.hpp"
#include "yaml-cpp/yaml.h"
class YamlConfig {
public:
explicit YamlConfig(const std::string &path) {
config_file = ReadYamlConfig(path);
}
static std::vector <std::string> ReadDict(const std::string &path);
static std::map<int, std::string> ReadIndexId(const std::string &path);
static YAML::Node ReadYamlConfig(const std::string &path);
void PrintConfigInfo();
YAML::Node config_file;
};
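// Illustrative usage sketch (comment only), mirroring the demo main program:
// load the YAML config and read a field; the path below is a placeholder.
//
//   YamlConfig config("inference_drink.yaml");
//   config.PrintConfigInfo();
//   int batch_size = config.config_file["Global"]["batch_size"].as<int>();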
# Server-side C++ Inference
This tutorial walks through the steps to deploy PP-ShiTu on the server side.
## 1. Prepare the Environment
### Runtime preparation
- Linux environment; an ubuntu docker image is recommended.
### 1.1 Upgrade cmake
Building the dependencies requires a fairly recent cmake, so the first step is to upgrade cmake.
- Download the latest cmake
```shell
# 3.22.0 is the latest version at the time of writing; download whichever version fits your situation, the latest is recommended
wget https://github.com/Kitware/CMake/releases/download/v3.22.0/cmake-3.22.0.tar.gz
tar xf cmake-3.22.0.tar.gz
```
You should now see a `cmake-3.22.0/` folder in the current directory.
- To build cmake, first set the cmake source path (`root_path`) and the install path (`install_path`): `root_path` is the path of the downloaded cmake source and `install_path` is where cmake will be installed. In this example, the source path is `cmake-3.22.0/` under the current directory.
```shell
cd ./cmake-3.22.0
export root_path=$PWD
export install_path=${root_path}/cmake
```
- Then, inside the cmake source directory, build it as follows:
```shell
./bootstrap --prefix=${install_path}
make -j
make install
```
- Set the environment variables
```shell
export PATH=${install_path}/bin:$PATH
# check that cmake works
cmake --version
```
cmake is now ready to use.
### 1.2 Build the opencv library
* First download the opencv source package for Linux from the opencv website. Taking version 3.4.7 as an example, the download and extraction commands are:
```
wget https://github.com/opencv/opencv/archive/3.4.7.tar.gz
tar -xvf 3.4.7.tar.gz
```
You should now see an `opencv-3.4.7/` folder in the current directory.
* To build opencv, first set the opencv source path (`root_path`) and the install path (`install_path`): `root_path` is the path of the downloaded opencv source and `install_path` is where opencv will be installed. In this example, the source path is `opencv-3.4.7/` under the current directory.
```shell
cd ./opencv-3.4.7
export root_path=$PWD
export install_path=${root_path}/opencv3
```
* Then, inside the opencv source directory, build it as follows.
```shell
rm -rf build
mkdir build
cd build
cmake .. \
-DCMAKE_INSTALL_PREFIX=${install_path} \
-DCMAKE_BUILD_TYPE=Release \
-DBUILD_SHARED_LIBS=OFF \
-DWITH_IPP=OFF \
-DBUILD_IPP_IW=OFF \
-DWITH_LAPACK=OFF \
-DWITH_EIGEN=OFF \
-DCMAKE_INSTALL_LIBDIR=lib64 \
-DWITH_ZLIB=ON \
-DBUILD_ZLIB=ON \
-DWITH_JPEG=ON \
-DBUILD_JPEG=ON \
-DWITH_PNG=ON \
-DBUILD_PNG=ON \
-DWITH_TIFF=ON \
-DBUILD_TIFF=ON
make -j
make install
```
* After `make install` finishes, the opencv headers and library files are generated in that folder; they are used later when building the PaddleClas code.
Taking opencv 3.4.7 as an example, the final file structure under the install path is shown below. **Note**: the structure may differ for other opencv versions.
```
opencv3/
|-- bin
|-- include
|-- lib64
|-- share
```
### 1.3 Download or Build the Paddle Inference Library
* There are two ways to obtain the Paddle inference library; both are described below.
#### 1.3.1 Build the inference library from source
* If you want the latest inference library features, you can clone the latest code from the Paddle GitHub repository and build the inference library from source.
* Follow the instructions on the [Paddle inference library website](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/guides/05_inference_deployment/inference/build_and_install_lib_cn.html#id16) to fetch the Paddle code from GitHub and build the latest inference library. Fetch the code with git as follows.
```shell
git clone https://github.com/PaddlePaddle/Paddle.git
```
* After entering the Paddle directory, build as follows.
```shell
rm -rf build
mkdir build
cd build
cmake .. \
-DWITH_CONTRIB=OFF \
-DWITH_MKL=ON \
-DWITH_MKLDNN=ON \
-DWITH_TESTING=OFF \
-DCMAKE_BUILD_TYPE=Release \
-DWITH_INFERENCE_API_TEST=OFF \
-DON_INFER=ON \
-DWITH_PYTHON=ON
make -j
make inference_lib_dist
```
For more build options, see the [Paddle C++ inference library documentation](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/guides/05_inference_deployment/inference/build_and_install_lib_cn.html#id16).
* After the build finishes, the following files and folders are generated under `build/paddle_inference_install_dir/`.
```
build/paddle_inference_install_dir/
|-- CMakeCache.txt
|-- paddle
|-- third_party
|-- version.txt
```
`paddle` is the Paddle library needed later for C++ inference, and `version.txt` contains the version information of the inference library.
#### 1.3.2 Download a prebuilt library
* The [Paddle inference library website](https://paddle-inference.readthedocs.io/en/latest/user_guides/download_lib.html) provides Linux inference libraries built for different CUDA versions; pick a suitable one there, and note that you must choose a `develop` build.
Taking the `develop` build `https://paddle-inference-lib.bj.bcebos.com/2.1.1-gpu-cuda10.2-cudnn8.1-mkl-gcc8.2/paddle_inference.tgz` as an example, download and extract it with:
```shell
wget https://paddle-inference-lib.bj.bcebos.com/2.1.1-gpu-cuda10.2-cudnn8.1-mkl-gcc8.2/paddle_inference.tgz
tar -xvf paddle_inference.tgz
```
A `paddle_inference/` subfolder will be created in the current directory.
### 1.4 Install the faiss library
```shell
# download faiss
git clone https://github.com/facebookresearch/faiss.git
cd faiss
export faiss_install_path=$PWD/faiss_install
cmake -B build . -DFAISS_ENABLE_PYTHON=OFF -DCMAKE_INSTALL_PREFIX=${faiss_install_path}
make -C build -j faiss
make -C build install
```
Before installing `faiss`, please install `openblas`; on an `ubuntu` system the install command is:
```shell
apt-get install libopenblas-dev
```
Note that this tutorial installs the CPU version of faiss as an example; see the official [faiss](https://github.com/facebookresearch/faiss) documentation and install the version that matches your needs.
## 2 Build the Code
### 2.2 Build the PaddleClas C++ inference demo
The build command is shown below; the paths of the Paddle C++ inference library, opencv and the other dependencies must be replaced with the actual paths on your machine. The build also downloads and compiles C++ libraries such as `yaml-cpp`, so keep the machine connected to the internet.
```shell
sh tools/build.sh
```
Specifically, the content of `tools/build.sh` is shown below; adjust the paths to your setup.
```shell
OPENCV_DIR=${opencv_install_dir}
LIB_DIR=${paddle_inference_dir}
CUDA_LIB_DIR=/usr/local/cuda/lib64
CUDNN_LIB_DIR=/usr/lib/x86_64-linux-gnu/
FAISS_DIR=${faiss_install_dir}
FAISS_WITH_MKL=OFF
BUILD_DIR=build
rm -rf ${BUILD_DIR}
mkdir ${BUILD_DIR}
cd ${BUILD_DIR}
cmake .. \
-DPADDLE_LIB=${LIB_DIR} \
-DWITH_MKL=ON \
-DWITH_GPU=OFF \
-DWITH_STATIC_LIB=OFF \
-DUSE_TENSORRT=OFF \
-DOPENCV_DIR=${OPENCV_DIR} \
-DCUDNN_LIB=${CUDNN_LIB_DIR} \
-DCUDA_LIB=${CUDA_LIB_DIR} \
-DFAISS_DIR=${FAISS_DIR} \
-DFAISS_WITH_MKL=${FAISS_WITH_MKL}
make -j
```
In the commands above,
* `OPENCV_DIR` is the path where opencv was built and installed (in this example, the path of the `opencv-3.4.7/opencv3` folder);
* `LIB_DIR` is the path of the downloaded Paddle inference library (the `paddle_inference` folder) or of the one built from source (the `build/paddle_inference_install_dir` folder);
* `CUDA_LIB_DIR` is the path of the CUDA libraries, `/usr/local/cuda/lib64` inside the docker image;
* `CUDNN_LIB_DIR` is the path of the cuDNN libraries, `/usr/lib/x86_64-linux-gnu/` inside the docker image;
* `TENSORRT_DIR` is the path of the TensorRT libraries, `/usr/local/TensorRT6-cuda10.0-cudnn7/` inside the docker image; TensorRT must be used together with a GPU;
* `FAISS_DIR` is the faiss install path;
* `FAISS_WITH_MKL` indicates whether mkldnn was used when building faiss; in this document faiss was built with openblas rather than mkldnn, so it is set to `OFF`. Set it to `ON` if mkldnn was used.
After running the commands above, a `build` folder is created in the current directory containing an executable named `pp_shitu`.
## 3 Run the Demo
- Following the [recognition quick start guide](../../docs/zh_CN/quick_start/quick_start_recognition.md), download and extract the lightweight generic mainbody detection model, the lightweight generic recognition model, and the bottled-drink test data.
```shell
mkdir models
cd models
wget https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/inference/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer.tar
tar -xf picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer.tar
wget https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/inference/general_PPLCNet_x2_5_lite_v1.0_infer.tar
tar -xf general_PPLCNet_x2_5_lite_v1.0_infer.tar
cd ..
mkdir data
cd data
wget https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/data/drink_dataset_v1.0.tar
tar -xf drink_dataset_v1.0.tar
cd ..
```
- Copy the corresponding yaml file into the `test` folder
```shell
cp ../configs/inference_drink.yaml .
```
- Change the relative paths in `inference_drink.yaml` to paths relative to this directory or to absolute paths. The parameters involved are:
- Global.infer_imgs: the path of a single image, or the directory that contains the image set
- Global.det_inference_model_dir: directory of the detection model
- Global.rec_inference_model_dir: directory of the recognition model
- IndexProcess.index_dir: directory of the retrieval index; in this example the index is part of the downloaded demo data.
- Convert the id map
Because the id map of the Python retrieval index is serialized with `pickle`, which is inconvenient to read from C++, it needs to be converted:
```shell
python tools/transform_id_map.py -c inference_drink.yaml
```
After a successful conversion, an `id_map.txt` file is generated in the `IndexProcess.index_dir` directory so that C++ can read it easily.
- Run the program
```shell
./build/pp_shitu -c inference_drink.yaml
# or
./build/pp_shitu -config inference_drink.yaml
```
When retrieval is run on the image set, you may get results like the following. Note that this output is for illustration only; the actual run results take precedence.
Also note that, due to opencv version differences, the resize step in preprocessing can produce slightly different images, so the Python and C++ results may differ slightly, e.g. bounding boxes off by a few pixels or retrieval scores differing in the third decimal place; the final retrieval label is not affected.
![](../../docs/images/quick_start/shitu_c++_result.png)
## 4 Use Your Own Model
To use a model you trained yourself, refer to [model export](../../docs/zh_CN/inference_deployment/export_model.md) to export an `inference model` for prediction.
Also remember to update the corresponding parameters in the `yaml` file.
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cmath>
#include <include/feature_extracter.h>
#include <numeric>
namespace Feature {
void FeatureExtracter::LoadModel(const std::string &model_path,
const std::string &params_path) {
paddle_infer::Config config;
config.SetModel(model_path, params_path);
if (this->use_gpu_) {
config.EnableUseGpu(this->gpu_mem_, this->gpu_id_);
if (this->use_tensorrt_) {
config.EnableTensorRtEngine(
1 << 20, 1, 3,
this->use_fp16_ ? paddle_infer::Config::Precision::kHalf
: paddle_infer::Config::Precision::kFloat32,
false, false);
}
} else {
config.DisableGpu();
if (this->use_mkldnn_) {
config.EnableMKLDNN();
// cache 10 different shapes for mkldnn to avoid memory leak
config.SetMkldnnCacheCapacity(10);
}
config.SetCpuMathLibraryNumThreads(this->cpu_math_library_num_threads_);
}
config.SwitchUseFeedFetchOps(false);
// true for multiple input
config.SwitchSpecifyInputNames(true);
config.SwitchIrOptim(true);
config.EnableMemoryOptim();
config.DisableGlogInfo();
this->predictor_ = CreatePredictor(config);
}
void FeatureExtracter::Run(cv::Mat &img, std::vector<float> &out_data,
std::vector<double> &times) {
cv::Mat resize_img;
std::vector<double> time;
auto preprocess_start = std::chrono::system_clock::now();
this->resize_op_.Run(img, resize_img, this->resize_short_,
this->resize_size_);
this->normalize_op_.Run(&resize_img, this->mean_, this->std_, this->scale_);
std::vector<float> input(1 * 3 * resize_img.rows * resize_img.cols, 0.0f);
this->permute_op_.Run(&resize_img, input.data());
auto input_names = this->predictor_->GetInputNames();
auto input_t = this->predictor_->GetInputHandle(input_names[0]);
input_t->Reshape({1, 3, resize_img.rows, resize_img.cols});
auto preprocess_end = std::chrono::system_clock::now();
auto infer_start = std::chrono::system_clock::now();
input_t->CopyFromCpu(input.data());
this->predictor_->Run();
auto output_names = this->predictor_->GetOutputNames();
auto output_t = this->predictor_->GetOutputHandle(output_names[0]);
std::vector<int> output_shape = output_t->shape();
int out_num = std::accumulate(output_shape.begin(), output_shape.end(), 1,
std::multiplies<int>());
out_data.resize(out_num);
output_t->CopyToCpu(out_data.data());
auto infer_end = std::chrono::system_clock::now();
auto postprocess_start = std::chrono::system_clock::now();
if (this->feature_norm)
FeatureNorm(out_data);
auto postprocess_end = std::chrono::system_clock::now();
std::chrono::duration<float> preprocess_diff =
preprocess_end - preprocess_start;
time.push_back(double(preprocess_diff.count()));
std::chrono::duration<float> inference_diff = infer_end - infer_start;
double inference_cost_time = double(inference_diff.count());
time.push_back(inference_cost_time);
// std::chrono::duration<float> postprocess_diff =
// postprocess_end - postprocess_start;
time.push_back(0);
// std::cout << "result: " << std::endl;
// std::cout << "\tclass id: " << maxPosition << std::endl;
// std::cout << std::fixed << std::setprecision(10)
// << "\tscore: " << double(out_data[maxPosition]) << std::endl;
times[0] += time[0];
times[1] += time[1];
times[2] += time[2];
}
// L2-normalize the extracted feature vector in place
void FeatureExtracter::FeatureNorm(std::vector<float> &feature) {
float feature_sqrt = std::sqrt(std::inner_product(
feature.begin(), feature.end(), feature.begin(), 0.0f));
for (int i = 0; i < feature.size(); ++i)
feature[i] /= feature_sqrt;
}
} // namespace Feature
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "opencv2/core.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/imgproc.hpp"
#include <chrono>
#include <iomanip>
#include <iostream>
#include <opencv2/core/utils/filesystem.hpp>
#include <ostream>
#include <vector>
#include <cstring>
#include <fstream>
#include <numeric>
#include <auto_log/autolog.h>
#include <gflags/gflags.h>
#include <include/feature_extracter.h>
#include <include/nms.h>
#include <include/object_detector.h>
#include <include/vector_search.h>
#include <include/yaml_config.h>
using namespace std;
using namespace cv;
DEFINE_string(config,
"", "Path of yaml file");
DEFINE_string(c,
"", "Path of yaml file");
void DetPredictImage(const std::vector <cv::Mat> &batch_imgs,
const std::vector <std::string> &all_img_paths,
const int batch_size, Detection::ObjectDetector *det,
std::vector <Detection::ObjectResult> &im_result,
std::vector<int> &im_bbox_num, std::vector<double> &det_t,
const bool visual_det = false,
const bool run_benchmark = false,
const std::string &output_dir = "output") {
int steps = ceil(float(all_img_paths.size()) / batch_size);
// printf("total images = %d, batch_size = %d, total steps = %d\n",
// all_img_paths.size(), batch_size, steps);
for (int idx = 0; idx < steps; idx++) {
int left_image_cnt = all_img_paths.size() - idx * batch_size;
if (left_image_cnt > batch_size) {
left_image_cnt = batch_size;
}
// for (int bs = 0; bs < left_image_cnt; bs++) {
// std::string image_file_path = all_img_paths.at(idx * batch_size+bs);
// cv::Mat im = cv::imread(image_file_path, 1);
// batch_imgs.insert(batch_imgs.end(), im);
// }
// Store all detected result
std::vector <Detection::ObjectResult> result;
std::vector<int> bbox_num;
std::vector<double> det_times;
bool is_rbox = false;
if (run_benchmark) {
det->Predict(batch_imgs, 10, 10, &result, &bbox_num, &det_times);
} else {
det->Predict(batch_imgs, 0, 1, &result, &bbox_num, &det_times);
// get labels and colormap
auto labels = det->GetLabelList();
auto colormap = Detection::GenerateColorMap(labels.size());
int item_start_idx = 0;
for (int i = 0; i < left_image_cnt; i++) {
cv::Mat im = batch_imgs[i];
int detect_num = 0;
for (int j = 0; j < bbox_num[i]; j++) {
Detection::ObjectResult item = result[item_start_idx + j];
if (item.confidence < det->GetThreshold() || item.class_id == -1) {
continue;
}
detect_num += 1;
im_result.push_back(item);
if (visual_det) {
if (item.rect.size() > 6) {
is_rbox = true;
printf(
"class=%d confidence=%.4f rect=[%d %d %d %d %d %d %d %d]\n",
item.class_id, item.confidence, item.rect[0], item.rect[1],
item.rect[2], item.rect[3], item.rect[4], item.rect[5],
item.rect[6], item.rect[7]);
} else {
printf("class=%d confidence=%.4f rect=[%d %d %d %d]\n",
item.class_id, item.confidence, item.rect[0], item.rect[1],
item.rect[2], item.rect[3]);
}
}
}
im_bbox_num.push_back(detect_num);
item_start_idx = item_start_idx + bbox_num[i];
// Visualization result
if (visual_det) {
std::cout << all_img_paths.at(idx * batch_size + i)
<< " The number of detected box: " << detect_num
<< std::endl;
cv::Mat vis_img = Detection::VisualizeResult(im, im_result, labels,
colormap, is_rbox);
std::vector<int> compression_params;
compression_params.push_back(CV_IMWRITE_JPEG_QUALITY);
compression_params.push_back(95);
std::string output_path(output_dir);
if (output_dir.rfind(OS_PATH_SEP) != output_dir.size() - 1) {
output_path += OS_PATH_SEP;
}
std::string image_file_path = all_img_paths.at(idx * batch_size + i);
output_path +=
image_file_path.substr(image_file_path.find_last_of('/') + 1);
cv::imwrite(output_path, vis_img, compression_params);
printf("Visualized output saved as %s\n", output_path.c_str());
}
}
}
det_t[0] += det_times[0];
det_t[1] += det_times[1];
det_t[2] += det_times[2];
}
}
void PrintResult(std::string &img_path,
std::vector <Detection::ObjectResult> &det_result,
std::vector<int> &indeices, VectorSearch &vector_search,
SearchResult &search_result) {
printf("%s:\n", img_path.c_str());
for (int i = 0; i < indeices.size(); ++i) {
int t = indeices[i];
printf("\tresult%d: bbox[%d, %d, %d, %d], score: %f, label: %s\n", i,
det_result[t].rect[0], det_result[t].rect[1], det_result[t].rect[2],
det_result[t].rect[3], det_result[t].confidence,
vector_search.GetLabel(search_result.I[search_result.return_k * t])
.c_str());
}
}
int main(int argc, char **argv) {
google::ParseCommandLineFlags(&argc, &argv, true);
std::string yaml_path = "";
if (FLAGS_config == "" && FLAGS_c == "") {
std::cerr << "[ERROR] usage: " << std::endl
<< argv[0] << " -c $yaml_path" << std::endl
<< "or:" << std::endl
<< argv[0] << " -config $yaml_path" << std::endl;
exit(1);
} else if (FLAGS_config != "") {
yaml_path = FLAGS_config;
} else {
yaml_path = FLAGS_c;
}
YamlConfig config(yaml_path);
config.PrintConfigInfo();
// initialize detector, rec_Model, vector_search
Feature::FeatureExtracter feature_extracter(config.config_file);
Detection::ObjectDetector detector(config.config_file);
VectorSearch searcher(config.config_file);
// config
const int batch_size = config.config_file["Global"]["batch_size"].as<int>();
bool visual_det = false;
if (config.config_file["Global"]["visual_det"].IsDefined()) {
visual_det = config.config_file["Global"]["visual_det"].as<bool>();
}
bool run_benchmark = false;
if (config.config_file["Global"]["benchmark"].IsDefined()) {
run_benchmark = config.config_file["Global"]["benchmark"].as<bool>();
}
int max_det_results = 5;
if (config.config_file["Global"]["max_det_results"].IsDefined()) {
max_det_results = config.config_file["Global"]["max_det_results"].as<int>();
}
float rec_nms_thresold = 0.05;
if (config.config_file["Global"]["rec_nms_thresold"].IsDefined()) {
rec_nms_thresold =
config.config_file["Global"]["rec_nms_thresold"].as<float>();
}
// load image_file_path
std::string path =
config.config_file["Global"]["infer_imgs"].as<std::string>();
std::vector <std::string> img_files_list;
if (cv::utils::fs::isDirectory(path)) {
std::vector <cv::String> filenames;
cv::glob(path, filenames);
for (auto f : filenames) {
img_files_list.push_back(f);
}
} else {
img_files_list.push_back(path);
}
std::cout << "img_file_list length: " << img_files_list.size() << std::endl;
// for time log
std::vector<double> cls_times = {0, 0, 0};
std::vector<double> det_times = {0, 0, 0};
// for read images
std::vector <cv::Mat> batch_imgs;
std::vector <std::string> img_paths;
// for detection
std::vector <Detection::ObjectResult> det_result;
std::vector<int> det_bbox_num;
// for vector search
std::vector<float> features;
std::vector<float> feature;
// for nms
std::vector<int> indeices;
int warmup_iter = img_files_list.size() > 5 ? 5 : 0;
for (int idx = 0; idx < img_files_list.size(); ++idx) {
std::string img_path = img_files_list[idx];
cv::Mat srcimg = cv::imread(img_path, cv::IMREAD_COLOR);
if (!srcimg.data) {
std::cerr << "[ERROR] image read failed! image path: " << img_path
<< "\n";
exit(-1);
}
cv::cvtColor(srcimg, srcimg, cv::COLOR_BGR2RGB);
batch_imgs.push_back(srcimg);
img_paths.push_back(img_path);
// step1: get all detection results
DetPredictImage(batch_imgs, img_paths, batch_size, &detector, det_result,
det_bbox_num, det_times, visual_det, run_benchmark);
// select max_det_results bbox
if (det_result.size() > max_det_results) {
det_result.resize(max_det_results);
}
// step2: add the whole image for recognition to improve recall
Detection::ObjectResult result_whole_img = {
{0, 0, srcimg.cols - 1, srcimg.rows - 1}, 0, 1.0};
det_result.push_back(result_whole_img);
det_bbox_num[0] = det_result.size() + 1;
// step3: extract features for all boxes in an image
SearchResult search_result;
for (int j = 0; j < det_result.size(); ++j) {
int w = det_result[j].rect[2] - det_result[j].rect[0];
int h = det_result[j].rect[3] - det_result[j].rect[1];
cv::Rect rect(det_result[j].rect[0], det_result[j].rect[1], w, h);
cv::Mat crop_img = srcimg(rect);
feature_extracter.Run(crop_img, feature, cls_times);
features.insert(features.end(), feature.begin(), feature.end());
}
// step4: get search result
search_result = searcher.Search(features.data(), det_result.size());
// nms for search result
for (int i = 0; i < det_result.size(); ++i) {
det_result[i].confidence = search_result.D[search_result.return_k * i];
}
NMSBoxes(det_result, searcher.GetThreshold(), rec_nms_thresold, indeices);
// print result
PrintResult(img_path, det_result, indeices, searcher, search_result);
// for postprocess
batch_imgs.clear();
img_paths.clear();
det_bbox_num.clear();
det_result.clear();
feature.clear();
features.clear();
indeices.clear();
}
std::string presion = "fp32";
// if (config.use_fp16)
// presion = "fp16";
// if (config.benchmark) {
// AutoLogger autolog("Classification", config.use_gpu, config.use_tensorrt,
// config.use_mkldnn, config.cpu_threads, 1,
// "1, 3, 224, 224", presion, cls_times,
// img_files_list.size());
// autolog.report();
// }
return 0;
}
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <sstream>
// for setprecision
#include "include/object_detector.h"
#include <chrono>
#include <iomanip>
using namespace paddle_infer;
namespace Detection {
// Load Model and create model predictor
void ObjectDetector::LoadModel(const std::string &model_dir,
const int batch_size,
const std::string &run_mode) {
paddle_infer::Config config;
std::string prog_file = model_dir + OS_PATH_SEP + "inference.pdmodel";
std::string params_file = model_dir + OS_PATH_SEP + "inference.pdiparams";
config.SetModel(prog_file, params_file);
if (this->use_gpu_) {
config.EnableUseGpu(this->gpu_mem_, this->gpu_id_);
config.SwitchIrOptim(this->ir_optim_);
// // use tensorrt
// if (run_mode != "fluid") {
// auto precision = paddle_infer::Config::Precision::kFloat32;
// if (run_mode == "trt_fp32") {
// precision = paddle_infer::Config::Precision::kFloat32;
// }
// else if (run_mode == "trt_fp16") {
// precision = paddle_infer::Config::Precision::kHalf;
// }
// else if (run_mode == "trt_int8") {
// precision = paddle_infer::Config::Precision::kInt8;
// } else {
// printf("run_mode should be 'fluid', 'trt_fp32', 'trt_fp16' or
// 'trt_int8'");
// }
// set tensorrt
if (this->use_tensorrt_) {
config.EnableTensorRtEngine(
1 << 30, batch_size, this->min_subgraph_size_,
this->use_fp16_ ? paddle_infer::Config::Precision::kHalf
: paddle_infer::Config::Precision::kFloat32,
false, this->trt_calib_mode_);
// set use dynamic shape
if (this->use_dynamic_shape_) {
// set dynamic shape for the image tensor
const std::vector<int> min_input_shape = {1, 3, this->trt_min_shape_,
this->trt_min_shape_};
const std::vector<int> max_input_shape = {1, 3, this->trt_max_shape_,
this->trt_max_shape_};
const std::vector<int> opt_input_shape = {1, 3, this->trt_opt_shape_,
this->trt_opt_shape_};
const std::map <std::string, std::vector<int>> map_min_input_shape = {
{"image", min_input_shape}};
const std::map <std::string, std::vector<int>> map_max_input_shape = {
{"image", max_input_shape}};
const std::map <std::string, std::vector<int>> map_opt_input_shape = {
{"image", opt_input_shape}};
config.SetTRTDynamicShapeInfo(map_min_input_shape, map_max_input_shape,
map_opt_input_shape);
std::cout << "TensorRT dynamic shape enabled" << std::endl;
}
}
// } else if (this->device_ == "XPU"){
// config.EnableXpu(10*1024*1024);
} else {
config.DisableGpu();
if (this->use_mkldnn_) {
config.EnableMKLDNN();
// cache 10 different shapes for mkldnn to avoid memory leak
config.SetMkldnnCacheCapacity(10);
}
config.SetCpuMathLibraryNumThreads(this->cpu_math_library_num_threads_);
}
config.SwitchUseFeedFetchOps(false);
config.SwitchIrOptim(this->ir_optim_);
config.DisableGlogInfo();
// Memory optimization
config.EnableMemoryOptim();
predictor_ = std::move(CreatePredictor(config));
}
// Visualize detection results
cv::Mat VisualizeResult(const cv::Mat &img,
const std::vector <ObjectResult> &results,
const std::vector <std::string> &labels,
const std::vector<int> &colormap,
const bool is_rbox = false) {
cv::Mat vis_img = img.clone();
for (int i = 0; i < results.size(); ++i) {
// Configure color and text size
std::ostringstream oss;
oss << std::setiosflags(std::ios::fixed) << std::setprecision(4);
oss << lables[results[i].class_id] << " ";
oss << results[i].confidence;
std::string text = oss.str();
int c1 = colormap[3 * results[i].class_id + 0];
int c2 = colormap[3 * results[i].class_id + 1];
int c3 = colormap[3 * results[i].class_id + 2];
cv::Scalar roi_color = cv::Scalar(c1, c2, c3);
int font_face = cv::FONT_HERSHEY_COMPLEX_SMALL;
double font_scale = 0.5f;
float thickness = 0.5;
cv::Size text_size =
cv::getTextSize(text, font_face, font_scale, thickness, nullptr);
cv::Point origin;
if (is_rbox) {
// Draw object, text, and background
for (int k = 0; k < 4; k++) {
cv::Point pt1 = cv::Point(results[i].rect[(k * 2) % 8],
results[i].rect[(k * 2 + 1) % 8]);
cv::Point pt2 = cv::Point(results[i].rect[(k * 2 + 2) % 8],
results[i].rect[(k * 2 + 3) % 8]);
cv::line(vis_img, pt1, pt2, roi_color, 2);
}
} else {
int w = results[i].rect[2] - results[i].rect[0];
int h = results[i].rect[3] - results[i].rect[1];
cv::Rect roi = cv::Rect(results[i].rect[0], results[i].rect[1], w, h);
// Draw roi object, text, and background
cv::rectangle(vis_img, roi, roi_color, 2);
}
origin.x = results[i].rect[0];
origin.y = results[i].rect[1];
// Configure text background
cv::Rect text_back =
cv::Rect(results[i].rect[0], results[i].rect[1] - text_size.height,
text_size.width, text_size.height);
// Draw text, and background
cv::rectangle(vis_img, text_back, roi_color, -1);
cv::putText(vis_img, text, origin, font_face, font_scale,
cv::Scalar(255, 255, 255), thickness);
}
return vis_img;
}
void ObjectDetector::Preprocess(const cv::Mat &ori_im) {
// Clone the image : keep the original mat for postprocess
cv::Mat im = ori_im.clone();
cv::cvtColor(im, im, cv::COLOR_BGR2RGB);
preprocessor_.Run(&im, &inputs_);
}
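// Decode the flattened detector output into ObjectResult items. Each detection
// occupies 10 floats for rotated boxes (class id, score, 4 corner points) or
// 6 floats for axis-aligned boxes (class id, score, xmin, ymin, xmax, ymax).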
void ObjectDetector::Postprocess(const std::vector <cv::Mat> mats,
std::vector <ObjectResult> *result,
std::vector<int> bbox_num,
bool is_rbox = false) {
result->clear();
int start_idx = 0;
for (int im_id = 0; im_id < mats.size(); im_id++) {
cv::Mat raw_mat = mats[im_id];
int rh = 1;
int rw = 1;
// if (config_.arch_ == "Face") {
// rh = raw_mat.rows;
// rw = raw_mat.cols;
// }
for (int j = start_idx; j < start_idx + bbox_num[im_id]; j++) {
if (is_rbox) {
// Class id
int class_id = static_cast<int>(round(output_data_[0 + j * 10]));
// Confidence score
float score = output_data_[1 + j * 10];
int x1 = (output_data_[2 + j * 10] * rw);
int y1 = (output_data_[3 + j * 10] * rh);
int x2 = (output_data_[4 + j * 10] * rw);
int y2 = (output_data_[5 + j * 10] * rh);
int x3 = (output_data_[6 + j * 10] * rw);
int y3 = (output_data_[7 + j * 10] * rh);
int x4 = (output_data_[8 + j * 10] * rw);
int y4 = (output_data_[9 + j * 10] * rh);
ObjectResult result_item;
result_item.rect = {x1, y1, x2, y2, x3, y3, x4, y4};
result_item.class_id = class_id;
result_item.confidence = score;
result->push_back(result_item);
} else {
// Class id
int class_id = static_cast<int>(round(output_data_[0 + j * 6]));
// Confidence score
float score = output_data_[1 + j * 6];
int xmin = (output_data_[2 + j * 6] * rw);
int ymin = (output_data_[3 + j * 6] * rh);
int xmax = (output_data_[4 + j * 6] * rw);
int ymax = (output_data_[5 + j * 6] * rh);
int wd = xmax - xmin;
int hd = ymax - ymin;
ObjectResult result_item;
result_item.rect = {xmin, ymin, xmax, ymax};
result_item.class_id = class_id;
result_item.confidence = score;
result->push_back(result_item);
}
}
start_idx += bbox_num[im_id];
}
}
void ObjectDetector::Predict(const std::vector <cv::Mat> imgs, const int warmup,
const int repeats,
std::vector <ObjectResult> *result,
std::vector<int> *bbox_num,
std::vector<double> *times) {
auto preprocess_start = std::chrono::steady_clock::now();
int batch_size = imgs.size();
// in_data_batch
std::vector<float> in_data_all;
std::vector<float> im_shape_all(batch_size * 2);
std::vector<float> scale_factor_all(batch_size * 2);
// Preprocess image
for (int bs_idx = 0; bs_idx < batch_size; bs_idx++) {
cv::Mat im = imgs.at(bs_idx);
Preprocess(im);
im_shape_all[bs_idx * 2] = inputs_.im_shape_[0];
im_shape_all[bs_idx * 2 + 1] = inputs_.im_shape_[1];
scale_factor_all[bs_idx * 2] = inputs_.scale_factor_[0];
scale_factor_all[bs_idx * 2 + 1] = inputs_.scale_factor_[1];
// TODO: reduce cost time
in_data_all.insert(in_data_all.end(), inputs_.im_data_.begin(),
inputs_.im_data_.end());
}
// Prepare input tensor
auto input_names = predictor_->GetInputNames();
for (const auto &tensor_name : input_names) {
auto in_tensor = predictor_->GetInputHandle(tensor_name);
if (tensor_name == "image") {
int rh = inputs_.in_net_shape_[0];
int rw = inputs_.in_net_shape_[1];
in_tensor->Reshape({batch_size, 3, rh, rw});
in_tensor->CopyFromCpu(in_data_all.data());
} else if (tensor_name == "im_shape") {
in_tensor->Reshape({batch_size, 2});
in_tensor->CopyFromCpu(im_shape_all.data());
} else if (tensor_name == "scale_factor") {
in_tensor->Reshape({batch_size, 2});
in_tensor->CopyFromCpu(scale_factor_all.data());
}
}
auto preprocess_end = std::chrono::steady_clock::now();
// Run predictor
// warmup
for (int i = 0; i < warmup; i++) {
predictor_->Run();
// Get output tensor
auto output_names = predictor_->GetOutputNames();
auto out_tensor = predictor_->GetOutputHandle(output_names[0]);
std::vector<int> output_shape = out_tensor->shape();
auto out_bbox_num = predictor_->GetOutputHandle(output_names[1]);
std::vector<int> out_bbox_num_shape = out_bbox_num->shape();
// Calculate output length
int output_size = 1;
for (int j = 0; j < output_shape.size(); ++j) {
output_size *= output_shape[j];
}
if (output_size < 6) {
std::cerr << "[WARNING] No object detected." << std::endl;
}
output_data_.resize(output_size);
out_tensor->CopyToCpu(output_data_.data());
int out_bbox_num_size = 1;
for (int j = 0; j < out_bbox_num_shape.size(); ++j) {
out_bbox_num_size *= out_bbox_num_shape[j];
}
out_bbox_num_data_.resize(out_bbox_num_size);
out_bbox_num->CopyToCpu(out_bbox_num_data_.data());
}
bool is_rbox = false;
auto inference_start = std::chrono::steady_clock::now();
for (int i = 0; i < repeats; i++) {
predictor_->Run();
// Get output tensor
auto output_names = predictor_->GetOutputNames();
auto out_tensor = predictor_->GetOutputHandle(output_names[0]);
std::vector<int> output_shape = out_tensor->shape();
auto out_bbox_num = predictor_->GetOutputHandle(output_names[1]);
std::vector<int> out_bbox_num_shape = out_bbox_num->shape();
// Calculate output length
int output_size = 1;
for (int j = 0; j < output_shape.size(); ++j) {
output_size *= output_shape[j];
}
is_rbox = output_shape[output_shape.size() - 1] % 10 == 0;
if (output_size < 6) {
std::cerr << "[WARNING] No object detected." << std::endl;
}
output_data_.resize(output_size);
out_tensor->CopyToCpu(output_data_.data());
int out_bbox_num_size = 1;
for (int j = 0; j < out_bbox_num_shape.size(); ++j) {
out_bbox_num_size *= out_bbox_num_shape[j];
}
out_bbox_num_data_.resize(out_bbox_num_size);
out_bbox_num->CopyToCpu(out_bbox_num_data_.data());
}
auto inference_end = std::chrono::steady_clock::now();
auto postprocess_start = std::chrono::steady_clock::now();
// Postprocessing result
result->clear();
Postprocess(imgs, result, out_bbox_num_data_, is_rbox);
bbox_num->clear();
for (int k = 0; k < out_bbox_num_data_.size(); k++) {
int tmp = out_bbox_num_data_[k];
bbox_num->push_back(tmp);
}
auto postprocess_end = std::chrono::steady_clock::now();
std::chrono::duration<float> preprocess_diff =
preprocess_end - preprocess_start;
times->push_back(double(preprocess_diff.count() * 1000));
std::chrono::duration<float> inference_diff = inference_end - inference_start;
times->push_back(double(inference_diff.count() / repeats * 1000));
std::chrono::duration<float> postprocess_diff =
postprocess_end - postprocess_start;
times->push_back(double(postprocess_diff.count() * 1000));
}
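// Generate a color map in the style of the PASCAL VOC palette: the bits of each
// class id are spread over the high bits of the R, G and B channels so that
// different class ids map to clearly distinguishable colors.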
std::vector<int> GenerateColorMap(int num_class) {
auto colormap = std::vector<int>(3 * num_class, 0);
for (int i = 0; i < num_class; ++i) {
int j = 0;
int lab = i;
while (lab) {
colormap[i * 3] |= (((lab >> 0) & 1) << (7 - j));
colormap[i * 3 + 1] |= (((lab >> 1) & 1) << (7 - j));
colormap[i * 3 + 2] |= (((lab >> 2) & 1) << (7 - j));
++j;
lab >>= 3;
}
}
return colormap;
}
} // namespace Detection
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "opencv2/core.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/imgproc.hpp"
#include "paddle_api.h"
#include "paddle_inference_api.h"
#include <chrono>
#include <iomanip>
#include <iostream>
#include <ostream>
#include <vector>
#include <cstring>
#include <fstream>
#include <math.h>
#include <numeric>
#include <include/preprocess_op.h>
namespace Feature {
void Permute::Run(const cv::Mat *im, float *data) {
int rh = im->rows;
int rw = im->cols;
int rc = im->channels();
for (int i = 0; i < rc; ++i) {
cv::extractChannel(*im, cv::Mat(rh, rw, CV_32FC1, data + i * rh * rw), i);
}
}
void Normalize::Run(cv::Mat *im, const std::vector<float> &mean,
const std::vector<float> &std, float scale) {
(*im).convertTo(*im, CV_32FC3, scale);
for (int h = 0; h < im->rows; h++) {
for (int w = 0; w < im->cols; w++) {
im->at<cv::Vec3f>(h, w)[0] =
(im->at<cv::Vec3f>(h, w)[0] - mean[0]) / std[0];
im->at<cv::Vec3f>(h, w)[1] =
(im->at<cv::Vec3f>(h, w)[1] - mean[1]) / std[1];
im->at<cv::Vec3f>(h, w)[2] =
(im->at<cv::Vec3f>(h, w)[2] - mean[2]) / std[2];
}
}
}
void CenterCropImg::Run(cv::Mat &img, const int crop_size) {
int resize_w = img.cols;
int resize_h = img.rows;
int w_start = int((resize_w - crop_size) / 2);
int h_start = int((resize_h - crop_size) / 2);
cv::Rect rect(w_start, h_start, crop_size, crop_size);
img = img(rect);
}
void ResizeImg::Run(const cv::Mat &img, cv::Mat &resize_img,
int resize_short_size, int size) {
int resize_h = 0;
int resize_w = 0;
if (size > 0) {
resize_h = size;
resize_w = size;
} else {
int w = img.cols;
int h = img.rows;
float ratio = 1.f;
if (h < w) {
ratio = float(resize_short_size) / float(h);
} else {
ratio = float(resize_short_size) / float(w);
}
resize_h = round(float(h) * ratio);
resize_w = round(float(w) * ratio);
}
cv::resize(img, resize_img, cv::Size(resize_w, resize_h));
}
} // namespace Feature
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <string>
#include <vector>
#include "include/preprocess_op_det.h"
namespace Detection {
void InitInfo::Run(cv::Mat *im, ImageBlob *data) {
data->im_shape_ = {static_cast<float>(im->rows),
static_cast<float>(im->cols)};
data->scale_factor_ = {1., 1.};
data->in_net_shape_ = {static_cast<float>(im->rows),
static_cast<float>(im->cols)};
}
void NormalizeImage::Run(cv::Mat *im, ImageBlob *data) {
double e = 1.0;
if (is_scale_) {
e /= 255.0;
}
(*im).convertTo(*im, CV_32FC3, e);
for (int h = 0; h < im->rows; h++) {
for (int w = 0; w < im->cols; w++) {
im->at<cv::Vec3f>(h, w)[0] =
(im->at<cv::Vec3f>(h, w)[0] - mean_[0]) / scale_[0];
im->at<cv::Vec3f>(h, w)[1] =
(im->at<cv::Vec3f>(h, w)[1] - mean_[1]) / scale_[1];
im->at<cv::Vec3f>(h, w)[2] =
(im->at<cv::Vec3f>(h, w)[2] - mean_[2]) / scale_[2];
}
}
}
void Permute::Run(cv::Mat *im, ImageBlob *data) {
int rh = im->rows;
int rw = im->cols;
int rc = im->channels();
(data->im_data_).resize(rc * rh * rw);
float *base = (data->im_data_).data();
for (int i = 0; i < rc; ++i) {
cv::extractChannel(*im, cv::Mat(rh, rw, CV_32FC1, base + i * rh * rw), i);
}
}
void Resize::Run(cv::Mat *im, ImageBlob *data) {
auto resize_scale = GenerateScale(*im);
data->im_shape_ = {static_cast<float>(im->cols * resize_scale.first),
static_cast<float>(im->rows * resize_scale.second)};
data->in_net_shape_ = {static_cast<float>(im->cols * resize_scale.first),
static_cast<float>(im->rows * resize_scale.second)};
cv::resize(*im, *im, cv::Size(), resize_scale.first, resize_scale.second,
interp_);
data->im_shape_ = {
static_cast<float>(im->rows), static_cast<float>(im->cols),
};
data->scale_factor_ = {
resize_scale.second, resize_scale.first,
};
}
std::pair<double, double> Resize::GenerateScale(const cv::Mat &im) {
std::pair<double, double> resize_scale;
int origin_w = im.cols;
int origin_h = im.rows;
if (keep_ratio_) {
int im_size_max = std::max(origin_w, origin_h);
int im_size_min = std::min(origin_w, origin_h);
int target_size_max =
*std::max_element(target_size_.begin(), target_size_.end());
int target_size_min =
*std::min_element(target_size_.begin(), target_size_.end());
double scale_min =
static_cast<double>(target_size_min) / static_cast<double>(im_size_min);
double scale_max =
static_cast<double>(target_size_max) / static_cast<double>(im_size_max);
double scale_ratio = std::min(scale_min, scale_max);
resize_scale = {scale_ratio, scale_ratio};
} else {
resize_scale.first =
static_cast<double>(target_size_[1]) / static_cast<double>(origin_w);
resize_scale.second =
static_cast<double>(target_size_[0]) / static_cast<double>(origin_h);
}
return resize_scale;
}
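// Pad the bottom and right of the image with zeros so that its height and width
// become multiples of stride_ (no-op when stride_ <= 0).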
void PadStride::Run(cv::Mat *im, ImageBlob *data) {
if (stride_ <= 0) {
return;
}
int rc = im->channels();
int rh = im->rows;
int rw = im->cols;
int nh = (rh / stride_) * stride_ + (rh % stride_ != 0) * stride_;
int nw = (rw / stride_) * stride_ + (rw % stride_ != 0) * stride_;
cv::copyMakeBorder(*im, *im, 0, nh - rh, 0, nw - rw, cv::BORDER_CONSTANT,
cv::Scalar(0));
data->in_net_shape_ = {
static_cast<float>(im->rows), static_cast<float>(im->cols),
};
}
// Preprocessor op running order
const std::vector <std::string> Preprocessor::RUN_ORDER = {
"InitInfo", "Resize", "NormalizeImage", "PadStride", "Permute"};
void Preprocessor::Run(cv::Mat *im, ImageBlob *data) {
for (const auto &name : RUN_ORDER) {
if (ops_.find(name) != ops_.end()) {
ops_[name]->Run(im, data);
}
}
}
} // namespace Detection
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "include/vector_search.h"
#include <cstdio>
#include <faiss/index_factory.h>
#include <faiss/index_io.h>
#include <fstream>
#include <iostream>
#include <regex>
void VectorSearch::LoadIndexFile() {
std::string file_path = this->index_dir + OS_PATH_SEP + "vector.index";
const char *fname = file_path.c_str();
this->index = faiss::read_index(fname, 0);
}
void VectorSearch::LoadIdMap() {
std::string file_path = this->index_dir + OS_PATH_SEP + "id_map.txt";
std::ifstream in(file_path);
std::string line;
std::vector <std::string> m_vec;
if (in) {
while (getline(in, line)) {
std::regex ws_re("\\s+");
std::vector <std::string> v(
std::sregex_token_iterator(line.begin(), line.end(), ws_re, -1),
std::sregex_token_iterator());
if (v.size() != 2) {
std::cout << "The number of element for each line in : " << file_path
<< "must be 2, exit the program..." << std::endl;
exit(1);
} else
this->id_map.insert(std::pair<long int, std::string>(
std::stol(v[0], nullptr, 10), v[1]));
}
}
}
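// Query the faiss index: D receives the distances and I the gallery ids of the
// top return_k results for each query vector.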
const SearchResult &VectorSearch::Search(float *feature, int query_number) {
this->D.resize(this->return_k * query_number);
this->I.resize(this->return_k * query_number);
this->index->search(query_number, feature, return_k, D.data(), I.data());
this->sr.return_k = this->return_k;
this->sr.D = this->D;
this->sr.I = this->I;
return this->sr;
}
const std::string &VectorSearch::GetLabel(faiss::Index::idx_t ind) {
return this->id_map.at(ind);
}
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <iostream>
#include <ostream>
#include <vector>
#include <include/yaml_config.h>
std::vector <std::string> YamlConfig::ReadDict(const std::string &path) {
std::ifstream in(path);
std::string line;
std::vector <std::string> m_vec;
if (in) {
while (getline(in, line)) {
m_vec.push_back(line);
}
} else {
std::cout << "no such label file: " << path << ", exit the program..."
<< std::endl;
exit(1);
}
return m_vec;
}
std::map<int, std::string> YamlConfig::ReadIndexId(const std::string &path) {
std::ifstream in(path);
std::string line;
std::map<int, std::string> m_vec;
if (in) {
while (getline(in, line)) {
std::regex ws_re("\\s+");
std::vector <std::string> v(
std::sregex_token_iterator(line.begin(), line.end(), ws_re, -1),
std::sregex_token_iterator());
if (v.size() != 3) {
std::cout << "The number of element for each line in : " << path
<< "must be 3, exit the program..." << std::endl;
exit(1);
} else
m_vec.insert(std::pair<int, std::string>(stoi(v[0]), v[2]));
}
}
return m_vec;
}
YAML::Node YamlConfig::ReadYamlConfig(const std::string &path) {
YAML::Node config;
try {
config = YAML::LoadFile(path);
} catch (YAML::BadFile &e) {
std::cout << "Something wrong in yaml file, please check yaml file"
<< std::endl;
exit(1);
}
return config;
}
void YamlConfig::PrintConfigInfo() {
std::cout << this->config_file << std::endl;
// for (YAML::const_iterator
// it=config_file.begin();it!=config_file.end();++it)
// {
// std::cout << it->as<std::string>() << "\n";
// }
}
OPENCV_DIR=${opencv_install_dir}
LIB_DIR=${paddle_inference_dir}
CUDA_LIB_DIR=/usr/local/cuda/lib64
CUDNN_LIB_DIR=/usr/lib/x86_64-linux-gnu/
FAISS_DIR=${faiss_install_dir}
FAISS_WITH_MKL=OFF
BUILD_DIR=build
rm -rf ${BUILD_DIR}
mkdir ${BUILD_DIR}
cd ${BUILD_DIR}
cmake .. \
-DPADDLE_LIB=${LIB_DIR} \
-DWITH_MKL=ON \
-DWITH_GPU=OFF \
-DWITH_STATIC_LIB=OFF \
-DUSE_TENSORRT=OFF \
-DOPENCV_DIR=${OPENCV_DIR} \
-DCUDNN_LIB=${CUDNN_LIB_DIR} \
-DCUDA_LIB=${CUDA_LIB_DIR} \
-DFAISS_DIR=${FAISS_DIR} \
-DFAISS_WITH_MKL=${FAISS_WITH_MKL}
make -j
\ No newline at end of file
import argparse
import os
import pickle
import yaml
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', type=str, required=True)
args = parser.parse_args()
return args
def main():
args = parse_args()
with open(args.config) as fd:
config = yaml.load(fd, Loader=yaml.FullLoader)
index_dir = ""
try:
index_dir = config["IndexProcess"]["index_dir"]
except Exception as e:
print("The IndexProcess.index_dir in config_file dose not exist")
exit(1)
id_map_path = os.path.join(index_dir, "id_map.pkl")
assert os.path.exists(id_map_path), "The id_map file does not exist: {}".format(id_map_path)
with open(id_map_path, "rb")as fd:
ids = pickle.load(fd)
with open(os.path.join(index_dir, "id_map.txt"), "w")as fd:
for k, v in ids.items():
v = v.split("\t")[1]
fd.write(str(k) + " " + v + "\n")
print('Transform id_map success')
if __name__ == "__main__":
main()
......@@ -9,7 +9,16 @@
Note that, for better compatibility, the current version of `PaddleClas` **only uses the CPU for vector search**.
This document mainly introduces how to install the search module in PaddleClas, the search algorithms it uses, and the parameters in the related configuration files.
<div align="center">
<img src="../../images/structure.jpg" width = "800" />
</div>
As shown in the figure above, the vector search part plays two roles in the overall `PP-ShiTu` system:
- The green part in the figure: building the gallery index that is queried at search time, with support for adding and removing entries.
- The blue part in the figure: the search itself, i.e. given the feature vector of an image, return the labels of the most similar images in the gallery.
This document mainly introduces how to install the search module in PaddleClas, the search algorithms it uses, the gallery-building process, and the parameters in the related configuration files.
## 1. Installing the Search Library
......@@ -31,13 +40,26 @@ pip install faiss-cpu==1.7.1post2
Each search algorithm suits a different scenario. `HNSW32` is the default method; it strikes a good balance between search accuracy and search speed. For details of the algorithms, see the [official documentation](https://github.com/facebookresearch/faiss/wiki).
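A minimal sketch (not part of PaddleClas) of how such an index string is used with `faiss`; the 512-dimensional random vectors below merely stand in for real gallery and query features:

```python
import numpy as np
import faiss

dim = 512                                   # assumed feature dimension
index = faiss.index_factory(dim, "HNSW32")  # same index string as in the config

gallery = np.random.rand(1000, dim).astype("float32")
index.add(gallery)                          # add gallery features to the index

query = np.random.rand(1, dim).astype("float32")
distances, ids = index.search(query, 5)     # distances and ids of the top-5 matches
```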
## 3. Configuration File Parameters
## 3. Usage and Configuration Files
The configuration files for the search module are located under `deploy/configs/`: the `build_*.yaml` files configure building the feature gallery, and the `inference_*.yaml` files configure inference for search or classification.
### 3.1 Gallery-building configuration parameters
### 3.1 Building the gallery and configuration parameters
The gallery is built as follows:
```shell
# enter the deploy directory
cd deploy
# replace the yaml file with the specific one you need
python python/build_gallery.py -c configs/build_***.yaml
```
The gallery-building options in the `yaml` file are listed below; please adjust them to your actual setup before running. Building the gallery extracts features from the images under `image_root` according to the image list in `data_file`, and stores the resulting index under `index_dir` for later search.
The `data_file` stores the path and label of each image, one entry per line, in the format `image_path label`, where the separator is specified by the `delimiter` parameter in the `yaml` file.
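For illustration only (the paths and label below are made up), with a space as the delimiter a `data_file` could look like this:

```
gallery/demo_class/0001.jpg demo_class
gallery/demo_class/0002.jpg demo_class
```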
An example gallery-building configuration is shown below.
For the exact model parameters used for feature extraction, see the `yaml` file.
```yaml
# indexing engine config
......@@ -63,6 +85,10 @@ IndexProcess:
### 3.2 Search configuration parameters
The search step is integrated into the overall `PP-ShiTu` pipeline; see the `PP-ShiTu image recognition system` section of the [README](../../../README_ch.md). For how to run a search, see the [recognition quick start guide](../quick_start/quick_start_recognition.md).
The search-related options are shown below; for the full search configuration, see the `deploy/configs/inference_*.yaml` files.
```yaml
IndexProcess:
index_dir: "./recognition_demo_data_v1.1/gallery_logo/index/"
......
......@@ -259,7 +259,7 @@ python3.7 python/predict_system.py -c configs/inference_general.yaml -o Global.i
python3.7 python/build_gallery.py -c configs/build_general.yaml -o IndexProcess.data_file="./drink_dataset_v1.0/gallery/drink_label_all.txt" -o IndexProcess.index_dir="./drink_dataset_v1.0/index_all"
```
The new index is saved in the folder `./drink_dataset_v1.0/index_all`.
The new index is saved in the folder `./drink_dataset_v1.0/index_all`. For details of the `yaml` file, see the [vector search documentation](../image_recognition_pipeline/vector_search.md).
<a name="基于新的索引库的图像识别"></a>
......
......@@ -24,15 +24,15 @@
| :--- | :--- | :----: | :--------: | :---- | :---- | :---- |
| ResNet | ResNet50_vd | classification | supported | multi-node multi-GPU <br> mixed precision | FPGM pruning <br> PACT quantization | |
| MobileNetV3 | MobileNetV3_large_x1_0 | classification | supported | multi-node multi-GPU <br> mixed precision | FPGM pruning <br> PACT quantization | |
| PPLCNet | PPLCNet_x2_5 | classification | supported | multi-node multi-GPU <br> mixed precision | FPGM pruning <br> PACT quantization | |
| PPLCNet | PPLCNet_x2_5 | classification | supported | multi-node multi-GPU <br> mixed precision | - | |
## 3. Using the One-Click Test Tool
## 3. Overview of the Test Tool
### Directory layout
```
./test_tipc/
├── common_func.sh # common functions called by the test_*.sh scripts
├── config # configuration file directory
│ ├── MobileNetV3_large_x1_0 # test config directory for the MobileNetV3 series models
│ ├── MobileNetV3 # test config directory for the MobileNetV3 series models
│ │ ├── MobileNetV3_large_x1_0_train_infer_python.txt # basic training and inference config
│ │ ├── MobileNetV3_large_x1_0_train_linux_gpu_fleet_amp_infer_python_linux_gpu_cpu.txt # multi-machine multi-GPU training and inference config
│ │ └── MobileNetV3_large_x1_0_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt # mixed-precision training and inference config
......@@ -50,7 +50,7 @@
└── test_train_inference_python.sh # main program for testing Python training and inference
```
### Test workflow
### Test workflow overview
With this tool you can check which features are supported and whether the inference results match the references. The workflow is as follows:
<div align="center">
<img src="docs/test.png" width="800">
......@@ -60,16 +60,49 @@
2. Run the test script `test_*.sh` for the feature to be tested; it produces logs, from which you can see whether each configuration ran successfully;
3. Use `compare_results.py` to compare the inference results in the logs with the reference results stored under the results directory, and judge whether the accuracy is as expected (within tolerance).
There are four main test scripts, with the following functions:
- `test_train_inference_python.sh`: tests basic Python-based model training, evaluation and inference, including pruning, quantization and distillation.
- `test_inference_cpp.sh`: tests C++-based model inference. (not yet supported)
- `test_serving.sh`: tests service deployment based on Paddle Serving. (not yet supported)
- `test_lite.sh`: tests on-device inference deployment based on Paddle-Lite. (not yet supported)
Testing a single feature takes only two commands. **To test a different model or feature, simply switch the configuration file.** The command format is as follows:
```shell
# purpose: prepare the data
# format: bash + script + arg 1: configuration file + arg 2: mode
bash test_tipc/prepare.sh configs/[model_name]/[params_file_name] [Mode]
# purpose: run the test
# format: bash + script + arg 1: configuration file + arg 2: mode
bash test_tipc/test_train_inference_python.sh configs/[model_name]/[params_file_name] [Mode]
```
For example, to test the `lite_train_lite_infer` mode of the basic training and inference feature, run:
```shell
# prepare the data
bash test_tipc/prepare.sh ./test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_train_infer_python.txt 'lite_train_lite_infer'
# run the test
bash test_tipc/test_train_inference_python.sh ./test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_train_infer_python.txt 'lite_train_lite_infer'
```
For more information about these example commands, see the [basic training and inference guide](docs/test_train_inference_python.md).
### Configuration file naming convention
Under the `configs` directory, **files are grouped into subdirectories by model series**; each subdirectory holds all the configuration files needed to test that series, e.g. the `MobileNetV3` folder holds the configs of all `MobileNetV3` series models. Configuration files are named according to the following rules:
1. The basic training and inference config is simply named `ModelName_train_infer_python.txt`, which stands for **single-machine training without mixed precision plus Python inference on Linux**. Its full name would be `ModelName_train_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt`; because this config is used very frequently, the name is shortened. `ModelName` is the concrete model name.
2. Other configs that include training are named `ModelName_train_<training hardware (linux_gpu/linux_dcu/…)>_<multi-machine or not (fleet/normal)>_<mixed precision or not (amp/normal)>_<inference mode (infer/lite/serving/js)>_<language (cpp/python/java)>_<inference hardware (linux_gpu/mac/jetson/opencl_arm_gpu/...)>.txt`. For example, the multi-machine multi-GPU + mixed-precision chain on Linux GPU corresponds to `ModelName_train_linux_gpu_fleet_amp_infer_python_linux_gpu_cpu.txt`, and basic training and inference on Linux DCU corresponds to `ModelName_train_linux_dcu_normal_normal_infer_python_linux_dcu.txt`.
3. Inference-only configs (e.g. serving, lite) are named `ModelName_model_<training hardware (linux_gpu/linux_dcu/…)>_<multi-machine or not (fleet/normal)>_<mixed precision or not (amp/normal)>_<(infer/lite/serving/js)>_<language (cpp/python/java)>_<inference hardware (linux_gpu/mac/jetson/opencl_arm_gpu/...)>.txt`. Compared with rule 2, only the second field changes from train to model; the model is downloaded directly during testing, and the "training hardware" here indicates the environment in which the tested model was trained.
**Following this naming convention, the configuration file for the scenario and feature to be tested can be located directly from the subdirectory name and the file name.**
<a name="more"></a>
#### More tutorials
## 4. Start Testing
The feature tests involve training-related settings such as mixed precision, pruning and quantization, as well as inference-related settings such as MKL-DNN and TensorRT. Follow the links below for more details and usage tutorials:
[test_train_inference_python usage](docs/test_train_inference_python.md)
[test_inference_cpp usage](docs/test_inference_cpp.md)
[test_serving usage](docs/test_serving.md)
[test_lite usage](docs/test_lite.md)
- [test_train_inference_python usage](docs/test_train_inference_python.md): tests basic Python-based model training, evaluation and inference, including pruning, quantization and distillation.
- [test_inference_cpp usage](docs/test_inference_cpp.md): tests C++-based model inference.
- [test_serving usage](docs/test_serving.md): tests service deployment based on Paddle Serving.
- [test_lite_arm_cpu_cpp usage](docs/test_lite_arm_cpu_cpp.md): tests C++ inference deployment on ARM CPU based on Paddle-Lite.
- [test_paddle2onnx usage](docs/test_paddle2onnx.md): tests model conversion with Paddle2ONNX and verifies its correctness.
......@@ -76,7 +76,7 @@ elif [ ${MODE} = "whole_train_whole_infer" ];then
ln -s whole_chain_CIFAR100 ILSVRC2012
cd ILSVRC2012
mv train.txt train_list.txt
mv val.txt val_list.txt
mv test.txt val_list.txt
cd ../../
fi
......
#!/bin/bash
source ./test_tipc/common_func.sh
FILENAME=$1
dataline=$(cat ${FILENAME})
# parser params
IFS=$'\n'
lines=(${dataline})
IFS=$'\n'
inference_cmd=$(func_parser_value "${lines[1]}")
DEVICE=$(func_parser_value "${lines[2]}")
det_lite_model_list=$(func_parser_value "${lines[3]}")
rec_lite_model_list=$(func_parser_value "${lines[4]}")
cls_lite_model_list=$(func_parser_value "${lines[5]}")
if [[ $inference_cmd =~ "det" ]];then
lite_model_list=${det_lite_model_list}
elif [[ $inference_cmd =~ "rec" ]];then
lite_model_list=(${rec_lite_model_list[*]} ${cls_lite_model_list[*]})
elif [[ $inference_cmd =~ "system" ]];then
lite_model_list=(${det_lite_model_list[*]} ${rec_lite_model_list[*]} ${cls_lite_model_list[*]})
else
echo "inference_cmd is wrong, please check."
exit 1
fi
if [ ${DEVICE} = "ARM_CPU" ];then
valid_targets="arm"
paddlelite_url="https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.10-rc/inference_lite_lib.android.armv8.gcc.c++_shared.with_extra.with_cv.tar.gz"
end_index="66"
elif [ ${DEVICE} = "ARM_GPU_OPENCL" ];then
valid_targets="opencl"
paddlelite_url="https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.10-rc/inference_lite_lib.armv8.clang.with_exception.with_extra.with_cv.opencl.tar.gz"
end_index="71"
else
echo "DEVICE only suport ARM_CPU, ARM_GPU_OPENCL."
exit 2
fi
# prepare lite .nb model
pip install paddlelite==2.10-rc
current_dir=${PWD}
IFS="|"
model_path=./inference_models
for model in ${lite_model_list[*]}; do
if [[ $model =~ "PP-OCRv2" ]];then
inference_model_url=https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/${model}.tar
elif [[ $model =~ "v2.0" ]];then
inference_model_url=https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/${model}.tar
else
echo "Model is wrong, please check."
exit 3
fi
inference_model=${inference_model_url##*/}
wget -nc -P ${model_path} ${inference_model_url}
cd ${model_path} && tar -xf ${inference_model} && cd ../
model_dir=${model_path}/${inference_model%.*}
model_file=${model_dir}/inference.pdmodel
param_file=${model_dir}/inference.pdiparams
paddle_lite_opt --model_dir=${model_dir} --model_file=${model_file} --param_file=${param_file} --valid_targets=${valid_targets} --optimize_out=${model_dir}_opt
done
# prepare test data
data_url=https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015_lite.tar
model_path=./inference_models
inference_model=${inference_model_url##*/}
data_file=${data_url##*/}
wget -nc -P ./inference_models ${inference_model_url}
wget -nc -P ./test_data ${data_url}
cd ./inference_models && tar -xf ${inference_model} && cd ../
cd ./test_data && tar -xf ${data_file} && rm ${data_file} && cd ../
# prepare lite env
paddlelite_zipfile=$(echo $paddlelite_url | awk -F "/" '{print $NF}')
paddlelite_file=${paddlelite_zipfile:0:${end_index}}
wget ${paddlelite_url} && tar -xf ${paddlelite_zipfile}
mkdir -p ${paddlelite_file}/demo/cxx/ocr/test_lite
cp -r ${model_path}/*_opt.nb test_data ${paddlelite_file}/demo/cxx/ocr/test_lite
cp ppocr/utils/ppocr_keys_v1.txt deploy/lite/config.txt ${paddlelite_file}/demo/cxx/ocr/test_lite
cp -r ./deploy/lite/* ${paddlelite_file}/demo/cxx/ocr/
cp ${paddlelite_file}/cxx/lib/libpaddle_light_api_shared.so ${paddlelite_file}/demo/cxx/ocr/test_lite
cp ${FILENAME} test_tipc/test_lite_arm_cpp.sh test_tipc/common_func.sh ${paddlelite_file}/demo/cxx/ocr/test_lite
cd ${paddlelite_file}/demo/cxx/ocr/
git clone https://github.com/cuicheng01/AutoLog.git
# make
make -j
sleep 1
make -j
cp ocr_db_crnn test_lite && cp test_lite/libpaddle_light_api_shared.so test_lite/libc++_shared.so
tar -cf test_lite.tar ./test_lite && cp test_lite.tar ${current_dir} && cd ${current_dir}
rm -rf ${paddlelite_file}* && rm -rf ${model_path}
#!/bin/bash
source test_tipc/common_func.sh
FILENAME=$1
dataline=$(awk 'NR==1, NR==16{print}' $FILENAME)
# parser params
IFS=$'\n'
lines=(${dataline})
# parser cpp inference model
model_name=$(func_parser_value "${lines[1]}")
use_opencv=$(func_parser_value "${lines[2]}")
cpp_infer_model_dir_list=$(func_parser_value "${lines[3]}")
cpp_infer_is_quant=$(func_parser_value "${lines[4]}")
# parser cpp inference
inference_cmd=$(func_parser_value "${lines[5]}")
cpp_use_gpu_key=$(func_parser_key "${lines[6]}")
cpp_use_gpu_list=$(func_parser_value "${lines[6]}")
cpp_use_mkldnn_key=$(func_parser_key "${lines[7]}")
cpp_use_mkldnn_list=$(func_parser_value "${lines[7]}")
cpp_cpu_threads_key=$(func_parser_key "${lines[8]}")
cpp_cpu_threads_list=$(func_parser_value "${lines[8]}")
cpp_batch_size_key=$(func_parser_key "${lines[9]}")
cpp_batch_size_list=$(func_parser_value "${lines[9]}")
cpp_use_trt_key=$(func_parser_key "${lines[10]}")
cpp_use_trt_list=$(func_parser_value "${lines[10]}")
cpp_precision_key=$(func_parser_key "${lines[11]}")
cpp_precision_list=$(func_parser_value "${lines[11]}")
cpp_infer_model_key=$(func_parser_key "${lines[12]}")
cpp_image_dir_key=$(func_parser_key "${lines[13]}")
cpp_infer_img_dir=$(func_parser_value "${lines[13]}")
cpp_infer_key1=$(func_parser_key "${lines[14]}")
cpp_infer_value1=$(func_parser_value "${lines[14]}")
cpp_benchmark_key=$(func_parser_key "${lines[15]}")
cpp_benchmark_value=$(func_parser_value "${lines[15]}")
LOG_PATH="./test_tipc/output"
mkdir -p ${LOG_PATH}
status_log="${LOG_PATH}/results_cpp.log"
function func_cpp_inference(){
IFS='|'
_script=$1
_model_dir=$2
_log_path=$3
_img_dir=$4
_flag_quant=$5
# inference
for use_gpu in ${cpp_use_gpu_list[*]}; do
if [ ${use_gpu} = "False" ] || [ ${use_gpu} = "cpu" ]; then
for use_mkldnn in ${cpp_use_mkldnn_list[*]}; do
if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then
continue
fi
for threads in ${cpp_cpu_threads_list[*]}; do
for batch_size in ${cpp_batch_size_list[*]}; do
precision="fp32"
if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then
precison="int8"
fi
_save_log_path="${_log_path}/cpp_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log"
set_infer_data=$(func_set_params "${cpp_image_dir_key}" "${_img_dir}")
set_benchmark=$(func_set_params "${cpp_benchmark_key}" "${cpp_benchmark_value}")
set_batchsize=$(func_set_params "${cpp_batch_size_key}" "${batch_size}")
set_cpu_threads=$(func_set_params "${cpp_cpu_threads_key}" "${threads}")
set_model_dir=$(func_set_params "${cpp_infer_model_key}" "${_model_dir}")
set_infer_params1=$(func_set_params "${cpp_infer_key1}" "${cpp_infer_value1}")
command="${_script} ${cpp_use_gpu_key}=${use_gpu} ${cpp_use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} > ${_save_log_path} 2>&1 "
eval $command
last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}"
status_check $last_status "${command}" "${status_log}"
done
done
done
elif [ ${use_gpu} = "True" ] || [ ${use_gpu} = "gpu" ]; then
for use_trt in ${cpp_use_trt_list[*]}; do
for precision in ${cpp_precision_list[*]}; do
if [[ ${_flag_quant} = "False" ]] && [[ ${precision} =~ "int8" ]]; then
continue
fi
if [[ ${precision} =~ "fp16" || ${precision} =~ "int8" ]] && [ ${use_trt} = "False" ]; then
continue
fi
if [[ ${use_trt} = "False" || ${precision} =~ "int8" ]] && [ ${_flag_quant} = "True" ]; then
continue
fi
for batch_size in ${cpp_batch_size_list[*]}; do
_save_log_path="${_log_path}/cpp_infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log"
set_infer_data=$(func_set_params "${cpp_image_dir_key}" "${_img_dir}")
set_benchmark=$(func_set_params "${cpp_benchmark_key}" "${cpp_benchmark_value}")
set_batchsize=$(func_set_params "${cpp_batch_size_key}" "${batch_size}")
set_tensorrt=$(func_set_params "${cpp_use_trt_key}" "${use_trt}")
set_precision=$(func_set_params "${cpp_precision_key}" "${precision}")
set_model_dir=$(func_set_params "${cpp_infer_model_key}" "${_model_dir}")
set_infer_params1=$(func_set_params "${cpp_infer_key1}" "${cpp_infer_value1}")
command="${_script} ${cpp_use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} > ${_save_log_path} 2>&1 "
eval $command
last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}"
status_check $last_status "${command}" "${status_log}"
done
done
done
else
echo "Does not support hardware other than CPU and GPU Currently!"
fi
done
}
cd deploy/cpp_infer
if [ ${use_opencv} = "True" ]; then
if [ -d "opencv-3.4.7/opencv3/" ] && [ $(md5sum opencv-3.4.7.tar.gz | awk -F ' ' '{print $1}') = "faa2b5950f8bee3f03118e600c74746a" ];then
echo "################### build opencv skipped ###################"
else
echo "################### build opencv ###################"
rm -rf opencv-3.4.7.tar.gz opencv-3.4.7/
wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/opencv-3.4.7.tar.gz
tar -xf opencv-3.4.7.tar.gz
cd opencv-3.4.7/
install_path=$(pwd)/opencv3
rm -rf build
mkdir build
cd build
cmake .. \
-DCMAKE_INSTALL_PREFIX=${install_path} \
-DCMAKE_BUILD_TYPE=Release \
-DBUILD_SHARED_LIBS=OFF \
-DWITH_IPP=OFF \
-DBUILD_IPP_IW=OFF \
-DWITH_LAPACK=OFF \
-DWITH_EIGEN=OFF \
-DCMAKE_INSTALL_LIBDIR=lib64 \
-DWITH_ZLIB=ON \
-DBUILD_ZLIB=ON \
-DWITH_JPEG=ON \
-DBUILD_JPEG=ON \
-DWITH_PNG=ON \
-DBUILD_PNG=ON \
-DWITH_TIFF=ON \
-DBUILD_TIFF=ON
make -j
make install
cd ../
echo "################### build opencv finished ###################"
fi
fi
echo "################### build PaddleOCR demo ####################"
if [ ${use_opencv} = "True" ]; then
OPENCV_DIR=$(pwd)/opencv-3.4.7/opencv3/
else
OPENCV_DIR=''
fi
LIB_DIR=$(pwd)/Paddle/build/paddle_inference_install_dir/
CUDA_LIB_DIR=$(dirname `find /usr -name libcudart.so`)
CUDNN_LIB_DIR=$(dirname `find /usr -name libcudnn.so`)
BUILD_DIR=build
rm -rf ${BUILD_DIR}
mkdir ${BUILD_DIR}
cd ${BUILD_DIR}
cmake .. \
-DPADDLE_LIB=${LIB_DIR} \
-DWITH_MKL=ON \
-DWITH_GPU=OFF \
-DWITH_STATIC_LIB=OFF \
-DWITH_TENSORRT=OFF \
-DOPENCV_DIR=${OPENCV_DIR} \
-DCUDNN_LIB=${CUDNN_LIB_DIR} \
-DCUDA_LIB=${CUDA_LIB_DIR} \
-DTENSORRT_DIR=${TENSORRT_DIR}
make -j
cd ../../../
echo "################### build PaddleOCR demo finished ###################"
# set cuda device
GPUID=$2
if [ ${#GPUID} -le 0 ];then
env=" "
else
env="export CUDA_VISIBLE_DEVICES=${GPUID}"
fi
set CUDA_VISIBLE_DEVICES
eval $env
echo "################### run test ###################"
export Count=0
IFS="|"
infer_quant_flag=(${cpp_infer_is_quant})
for infer_model in ${cpp_infer_model_dir_list[*]}; do
#run inference
is_quant=${infer_quant_flag[Count]}
func_cpp_inference "${inference_cmd}" "${infer_model}" "${LOG_PATH}" "${cpp_infer_img_dir}" ${is_quant}
Count=$(($Count + 1))
done
#!/bin/bash
source test_tipc/common_func.sh
source test_tipc/test_train_inference_python.sh
FILENAME=$1
# MODE be one of ['whole_infer']
MODE=$2
dataline=$(awk 'NR==1, NR==18{print}' $FILENAME)
# parser params
IFS=$'\n'
lines=(${dataline})
model_name=$(func_parser_value "${lines[1]}")
python=$(func_parser_value "${lines[2]}")
infer_model_dir_list=$(func_parser_value "${lines[3]}")
infer_export_list=$(func_parser_value "${lines[4]}")
infer_is_quant=$(func_parser_value "${lines[5]}")
# parser inference
inference_py=$(func_parser_value "${lines[6]}")
use_gpu_key=$(func_parser_key "${lines[7]}")
use_gpu_list=$(func_parser_value "${lines[7]}")
use_mkldnn_key=$(func_parser_key "${lines[8]}")
use_mkldnn_list=$(func_parser_value "${lines[8]}")
cpu_threads_key=$(func_parser_key "${lines[9]}")
cpu_threads_list=$(func_parser_value "${lines[9]}")
batch_size_key=$(func_parser_key "${lines[10]}")
batch_size_list=$(func_parser_value "${lines[10]}")
use_trt_key=$(func_parser_key "${lines[11]}")
use_trt_list=$(func_parser_value "${lines[11]}")
precision_key=$(func_parser_key "${lines[12]}")
precision_list=$(func_parser_value "${lines[12]}")
infer_model_key=$(func_parser_key "${lines[13]}")
image_dir_key=$(func_parser_key "${lines[14]}")
infer_img_dir=$(func_parser_value "${lines[14]}")
save_log_key=$(func_parser_key "${lines[15]}")
benchmark_key=$(func_parser_key "${lines[16]}")
benchmark_value=$(func_parser_value "${lines[16]}")
infer_key1=$(func_parser_key "${lines[17]}")
infer_value1=$(func_parser_value "${lines[17]}")
LOG_PATH="./test_tipc/output"
mkdir -p ${LOG_PATH}
status_log="${LOG_PATH}/results_python.log"
if [ ${MODE} = "whole_infer" ]; then
GPUID=$3
if [ ${#GPUID} -le 0 ];then
env=" "
else
env="export CUDA_VISIBLE_DEVICES=${GPUID}"
fi
# set CUDA_VISIBLE_DEVICES
eval $env
export Count=0
IFS="|"
infer_run_exports=(${infer_export_list})
infer_quant_flag=(${infer_is_quant})
for infer_model in ${infer_model_dir_list[*]}; do
# run export
if [ ${infer_run_exports[Count]} != "null" ];then
save_infer_dir=$(dirname $infer_model)
set_export_weight=$(func_set_params "${export_weight}" "${infer_model}")
set_save_infer_key=$(func_set_params "${save_infer_key}" "${save_infer_dir}")
export_cmd="${python} ${infer_run_exports[Count]} ${set_export_weight} ${set_save_infer_key}"
echo ${infer_run_exports[Count]}
echo $export_cmd
eval $export_cmd
status_export=$?
status_check $status_export "${export_cmd}" "${status_log}"
else
save_infer_dir=${infer_model}
fi
#run inference
is_quant=${infer_quant_flag[Count]}
if [ ${MODE} = "klquant_infer" ]; then
is_quant="True"
fi
func_inference "${python}" "${inference_py}" "${save_infer_dir}" "${LOG_PATH}" "${infer_img_dir}" ${is_quant}
Count=$(($Count + 1))
done
fi
#!/bin/bash
source ./common_func.sh
export LD_LIBRARY_PATH=${PWD}:$LD_LIBRARY_PATH
FILENAME=$1
dataline=$(cat $FILENAME)
# parser params
IFS=$'\n'
lines=(${dataline})
# parser lite inference
inference_cmd=$(func_parser_value "${lines[1]}")
runtime_device=$(func_parser_value "${lines[2]}")
det_model_list=$(func_parser_value "${lines[3]}")
rec_model_list=$(func_parser_value "${lines[4]}")
cls_model_list=$(func_parser_value "${lines[5]}")
cpu_threads_list=$(func_parser_value "${lines[6]}")
det_batch_size_list=$(func_parser_value "${lines[7]}")
rec_batch_size_list=$(func_parser_value "${lines[8]}")
infer_img_dir_list=$(func_parser_value "${lines[9]}")
config_dir=$(func_parser_value "${lines[10]}")
rec_dict_dir=$(func_parser_value "${lines[11]}")
benchmark_value=$(func_parser_value "${lines[12]}")
if [[ $inference_cmd =~ "det" ]]; then
lite_model_list=${det_model_list}
elif [[ $inference_cmd =~ "rec" ]]; then
lite_model_list=(${rec_model_list[*]} ${cls_model_list[*]})
elif [[ $inference_cmd =~ "system" ]]; then
lite_model_list=(${det_model_list[*]} ${rec_model_list[*]} ${cls_model_list[*]})
else
echo "inference_cmd is wrong, please check."
exit 1
fi
LOG_PATH="./output"
mkdir -p ${LOG_PATH}
status_log="${LOG_PATH}/results.log"
function func_test_det(){
IFS='|'
_script=$1
_det_model=$2
_log_path=$3
_img_dir=$4
_config=$5
if [[ $_det_model =~ "slim" ]]; then
precision="INT8"
else
precision="FP32"
fi
# lite inference
for num_threads in ${cpu_threads_list[*]}; do
for det_batchsize in ${det_batch_size_list[*]}; do
_save_log_path="${_log_path}/lite_${_det_model}_runtime_device_${runtime_device}_precision_${precision}_det_batchsize_${det_batchsize}_threads_${num_threads}.log"
command="${_script} ${_det_model} ${runtime_device} ${precision} ${num_threads} ${det_batchsize} ${_img_dir} ${_config} ${benchmark_value} > ${_save_log_path} 2>&1"
eval ${command}
status_check $? "${command}" "${status_log}"
done
done
}
function func_test_rec(){
IFS='|'
_script=$1
_rec_model=$2
_cls_model=$3
_log_path=$4
_img_dir=$5
_config=$6
_rec_dict_dir=$7
if [[ $_det_model =~ "slim" ]]; then
_precision="INT8"
else
_precision="FP32"
fi
# lite inference
for num_threads in ${cpu_threads_list[*]}; do
for rec_batchsize in ${rec_batch_size_list[*]}; do
_save_log_path="${_log_path}/lite_${_rec_model}_${cls_model}_runtime_device_${runtime_device}_precision_${_precision}_rec_batchsize_${rec_batchsize}_threads_${num_threads}.log"
command="${_script} ${_rec_model} ${_cls_model} ${runtime_device} ${_precision} ${num_threads} ${rec_batchsize} ${_img_dir} ${_config} ${_rec_dict_dir} ${benchmark_value} > ${_save_log_path} 2>&1"
eval ${command}
status_check $? "${command}" "${status_log}"
done
done
}
function func_test_system(){
IFS='|'
_script=$1
_det_model=$2
_rec_model=$3
_cls_model=$4
_log_path=$5
_img_dir=$6
_config=$7
_rec_dict_dir=$8
if [[ $_det_model =~ "slim" ]]; then
_precision="INT8"
else
_precision="FP32"
fi
# lite inference
for num_threads in ${cpu_threads_list[*]}; do
for det_batchsize in ${det_batch_size_list[*]}; do
for rec_batchsize in ${rec_batch_size_list[*]}; do
_save_log_path="${_log_path}/lite_${_det_model}_${_rec_model}_${_cls_model}_runtime_device_${runtime_device}_precision_${_precision}_det_batchsize_${det_batchsize}_rec_batchsize_${rec_batchsize}_threads_${num_threads}.log"
command="${_script} ${_det_model} ${_rec_model} ${_cls_model} ${runtime_device} ${_precision} ${num_threads} ${det_batchsize} ${_img_dir} ${_config} ${_rec_dict_dir} ${benchmark_value} > ${_save_log_path} 2>&1"
eval ${command}
status_check $? "${command}" "${status_log}"
done
done
done
}
echo "################### run test ###################"
if [[ $inference_cmd =~ "det" ]]; then
IFS="|"
det_model_list=(${det_model_list[*]})
for i in {0..1}; do
#run lite inference
for img_dir in ${infer_img_dir_list[*]}; do
func_test_det "${inference_cmd}" "${det_model_list[i]}_opt.nb" "${LOG_PATH}" "${img_dir}" "${config_dir}"
done
done
elif [[ $inference_cmd =~ "rec" ]]; then
IFS="|"
rec_model_list=(${rec_model_list[*]})
cls_model_list=(${cls_model_list[*]})
for i in {0..1}; do
#run lite inference
for img_dir in ${infer_img_dir_list[*]}; do
func_test_rec "${inference_cmd}" "${rec_model_list[i]}_opt.nb" "${cls_model_list[i]}_opt.nb" "${LOG_PATH}" "${img_dir}" "${config_dir}" "${rec_dict_dir}"
done
done
elif [[ $inference_cmd =~ "system" ]]; then
IFS="|"
det_model_list=(${det_model_list[*]})
rec_model_list=(${rec_model_list[*]})
cls_model_list=(${cls_model_list[*]})
for i in {0..1}; do
#run lite inference
for img_dir in ${infer_img_dir_list[*]}; do
func_test_system "${inference_cmd}" "${det_model_list[i]}_opt.nb" "${rec_model_list[i]}_opt.nb" "${cls_model_list[i]}_opt.nb" "${LOG_PATH}" "${img_dir}" "${config_dir}" "${rec_dict_dir}"
done
done
fi
#!/bin/bash
source test_tipc/common_func.sh
FILENAME=$1
dataline=$(cat ${FILENAME})
lines=(${dataline})
# common params
model_name=$(func_parser_value "${lines[1]}")
python=$(func_parser_value "${lines[2]}")
# parser params
dataline=$(awk 'NR==1, NR==14{print}' $FILENAME)
IFS=$'\n'
lines=(${dataline})
# parser paddle2onnx
model_name=$(func_parser_value "${lines[1]}")
python=$(func_parser_value "${lines[2]}")
paddle2onnx_cmd=$(func_parser_value "${lines[3]}")
infer_model_dir_key=$(func_parser_key "${lines[4]}")
infer_model_dir_value=$(func_parser_value "${lines[4]}")
model_filename_key=$(func_parser_key "${lines[5]}")
model_filename_value=$(func_parser_value "${lines[5]}")
params_filename_key=$(func_parser_key "${lines[6]}")
params_filename_value=$(func_parser_value "${lines[6]}")
save_file_key=$(func_parser_key "${lines[7]}")
save_file_value=$(func_parser_value "${lines[7]}")
opset_version_key=$(func_parser_key "${lines[8]}")
opset_version_value=$(func_parser_value "${lines[8]}")
enable_onnx_checker_key=$(func_parser_key "${lines[9]}")
enable_onnx_checker_value=$(func_parser_value "${lines[9]}")
# parser onnx inference
inference_py=$(func_parser_value "${lines[10]}")
use_gpu_key=$(func_parser_key "${lines[11]}")
use_gpu_value=$(func_parser_value "${lines[11]}")
det_model_key=$(func_parser_key "${lines[12]}")
image_dir_key=$(func_parser_key "${lines[13]}")
image_dir_value=$(func_parser_value "${lines[13]}")
LOG_PATH="./test_tipc/output"
mkdir -p ./test_tipc/output
status_log="${LOG_PATH}/results_paddle2onnx.log"
function func_paddle2onnx(){
IFS='|'
_script=$1
# paddle2onnx
_save_log_path="${LOG_PATH}/paddle2onnx_infer_cpu.log"
set_dirname=$(func_set_params "${infer_model_dir_key}" "${infer_model_dir_value}")
set_model_filename=$(func_set_params "${model_filename_key}" "${model_filename_value}")
set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}")
set_save_model=$(func_set_params "${save_file_key}" "${save_file_value}")
set_opset_version=$(func_set_params "${opset_version_key}" "${opset_version_value}")
set_enable_onnx_checker=$(func_set_params "${enable_onnx_checker_key}" "${enable_onnx_checker_value}")
trans_model_cmd="${padlle2onnx_cmd} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_save_model} ${set_opset_version} ${set_enable_onnx_checker}"
eval $trans_model_cmd
last_status=${PIPESTATUS[0]}
status_check $last_status "${trans_model_cmd}" "${status_log}"
# python inference
set_gpu=$(func_set_params "${use_gpu_key}" "${use_gpu_value}")
set_model_dir=$(func_set_params "${det_model_key}" "${save_file_value}")
set_img_dir=$(func_set_params "${image_dir_key}" "${image_dir_value}")
infer_model_cmd="${python} ${inference_py} ${set_gpu} ${set_img_dir} ${set_model_dir} --use_onnx=True > ${_save_log_path} 2>&1 "
eval $infer_model_cmd
last_status=${PIPESTATUS[0]}
status_check $last_status "${infer_model_cmd}" "${status_log}"
}
echo "################### run test ###################"
export Count=0
IFS="|"
func_paddle2onnx
\ No newline at end of file
#!/bin/bash
source test_tipc/common_func.sh
FILENAME=$1
dataline=$(awk 'NR==1, NR==18{print}' $FILENAME)
# parser params
IFS=$'\n'
lines=(${dataline})
# parser serving
model_name=$(func_parser_value "${lines[1]}")
python=$(func_parser_value "${lines[2]}")
trans_model_py=$(func_parser_value "${lines[3]}")
infer_model_dir_key=$(func_parser_key "${lines[4]}")
infer_model_dir_value=$(func_parser_value "${lines[4]}")
model_filename_key=$(func_parser_key "${lines[5]}")
model_filename_value=$(func_parser_value "${lines[5]}")
params_filename_key=$(func_parser_key "${lines[6]}")
params_filename_value=$(func_parser_value "${lines[6]}")
serving_server_key=$(func_parser_key "${lines[7]}")
serving_server_value=$(func_parser_value "${lines[7]}")
serving_client_key=$(func_parser_key "${lines[8]}")
serving_client_value=$(func_parser_value "${lines[8]}")
serving_dir_value=$(func_parser_value "${lines[9]}")
web_service_py=$(func_parser_value "${lines[10]}")
web_use_gpu_key=$(func_parser_key "${lines[11]}")
web_use_gpu_list=$(func_parser_value "${lines[11]}")
web_use_mkldnn_key=$(func_parser_key "${lines[12]}")
web_use_mkldnn_list=$(func_parser_value "${lines[12]}")
web_cpu_threads_key=$(func_parser_key "${lines[13]}")
web_cpu_threads_list=$(func_parser_value "${lines[13]}")
web_use_trt_key=$(func_parser_key "${lines[14]}")
web_use_trt_list=$(func_parser_value "${lines[14]}")
web_precision_key=$(func_parser_key "${lines[15]}")
web_precision_list=$(func_parser_value "${lines[15]}")
pipeline_py=$(func_parser_value "${lines[16]}")
image_dir_key=$(func_parser_key "${lines[17]}")
image_dir_value=$(func_parser_value "${lines[17]}")
LOG_PATH="../../test_tipc/output"
mkdir -p ./test_tipc/output
status_log="${LOG_PATH}/results_serving.log"
function func_serving(){
IFS='|'
_python=$1
_script=$2
_model_dir=$3
# pdserving
set_dirname=$(func_set_params "${infer_model_dir_key}" "${infer_model_dir_value}")
set_model_filename=$(func_set_params "${model_filename_key}" "${model_filename_value}")
set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}")
set_serving_server=$(func_set_params "${serving_server_key}" "${serving_server_value}")
set_serving_client=$(func_set_params "${serving_client_key}" "${serving_client_value}")
set_image_dir=$(func_set_params "${image_dir_key}" "${image_dir_value}")
trans_model_cmd="${python} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
eval $trans_model_cmd
cd ${serving_dir_value}
echo $PWD
unset https_proxy
unset http_proxy
for python in ${python[*]}; do
if [ ${python} = "cpp"]; then
for use_gpu in ${web_use_gpu_list[*]}; do
if [ ${use_gpu} = "null" ]; then
web_service_cpp_cmd="${python} -m paddle_serving_server.serve --model ppocr_det_mobile_2.0_serving/ ppocr_rec_mobile_2.0_serving/ --port 9293"
eval $web_service_cpp_cmd
sleep 2s
_save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_usemkldnn_False_threads_4_batchsize_1.log"
pipeline_cmd="${python} ocr_cpp_client.py ppocr_det_mobile_2.0_client/ ppocr_rec_mobile_2.0_client/"
eval $pipeline_cmd
last_status=${PIPESTATUS[0]}
status_check $last_status "${pipeline_cmd}" "${status_log}"
sleep 2s
ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
else
web_service_cpp_cmd="${python} -m paddle_serving_server.serve --model ppocr_det_mobile_2.0_serving/ ppocr_rec_mobile_2.0_serving/ --port 9293 --gpu_id=0"
eval $web_service_cpp_cmd
sleep 2s
_save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_usemkldnn_False_threads_4_batchsize_1.log"
pipeline_cmd="${python} ocr_cpp_client.py ppocr_det_mobile_2.0_client/ ppocr_rec_mobile_2.0_client/"
eval $pipeline_cmd
last_status=${PIPESTATUS[0]}
status_check $last_status "${pipeline_cmd}" "${status_log}"
sleep 2s
ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
fi
done
else
# python serving
for use_gpu in ${web_use_gpu_list[*]}; do
echo ${use_gpu}
if [ ${use_gpu} = "null" ]; then
for use_mkldnn in ${web_use_mkldnn_list[*]}; do
if [ ${use_mkldnn} = "False" ]; then
continue
fi
for threads in ${web_cpu_threads_list[*]}; do
set_cpu_threads=$(func_set_params "${web_cpu_threads_key}" "${threads}")
web_service_cmd="${python} ${web_service_py} ${web_use_gpu_key}=${use_gpu} ${web_use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} &"
eval $web_service_cmd
sleep 2s
for pipeline in ${pipeline_py[*]}; do
_save_log_path="${LOG_PATH}/server_infer_cpu_${pipeline%_client*}_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_1.log"
pipeline_cmd="${python} ${pipeline} ${set_image_dir} > ${_save_log_path} 2>&1 "
eval $pipeline_cmd
last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}"
status_check $last_status "${pipeline_cmd}" "${status_log}"
sleep 2s
done
ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
done
done
elif [ ${use_gpu} = "0" ]; then
for use_trt in ${web_use_trt_list[*]}; do
for precision in ${web_precision_list[*]}; do
if [[ ${_flag_quant} = "False" ]] && [[ ${precision} =~ "int8" ]]; then
continue
fi
if [[ ${precision} =~ "fp16" || ${precision} =~ "int8" ]] && [ ${use_trt} = "False" ]; then
continue
fi
if [[ ${use_trt} = "False" || ${precision} =~ "int8" ]] && [[ ${_flag_quant} = "True" ]]; then
continue
fi
set_tensorrt=$(func_set_params "${web_use_trt_key}" "${use_trt}")
set_precision=$(func_set_params "${web_precision_key}" "${precision}")
web_service_cmd="${python} ${web_service_py} ${web_use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} & "
eval $web_service_cmd
sleep 2s
for pipeline in ${pipeline_py[*]}; do
_save_log_path="${LOG_PATH}/server_infer_gpu_${pipeline%_client*}_usetrt_${use_trt}_precision_${precision}_batchsize_1.log"
pipeline_cmd="${python} ${pipeline} ${set_image_dir}> ${_save_log_path} 2>&1"
eval $pipeline_cmd
last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}"
status_check $last_status "${pipeline_cmd}" "${status_log}"
sleep 2s
done
ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
done
done
else
echo "Does not support hardware other than CPU and GPU Currently!"
fi
done
fi
done
}
# set cuda device
GPUID=$2
if [ ${#GPUID} -le 0 ];then
env=" "
else
env="export CUDA_VISIBLE_DEVICES=${GPUID}"
fi
set CUDA_VISIBLE_DEVICES
eval $env
echo "################### run test ###################"
export Count=0
IFS="|"
func_serving "${web_service_cmd}"