Unverified · Commit f3a03618 · authored by Birdylx · committed by GitHub

[TIPC] add tipc c++ infer for msvsr (#676)

* add tipc c++ infer for msvsr

* add tipc c++ infer for msvsr
Parent eebf94d9
cmake_minimum_required(VERSION 3.14)
project(vsr CXX C)
option(WITH_MKL "Compile demo with MKL/OpenBlas support, default use MKL." ON)
option(WITH_GPU "Compile demo with GPU/CPU, default use CPU." OFF)
option(WITH_STATIC_LIB "Compile demo with static/shared library, default use static." ON)
option(WITH_TENSORRT "Compile demo with TensorRT." OFF)
SET(PADDLE_LIB "" CACHE PATH "Location of libraries")
SET(OPENCV_DIR "" CACHE PATH "Location of libraries")
SET(CUDA_LIB "" CACHE PATH "Location of libraries")
SET(CUDNN_LIB "" CACHE PATH "Location of libraries")
SET(TENSORRT_DIR "" CACHE PATH "Compile demo with TensorRT")
set(DEMO_NAME "vsr")
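# On MSVC builds, the macro below rewrites /MD to /MT in every configuration so the
# demo links against the static CRT (applied when WITH_STATIC_LIB is ON).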
macro(safe_set_static_flag)
    foreach(flag_var
            CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE
            CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO)
        if(${flag_var} MATCHES "/MD")
            string(REGEX REPLACE "/MD" "/MT" ${flag_var} "${${flag_var}}")
        endif()
    endforeach()
endmacro()
if (WITH_MKL)
ADD_DEFINITIONS(-DUSE_MKL)
endif()
if(NOT DEFINED PADDLE_LIB)
message(FATAL_ERROR "please set PADDLE_LIB with -DPADDLE_LIB=/path/paddle/lib")
endif()
if(NOT DEFINED OPENCV_DIR)
message(FATAL_ERROR "please set OPENCV_DIR with -DOPENCV_DIR=/path/opencv")
endif()
if (WIN32)
include_directories("${PADDLE_LIB}/paddle/include")
link_directories("${PADDLE_LIB}/paddle/lib")
find_package(OpenCV REQUIRED PATHS ${OPENCV_DIR}/build/ NO_DEFAULT_PATH)
else ()
find_package(OpenCV REQUIRED PATHS ${OPENCV_DIR}/share/OpenCV NO_DEFAULT_PATH)
include_directories("${PADDLE_LIB}/paddle/include")
link_directories("${PADDLE_LIB}/paddle/lib")
endif ()
include_directories(${OpenCV_INCLUDE_DIRS})
if (WIN32)
add_definitions("/DGOOGLE_GLOG_DLL_DECL=")
if(WITH_MKL)
set(FLAG_OPENMP "/openmp")
endif()
set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} /bigobj /MTd ${FLAG_OPENMP}")
set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} /bigobj /MT ${FLAG_OPENMP}")
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /bigobj /MTd ${FLAG_OPENMP}")
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /bigobj /MT ${FLAG_OPENMP}")
if (WITH_STATIC_LIB)
safe_set_static_flag()
add_definitions(-DSTATIC_LIB)
endif()
message("cmake c debug flags " ${CMAKE_C_FLAGS_DEBUG})
message("cmake c release flags " ${CMAKE_C_FLAGS_RELEASE})
message("cmake cxx debug flags " ${CMAKE_CXX_FLAGS_DEBUG})
message("cmake cxx release flags " ${CMAKE_CXX_FLAGS_RELEASE})
else()
if(WITH_MKL)
set(FLAG_OPENMP "-fopenmp")
endif()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g -O3 ${FLAG_OPENMP} -std=c++11")
set(CMAKE_STATIC_LIBRARY_PREFIX "")
message("cmake cxx flags " ${CMAKE_CXX_FLAGS})
endif()
if (WITH_GPU)
if (NOT DEFINED CUDA_LIB OR ${CUDA_LIB} STREQUAL "")
message(FATAL_ERROR "please set CUDA_LIB with -DCUDA_LIB=/path/cuda-8.0/lib64")
endif()
if (NOT WIN32)
if (NOT DEFINED CUDNN_LIB)
message(FATAL_ERROR "please set CUDNN_LIB with -DCUDNN_LIB=/path/cudnn_v7.4/cuda/lib64")
endif()
endif(NOT WIN32)
endif()
include_directories("${PADDLE_LIB}/third_party/install/protobuf/include")
include_directories("${PADDLE_LIB}/third_party/install/glog/include")
include_directories("${PADDLE_LIB}/third_party/install/gflags/include")
include_directories("${PADDLE_LIB}/third_party/install/xxhash/include")
include_directories("${PADDLE_LIB}/third_party/install/zlib/include")
include_directories("${PADDLE_LIB}/third_party/boost")
include_directories("${PADDLE_LIB}/third_party/eigen3")
include_directories("${CMAKE_SOURCE_DIR}/")
if (NOT WIN32)
if (WITH_TENSORRT AND WITH_GPU)
include_directories("${TENSORRT_DIR}/include")
link_directories("${TENSORRT_DIR}/lib")
endif()
endif(NOT WIN32)
link_directories("${PADDLE_LIB}/third_party/install/zlib/lib")
link_directories("${PADDLE_LIB}/third_party/install/protobuf/lib")
link_directories("${PADDLE_LIB}/third_party/install/glog/lib")
link_directories("${PADDLE_LIB}/third_party/install/gflags/lib")
link_directories("${PADDLE_LIB}/third_party/install/xxhash/lib")
link_directories("${PADDLE_LIB}/paddle/lib")
if(WITH_MKL)
include_directories("${PADDLE_LIB}/third_party/install/mklml/include")
if (WIN32)
set(MATH_LIB ${PADDLE_LIB}/third_party/install/mklml/lib/mklml.lib
${PADDLE_LIB}/third_party/install/mklml/lib/libiomp5md.lib)
else ()
set(MATH_LIB ${PADDLE_LIB}/third_party/install/mklml/lib/libmklml_intel${CMAKE_SHARED_LIBRARY_SUFFIX}
${PADDLE_LIB}/third_party/install/mklml/lib/libiomp5${CMAKE_SHARED_LIBRARY_SUFFIX})
execute_process(COMMAND cp -r ${PADDLE_LIB}/third_party/install/mklml/lib/libmklml_intel${CMAKE_SHARED_LIBRARY_SUFFIX} /usr/lib)
endif ()
set(MKLDNN_PATH "${PADDLE_LIB}/third_party/install/mkldnn")
if(EXISTS ${MKLDNN_PATH})
include_directories("${MKLDNN_PATH}/include")
if (WIN32)
set(MKLDNN_LIB ${MKLDNN_PATH}/lib/mkldnn.lib)
else ()
set(MKLDNN_LIB ${MKLDNN_PATH}/lib/libmkldnn.so.0)
endif ()
endif()
else()
if (WIN32)
set(MATH_LIB ${PADDLE_LIB}/third_party/install/openblas/lib/openblas${CMAKE_STATIC_LIBRARY_SUFFIX})
else ()
set(MATH_LIB ${PADDLE_LIB}/third_party/install/openblas/lib/libopenblas${CMAKE_STATIC_LIBRARY_SUFFIX})
endif ()
endif()
# Note: libpaddle_inference_api.so/.a must come before libpaddle_inference.so/.a
if(WITH_STATIC_LIB)
if(WIN32)
set(DEPS
${PADDLE_LIB}/paddle/lib/paddle_inference${CMAKE_STATIC_LIBRARY_SUFFIX})
else()
set(DEPS
${PADDLE_LIB}/paddle/lib/libpaddle_inference${CMAKE_STATIC_LIBRARY_SUFFIX})
endif()
else()
if(WIN32)
set(DEPS
${PADDLE_LIB}/paddle/lib/paddle_inference${CMAKE_SHARED_LIBRARY_SUFFIX})
else()
set(DEPS
${PADDLE_LIB}/paddle/lib/libpaddle_inference${CMAKE_SHARED_LIBRARY_SUFFIX})
endif()
endif(WITH_STATIC_LIB)
if (NOT WIN32)
set(DEPS ${DEPS}
${MATH_LIB} ${MKLDNN_LIB}
glog gflags protobuf z xxhash
)
if(EXISTS "${PADDLE_LIB}/third_party/install/snappystream/lib")
set(DEPS ${DEPS} snappystream)
endif()
if (EXISTS "${PADDLE_LIB}/third_party/install/snappy/lib")
set(DEPS ${DEPS} snappy)
endif()
else()
set(DEPS ${DEPS}
${MATH_LIB} ${MKLDNN_LIB}
glog gflags_static libprotobuf xxhash)
set(DEPS ${DEPS} libcmt shlwapi)
if (EXISTS "${PADDLE_LIB}/third_party/install/snappy/lib")
set(DEPS ${DEPS} snappy)
endif()
if(EXISTS "${PADDLE_LIB}/third_party/install/snappystream/lib")
set(DEPS ${DEPS} snappystream)
endif()
endif(NOT WIN32)
if(WITH_GPU)
if(NOT WIN32)
if (WITH_TENSORRT)
set(DEPS ${DEPS} ${TENSORRT_DIR}/lib/libnvinfer${CMAKE_SHARED_LIBRARY_SUFFIX})
set(DEPS ${DEPS} ${TENSORRT_DIR}/lib/libnvinfer_plugin${CMAKE_SHARED_LIBRARY_SUFFIX})
endif()
set(DEPS ${DEPS} ${CUDA_LIB}/libcudart${CMAKE_SHARED_LIBRARY_SUFFIX})
set(DEPS ${DEPS} ${CUDNN_LIB}/libcudnn${CMAKE_SHARED_LIBRARY_SUFFIX})
else()
set(DEPS ${DEPS} ${CUDA_LIB}/cudart${CMAKE_STATIC_LIBRARY_SUFFIX} )
set(DEPS ${DEPS} ${CUDA_LIB}/cublas${CMAKE_STATIC_LIBRARY_SUFFIX} )
set(DEPS ${DEPS} ${CUDNN_LIB}/cudnn${CMAKE_STATIC_LIBRARY_SUFFIX})
endif()
endif()
if (NOT WIN32)
set(EXTERNAL_LIB "-ldl -lrt -lgomp -lz -lm -lpthread")
set(DEPS ${DEPS} ${EXTERNAL_LIB})
endif()
set(DEPS ${DEPS} ${OpenCV_LIBS})
AUX_SOURCE_DIRECTORY(./src SRCS)
add_executable(${DEMO_NAME} ${SRCS})
target_link_libraries(${DEMO_NAME} ${DEPS})
if (WIN32 AND WITH_MKL)
add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_LIB}/third_party/install/mklml/lib/mklml.dll ./mklml.dll
COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_LIB}/third_party/install/mklml/lib/libiomp5md.dll ./libiomp5md.dll
COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_LIB}/third_party/install/mkldnn/lib/mkldnn.dll ./mkldnn.dll
COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_LIB}/third_party/install/mklml/lib/mklml.dll ./release/mklml.dll
COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_LIB}/third_party/install/mklml/lib/libiomp5md.dll ./release/libiomp5md.dll
COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_LIB}/third_party/install/mkldnn/lib/mkldnn.dll ./release/mkldnn.dll
)
endif()
#include "opencv2/core.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/imgproc.hpp"
#include <chrono>
#include <iomanip>
#include <iostream>
#include <ostream>
#include <vector>
#include <cstring>
#include <fstream>
#include <numeric>
class Normalize {
public:
    virtual void Run(cv::Mat *im, const std::vector<float> &mean,
                     const std::vector<float> &scale, const bool is_scale = true);
};

// RGB -> CHW
// Note: `data` must point to a buffer that can hold 3 * im->rows * im->cols floats.
class Permute {
public:
    virtual void Run(const cv::Mat *im, float *data);
};
#pragma once
#include <string>
#include <vector>
#include <memory>
#include <utility>
#include <ctime>
#include <numeric>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include "include/process_op.h"
#include "paddle_inference_api.h"
namespace PaddleGAN {
class VSR {
public:
    explicit VSR(const std::string& model_path,
                 const std::string& param_path,
                 const std::string& device,
                 const int& gpu_id,
                 const bool& use_mkldnn,
                 const int& cpu_threads) {
        this->device_ = device;
        this->gpu_id_ = gpu_id;
        this->use_mkldnn_ = use_mkldnn;
        this->cpu_threads_ = cpu_threads;
        LoadModel(model_path, param_path);
    }
    // Load the Paddle inference model
    void LoadModel(const std::string& model_path, const std::string& param_path);

    // Run the predictor on a group of frames
    void Run(const std::vector<cv::Mat>& imgs, std::vector<cv::Mat>* result = nullptr);

private:
    std::shared_ptr<paddle_infer::Predictor> predictor_;
    std::string device_ = "GPU";
    int gpu_id_ = 0;
    bool use_mkldnn_ = false;
    int cpu_threads_ = 1;
    std::vector<float> mean_ = {0., 0., 0.};
    std::vector<float> scale_ = {1., 1., 1.};
    // pre/post-processing ops
    Permute permute_op_;
    Normalize normalize_op_;
    std::vector<float> Preprocess(cv::Mat& img);
};
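
// Illustrative usage (a sketch only; the paths and frame source are placeholders):
//   PaddleGAN::VSR vsr("msvsr.pdmodel", "msvsr.pdiparams", "GPU", 0, false, 1);
//   std::vector<cv::Mat> frames = ...;  // frame_num BGR frames of equal size
//   std::vector<cv::Mat> outs;
//   vsr.Run(frames, &outs);  // outs: CV_32FC3 frames; main.cc scales them by 255 when writing video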
}  // namespace PaddleGAN
#include <glog/logging.h>
#include <iostream>
#include <string>
#include <vector>
#include <numeric>
#include <sys/types.h>
#include <sys/stat.h>
#include <math.h>
#include <algorithm>
#include "include/vsr.h"
#include <gflags/gflags.h>
DEFINE_string(model_path, "", "Path of inference model");
DEFINE_string(param_path, "", "Path of inference param");
DEFINE_int32(frame_num, 2, "frame_num");
DEFINE_string(video_path, "", "Path of the input video");
DEFINE_string(device, "CPU", "Device to run on, one of CPU/GPU/XPU; default is CPU");
DEFINE_string(output_dir, "output", "Directory of output visualization files.");
DEFINE_int32(gpu_id, 0, "Device id of GPU to execute");
DEFINE_bool(use_mkldnn, false, "Whether use mkldnn with CPU");
DEFINE_int32(cpu_threads, 1, "Num of threads with CPU");
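// Example invocation (paths are placeholders; flags mirror the DEFINE_* above):
//   ./vsr --model_path=./msvsr.pdmodel --param_path=./msvsr.pdiparams \
//         --video_path=./low_res.mp4 --output_dir=./output --device=GPU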
void main_predict(const std::string& video_path,
                  PaddleGAN::VSR* vsr,
                  const std::string& output_dir = "output") {
    // Open the input video
    cv::VideoCapture capture;
    std::string video_out_name = "output.mp4";
    capture.open(video_path.c_str());
    if (!capture.isOpened()) {
        printf("cannot open video: %s\n", video_path.c_str());
        return;
    }

    // Get video info: fps, frame count
    int video_fps = static_cast<int>(capture.get(CV_CAP_PROP_FPS));
    int video_frame_count = static_cast<int>(capture.get(CV_CAP_PROP_FRAME_COUNT));

    // Fixed output frame size, specific to the msvsr model
    int out_width = 1280;
    int out_height = 720;
    printf("fps: %d, frame_count: %d\n", video_fps, video_frame_count);

    // Create a VideoWriter for the output
    cv::VideoWriter video_out;
    std::string video_out_path(output_dir);
    video_out_path += "/" + video_out_name;
    video_out.open(video_out_path,
                   0x00000021,  // fourcc commonly mapped to H.264 for .mp4 by the FFmpeg backend
                   video_fps,
                   cv::Size(out_width, out_height),
                   true);
    if (!video_out.isOpened()) {
        printf("create video writer failed!\n");
        return;
    }
    // Capture frames in groups of frame_num and run inference
    cv::Mat frame;
    int frame_id = 0;
    bool reach_end = false;
    while (capture.isOpened()) {
        std::vector<cv::Mat> imgs;
        for (int i = 0; i < FLAGS_frame_num; i++) {
            capture.read(frame);
            if (!frame.empty()) {
                // clone() is required: capture.read() reuses the same frame buffer
                imgs.push_back(frame.clone());
            } else {
                reach_end = true;
            }
        }
        if (reach_end) {
            break;
        }
        std::vector<cv::Mat> result;
        vsr->Run(imgs, &result);
        for (auto& item : result) {
            cv::Mat temp = cv::Mat::zeros(item.size(), CV_8UC3);
            item.convertTo(temp, CV_8UC3, 255);
            video_out.write(temp);
            printf("Processing frame: %d\n", frame_id);
            // auto im_nm = std::to_string(frame_id) + "test.jpg";
            // cv::imwrite(FLAGS_output_dir + im_nm, temp);
            frame_id += 1;
        }
    }
    printf("inference finished, output video saved at %s\n", video_out_path.c_str());
    capture.release();
    video_out.release();
}
int main(int argc, char** argv) {
    // Parse command-line flags
    google::ParseCommandLineFlags(&argc, &argv, true);

    // Load the model and create a VSR predictor
    PaddleGAN::VSR vsr(FLAGS_model_path, FLAGS_param_path, FLAGS_device, FLAGS_gpu_id,
                       FLAGS_use_mkldnn, FLAGS_cpu_threads);

    // Run inference on the input video
    main_predict(FLAGS_video_path, &vsr, FLAGS_output_dir);
    return 0;
}
#include "opencv2/core.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/imgproc.hpp"
#include <chrono>
#include <iomanip>
#include <iostream>
#include <ostream>
#include <vector>
#include <cstring>
#include <fstream>
#include <math.h>
#include <numeric>
#include <include/process_op.h>
// RGB -> CHW
void Permute::Run(const cv::Mat *im, float *data) {
    int rh = im->rows;
    int rw = im->cols;
    int ch = im->channels();
    for (int i = 0; i < ch; ++i) {
        // Wrap the destination plane in a Mat header and extract channel i into it
        cv::extractChannel(*im, cv::Mat(rh, rw, CV_32FC1, data + i * rh * rw), i);
    }
}
void Normalize::Run(cv::Mat *im, const std::vector<float> &mean,
                    const std::vector<float> &scale, const bool is_scale) {
    double e = 1.0;
    if (is_scale) {
        e /= 255.0;
    }
    (*im).convertTo(*im, CV_32FC3, e);
    for (int h = 0; h < im->rows; h++) {
        for (int w = 0; w < im->cols; w++) {
            im->at<cv::Vec3f>(h, w)[0] = (im->at<cv::Vec3f>(h, w)[0] - mean[0]) * scale[0];
            im->at<cv::Vec3f>(h, w)[1] = (im->at<cv::Vec3f>(h, w)[1] - mean[1]) * scale[1];
            im->at<cv::Vec3f>(h, w)[2] = (im->at<cv::Vec3f>(h, w)[2] - mean[2]) * scale[2];
        }
    }
}
#include "include/vsr.h"
#include <iostream>
namespace PaddleGAN {
// Load the model and initialize the predictor
void VSR::LoadModel(const std::string& model_path,
                    const std::string& param_path) {
    paddle_infer::Config config;
    config.SetModel(model_path, param_path);
    if (this->device_ == "GPU") {
        config.EnableUseGpu(200, this->gpu_id_);
    } else {
        config.DisableGpu();
        if (this->use_mkldnn_) {
            config.EnableMKLDNN();
            // cache 10 shapes for mkldnn to avoid memory leak; borrowed from PaddleSeg
            config.SetMkldnnCacheCapacity(10);
        }
        config.SetCpuMathLibraryNumThreads(this->cpu_threads_);
    }
    config.SwitchUseFeedFetchOps(false);
    config.SwitchIrOptim(true);
    config.EnableMemoryOptim();
    config.DisableGlogInfo();
    this->predictor_ = paddle_infer::CreatePredictor(config);
}
std::vector<float> VSR::Preprocess(cv::Mat& img) {
    cv::Mat new_img;
    img.copyTo(new_img);
    cv::cvtColor(new_img, new_img, cv::COLOR_BGR2RGB);

    // normalize one image and unroll it to CHW
    this->normalize_op_.Run(&new_img, this->mean_, this->scale_, true);
    std::vector<float> unroll(1 * 3 * new_img.rows * new_img.cols, 0.0f);
    this->permute_op_.Run(&new_img, unroll.data());
    return unroll;
}
void VSR::Run(const std::vector<cv::Mat>& imgs, std::vector<cv::Mat>* result) {
    int frame_num = imgs.size();
    int rows = imgs[0].rows;
    int cols = imgs[0].cols;

    // Preprocess: concatenate the unrolled frames into one input buffer
    std::vector<float> in_data_all;
    for (int i = 0; i < frame_num; i++) {
        cv::Mat im = imgs[i];
        std::vector<float> unroll = this->Preprocess(im);
        in_data_all.insert(in_data_all.end(), unroll.begin(), unroll.end());
    }

    // Set input
    auto input_names = this->predictor_->GetInputNames();
    auto input_t = this->predictor_->GetInputHandle(input_names[0]);
    input_t->Reshape({1, frame_num, 3, rows, cols});
    input_t->CopyFromCpu(in_data_all.data());

    // Run
    this->predictor_->Run();

    // Get output
    auto output_names = this->predictor_->GetOutputNames();
    auto output_t = this->predictor_->GetOutputHandle(output_names[0]);
    std::vector<int> output_shape = output_t->shape();
    int out_num = std::accumulate(output_shape.begin(), output_shape.end(), 1, std::multiplies<int>());
    std::vector<float> out_data;
    out_data.resize(out_num);
    output_t->CopyToCpu(out_data.data());
// group to image
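    // The output tensor layout is [1, T, 3, H, W] (three channel planes per frame,
    // assumed RGB here); each frame is rebuilt as a BGR HWC cv::Mat by reading the
    // channel planes in reverse order.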
    cv::Mat res = cv::Mat::zeros(output_shape[3], output_shape[4], CV_32FC3);
    int pix_num = output_shape[3] * output_shape[4];
    int frame_pix_num = pix_num * 3;
    for (int frame = 0; frame < output_shape[1]; frame++) {
        int index = 0;
        for (int h = 0; h < output_shape[3]; ++h) {
            for (int w = 0; w < output_shape[4]; ++w) {
                res.at<cv::Vec3f>(h, w) = {out_data[2 * pix_num + index + frame_pix_num * frame],
                                           out_data[pix_num + index + frame_pix_num * frame],
                                           out_data[index + frame_pix_num * frame]};
                index += 1;
            }
        }
        // push a deep copy: `res` is reused for the next frame
        result->push_back(res.clone());
    }
}
}  // namespace PaddleGAN
@@ -71,4 +71,5 @@ test_tipc/
<a name="more"></a>
#### More tutorials
Each functional test covers training-related options such as mixed precision, pruning, and quantization, as well as inference-related configurations such as mkldnn and TensorRT. Follow the links below for details and usage tutorials:
- [test_train_inference_python usage](docs/test_train_inference_python.md): tests basic Python-based model training and inference.
- [test_inference_cpp usage](docs/test_inference_cpp.md): tests C++-based model inference.
===========================cpp_infer_params===========================
model_name:msvsr
inference:./deploy/cpp_infer/build/vsr
--infer_model_path:./inference/msvsr/multistagevsrmodel_generator.pdmodel
--infer_param_path:./inference/msvsr/multistagevsrmodel_generator.pdiparams
--video_path:./data/low_res.mp4
--output_dir:./test_tipc/output/msvsr
--frame_num:2
--device:GPU
--gpu_id:1
--use_mkldnn:True
--cpu_threads:1
# C++ Inference Functional Test

The main program of the C++ inference functional test is `test_inference_cpp.sh`, which tests model inference built on the Paddle Inference C++ library.

## 1. Test summary

| Model type | Device | Batch size | TensorRT | mkldnn | CPU multi-threading |
| :----: | :----: | :----: | :----: | :----: | :----: |
| Normal model | GPU | 1 | - | - | - |
| Normal model | CPU | 1 | - | fp32 | Supported |

## 2. Test workflow

To set up the runtime environment, install PaddleGAN as described in the [installation guide](../../docs/zh_CN/install.md). The environment recommended by TIPC:
- PaddlePaddle=2.3.1
- CUDA=10.2
- cuDNN=7.6.5
### 2.1 Functional test

First run `prepare.sh` to download the model and data, then run `test_inference_cpp.sh` to run the test. For the msvsr model:
```bash
# Prepare the model and data
bash test_tipc/prepare.sh test_tipc/configs/msvsr/inference_cpp.txt cpp_infer
# Run the C++ inference test; edit inference_cpp.txt to test inference under different configurations
bash test_tipc/test_inference_cpp.sh test_tipc/configs/msvsr/inference_cpp.txt
```
After the inference commands finish, the run logs and outputs are saved under `test_tipc/output`, including the following files:
```shell
test_tipc/output
├── infer_cpp/results_cpp_infer.log # status log for each executed command
├── infer_cpp/infer_cpp_GPU.log # log of the GPU inference test
├── infer_cpp/infer_cpp_CPU_use_mkldnn_threads_1.log # log of the CPU inference test with mkldnn enabled and 1 thread
├── output.mp4 # video super-resolution result
......
```
results_cpp_infer.log records the run status of every command. If a command succeeds, it contains:
```
Run successfully with command - ./deploy/cpp_infer/build/vsr --model_path=./inference/msvsr/multistagevsrmodel_generator.pdmodel --param_path=./inference/msvsr/multistagevsrmodel_generator.pdiparams --video_path=./data/low_res.mp4 --output_dir=./test_tipc/output/msvsr --frame_num=2 --device=GPU --gpu_id=1 --use_mkldnn=True --cpu_threads=1 > ./test_tipc/output/infer_cpp/infer_cpp_GPU.log 2>&1!
......
```
If a command fails, it contains:
```
Run failed with command - ./deploy/cpp_infer/build/vsr --model_path=./inference/msvsr/multistagevsrmodel_generator.pdmodel --param_path=./inference/msvsr/multistagevsrmodel_generator.pdiparams --video_path=./data/low_res.mp4 --output_dir=./test_tipc/output/msvsr --frame_num=2 --device=GPU --gpu_id=1 --use_mkldnn=True --cpu_threads=1 > ./test_tipc/output/infer_cpp/infer_cpp_GPU.log 2>&1!
......
```
Use the contents of results_cpp_infer.log to determine which command failed.
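For example, to surface only the failed commands (an illustrative one-liner, not part of the test scripts):
```bash
grep "Run failed" ./test_tipc/output/infer_cpp/results_cpp_infer.log
```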
@@ -2,7 +2,7 @@
FILENAME=$1
# MODE be one of ['lite_train_lite_infer' 'lite_train_whole_infer' 'whole_train_whole_infer',
# 'whole_infer', 'benchmark_train', 'cpp_infer']
MODE=$2
@@ -31,13 +31,6 @@ model_name=$(func_parser_value "${lines[1]}")
trainer_list=$(func_parser_value "${lines[14]}")
if [ ${MODE} = "benchmark_train" ];then
pip install -v -e .
MODE="lite_train_lite_infer"
fi
# MODE be one of ['lite_train_lite_infer' 'lite_train_whole_infer' 'whole_train_whole_infer',
# 'whole_infer
if [ ${MODE} = "lite_train_lite_infer" ];then
@@ -173,9 +166,15 @@ elif [ ${MODE} = "whole_infer" ];then
mv ./data/SinGAN-official_images/Images/stone.png ./data/singan
fi
elif [ ${MODE} = "benchmark_train" ];then
if [ ${model_name} = "msvsr" ]; then
if [ ${model_name} == "msvsr" ]; then
rm -rf ./data/reds*
wget -nc -P ./data/ https://paddlegan.bj.bcebos.com/datasets/reds_lite.tar --no-check-certificate
cd ./data/ && tar xf reds_lite.tar && cd ../
fi
elif [ ${MODE} = "cpp_infer" ]; then
if [ ${model_name} == "msvsr" ]; then
rm -rf ./inference/msvsr*
wget -nc -P ./inference https://paddlegan.bj.bcebos.com/static_model/msvsr.tar --no-check-certificate
cd ./inference && tar xf msvsr.tar && cd ../
fi
fi
#!/bin/bash
source test_tipc/common_func.sh
FILENAME=$1
MODE=$2
dataline=$(awk 'NR==1, NR==18{print}' $FILENAME)
# parser params
IFS=$'\n'
lines=(${dataline})
# parser cpp inference params
model_name=$(func_parser_value "${lines[1]}")
infer_cmd=$(func_parser_value "${lines[2]}")
model_path=$(func_parser_value "${lines[3]}")
param_path=$(func_parser_value "${lines[4]}")
video_path=$(func_parser_value "${lines[5]}")
output_dir=$(func_parser_value "${lines[6]}")
frame_num=$(func_parser_value "${lines[7]}")
device=$(func_parser_value "${lines[8]}")
gpu_id=$(func_parser_value "${lines[9]}")
use_mkldnn=$(func_parser_value "${lines[10]}")
cpu_threads=$(func_parser_value "${lines[11]}")
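# NOTE: the parsers above read inference_cpp.txt by fixed line position,
# so the keys in that config file must stay in exactly this order.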
LOG_PATH="./test_tipc/output/infer_cpp"
mkdir -p ${LOG_PATH}
status_log="${LOG_PATH}/results_cpp_infer.log"
function func_cpp_inference(){
    # set the log path
    if [ ${device} = "GPU" ]; then
        _save_log_path="${LOG_PATH}/infer_cpp_${device}.log"
    elif [ ${device} = "CPU" ]; then
        _save_log_path="${LOG_PATH}/infer_cpp_${device}_usemkldnn_${use_mkldnn}_threads_${cpu_threads}.log"
    fi
    # set params
    set_model_path=$(func_set_params "--model_path" "${model_path}")
    set_param_path=$(func_set_params "--param_path" "${param_path}")
    set_video_path=$(func_set_params "--video_path" "${video_path}")
    set_output_dir=$(func_set_params "--output_dir" "${output_dir}")
    set_frame_num=$(func_set_params "--frame_num" "${frame_num}")
    set_device=$(func_set_params "--device" "${device}")
    set_gpu_id=$(func_set_params "--gpu_id" "${gpu_id}")
    set_use_mkldnn=$(func_set_params "--use_mkldnn" "${use_mkldnn}")
    set_cpu_threads=$(func_set_params "--cpu_threads" "${cpu_threads}")
    # run inference
    cmd="${infer_cmd} ${set_model_path} ${set_param_path} ${set_video_path} ${set_output_dir} ${set_frame_num} ${set_device} ${set_gpu_id} ${set_use_mkldnn} ${set_cpu_threads} > ${_save_log_path} 2>&1"
    eval $cmd
    last_status=${PIPESTATUS[0]}
    status_check $last_status "${cmd}" "${status_log}" "${model_name}"
}
cd deploy/cpp_infer
if [ -d "opencv-3.4.7/opencv3/" ] && [ $(md5sum opencv-3.4.7.tar.gz | awk -F ' ' '{print $1}') = "faa2b5950f8bee3f03118e600c74746a" ];then
echo "################### build opencv skipped ###################"
else
echo "################### building opencv ###################"
rm -rf opencv-3.4.7.tar.gz opencv-3.4.7/
wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/opencv-3.4.7.tar.gz
tar -xf opencv-3.4.7.tar.gz
cd opencv-3.4.7/
install_path=$(pwd)/opencv3
rm -rf build
mkdir build
cd build
cmake .. \
-DCMAKE_INSTALL_PREFIX=${install_path} \
-DCMAKE_BUILD_TYPE=Release \
-DBUILD_SHARED_LIBS=OFF \
-DWITH_IPP=OFF \
-DBUILD_IPP_IW=OFF \
-DWITH_LAPACK=OFF \
-DWITH_EIGEN=OFF \
-DCMAKE_INSTALL_LIBDIR=lib64 \
-DWITH_ZLIB=ON \
-DBUILD_ZLIB=ON \
-DWITH_JPEG=ON \
-DBUILD_JPEG=ON \
-DWITH_PNG=ON \
-DBUILD_PNG=ON \
-DWITH_TIFF=ON \
-DBUILD_TIFF=ON \
-DWITH_FFMPEG=ON
make -j
make install
cd ../../
echo "################### building opencv finished ###################"
fi
if [ -d "paddle_inference" ]; then
echo "################### download inference lib skipped ###################"
else
echo "################### downloading inference lib ###################"
wget -nc https://paddle-inference-lib.bj.bcebos.com/2.3.1/cxx_c/Linux/GPU/x86-64_gcc8.2_avx_mkl_cuda10.1_cudnn7.6.5_trt6.0.1.5/paddle_inference.tgz
tar -xf paddle_inference.tgz
echo "################### downloading inference lib finished ###################"
fi
echo "################### building PaddleGAN demo ####################"
OPENCV_DIR=$(pwd)/opencv-3.4.7/opencv3
LIB_DIR=$(pwd)/paddle_inference
CUDA_LIB_DIR=$(dirname `find /usr -name libcudart.so`)
CUDNN_LIB_DIR=$(dirname `find /usr -name libcudnn.so`)
TENSORRT_DIR=''
export LD_LIBRARY_PATH=$(dirname `find ${PWD} -name libonnxruntime.so.1.11.1`):"$LD_LIBRARY_PATH"
export LD_LIBRARY_PATH=$(dirname `find ${PWD} -name libpaddle2onnx.so.0.9.9`):"$LD_LIBRARY_PATH"
BUILD_DIR=build
rm -rf ${BUILD_DIR}
mkdir ${BUILD_DIR}
cd ${BUILD_DIR}
cmake .. \
-DPADDLE_LIB=${LIB_DIR} \
-DWITH_MKL=ON \
-DWITH_GPU=ON \
-DWITH_STATIC_LIB=OFF \
-DWITH_TENSORRT=OFF \
-DOPENCV_DIR=${OPENCV_DIR} \
-DCUDNN_LIB=${CUDNN_LIB_DIR} \
-DCUDA_LIB=${CUDA_LIB_DIR} \
-DTENSORRT_DIR=${TENSORRT_DIR}
make -j
cd ../
echo "################### building PaddleGAN demo finished ###################"
echo "################### running test ###################"
cd ../../
func_cpp_inference