From d7d3fbeaa9f26a9445b9ae28218f6a016e98686d Mon Sep 17 00:00:00 2001
From: wangxinxin08 <69842442+wangxinxin08@users.noreply.github.com>
Date: Fri, 11 Sep 2020 21:42:39 +0800
Subject: [PATCH] modify deploy inference code (#1394)

fix compile error in code

modify code for inference
---
 deploy/cpp/include/object_detector.h | 10 +++--
 deploy/cpp/src/main.cc               | 58 ++++++++++++++----------
 deploy/cpp/src/object_detector.cc    | 66 +++++++++++++++++++++-------
 3 files changed, 92 insertions(+), 42 deletions(-)

diff --git a/deploy/cpp/include/object_detector.h b/deploy/cpp/include/object_detector.h
index 82d860f8d..d3f24a4f2 100644
--- a/deploy/cpp/include/object_detector.h
+++ b/deploy/cpp/include/object_detector.h
@@ -18,6 +18,7 @@
 #include <string>
 #include <vector>
 #include <memory>
+#include <ctime>
 #include <utility>
 
 #include <opencv2/core/core.hpp>
@@ -74,9 +75,12 @@ class ObjectDetector {
       const int gpu_id=0);
 
   // Run predictor
-  void Predict(
-      const cv::Mat& img,
-      std::vector<ObjectResult>* result);
+  void Predict(const cv::Mat& im,
+      const double threshold = 0.5,
+      const int warmup = 0,
+      const int repeats = 1,
+      const bool run_benchmark = false,
+      std::vector<ObjectResult>* result = nullptr);
 
   // Get Model Label list
   const std::vector<std::string>& GetLabelList() const {
diff --git a/deploy/cpp/src/main.cc b/deploy/cpp/src/main.cc
index 90dfcf97f..08bd7daa1 100644
--- a/deploy/cpp/src/main.cc
+++ b/deploy/cpp/src/main.cc
@@ -29,6 +29,9 @@ DEFINE_bool(use_camera, false, "Use camera or not");
 DEFINE_string(run_mode, "fluid", "Mode of running(fluid/trt_fp32/trt_fp16)");
 DEFINE_int32(gpu_id, 0, "Device id of GPU to execute");
 DEFINE_int32(camera_id, -1, "Device id of camera to predict");
+DEFINE_bool(run_benchmark, false, "Whether to predict a image_file repeatedly for benchmark");
+DEFINE_double(threshold, 0.5, "Threshold of score.");
+DEFINE_string(output_dir, "output", "Directory of output visualization files.");
 
 void PredictVideo(const std::string& video_path,
                   PaddleDetection::ObjectDetector* det) {
@@ -72,7 +75,7 @@
     if (frame.empty()) {
       break;
     }
-    det->Predict(frame, &result);
+    det->Predict(frame, 0.5, 0, 1, false, &result);
     cv::Mat out_im = PaddleDetection::VisualizeResult(
         frame, result, labels, colormap);
     for (const auto& item : result) {
@@ -93,31 +96,40 @@
 }
 
 void PredictImage(const std::string& image_path,
-                  PaddleDetection::ObjectDetector* det) {
+                  const double threshold,
+                  const bool run_benchmark,
+                  PaddleDetection::ObjectDetector* det,
+                  const std::string& output_dir = "output") {
   // Open input image as an opencv cv::Mat object
   cv::Mat im = cv::imread(image_path, 1);
   // Store all detected result
   std::vector<PaddleDetection::ObjectResult> result;
-  det->Predict(im, &result);
-  for (const auto& item : result) {
-    printf("class=%d confidence=%.4f rect=[%d %d %d %d]\n",
-        item.class_id,
-        item.confidence,
-        item.rect[0],
-        item.rect[1],
-        item.rect[2],
-        item.rect[3]);
+  if (run_benchmark)
+  {
+    det->Predict(im, threshold, 100, 100, run_benchmark, &result);
+  }else
+  {
+    det->Predict(im, 0.5, 0, 1, run_benchmark, &result);
+    for (const auto& item : result) {
+      printf("class=%d confidence=%.4f rect=[%d %d %d %d]\n",
+          item.class_id,
+          item.confidence,
+          item.rect[0],
+          item.rect[1],
+          item.rect[2],
+          item.rect[3]);
+    }
+    // Visualization result
+    auto labels = det->GetLabelList();
+    auto colormap = PaddleDetection::GenerateColorMap(labels.size());
+    cv::Mat vis_img = PaddleDetection::VisualizeResult(
+        im, result, labels, colormap);
+    std::vector<int> compression_params;
+    compression_params.push_back(CV_IMWRITE_JPEG_QUALITY);
+    compression_params.push_back(95);
+    cv::imwrite(output_dir + "/output.jpg", vis_img, compression_params);
+    printf("Visualized output saved as output.jpg\n");
   }
-  // Visualization result
-  auto labels = det->GetLabelList();
-  auto colormap = PaddleDetection::GenerateColorMap(labels.size());
-  cv::Mat vis_img = PaddleDetection::VisualizeResult(
-      im, result, labels, colormap);
-  std::vector<int> compression_params;
-  compression_params.push_back(CV_IMWRITE_JPEG_QUALITY);
-  compression_params.push_back(95);
-  cv::imwrite("output.jpg", vis_img, compression_params);
-  printf("Visualized output saved as output.jpg\n");
 }
 
 int main(int argc, char** argv) {
@@ -139,10 +151,10 @@ int main(int argc, char** argv) {
   PaddleDetection::ObjectDetector det(FLAGS_model_dir, FLAGS_use_gpu,
                                       FLAGS_run_mode, FLAGS_gpu_id);
   // Do inference on input video or image
-  if (!FLAGS_video_path.empty() or FLAGS_use_camera) {
+  if (!FLAGS_video_path.empty() || FLAGS_use_camera) {
     PredictVideo(FLAGS_video_path, &det);
   } else if (!FLAGS_image_path.empty()) {
-    PredictImage(FLAGS_image_path, &det);
+    PredictImage(FLAGS_image_path, FLAGS_threshold, FLAGS_run_benchmark, &det, FLAGS_output_dir);
   }
   return 0;
 }
diff --git a/deploy/cpp/src/object_detector.cc b/deploy/cpp/src/object_detector.cc
index bffd5cc55..2b18de3bb 100644
--- a/deploy/cpp/src/object_detector.cc
+++ b/deploy/cpp/src/object_detector.cc
@@ -39,7 +39,7 @@ void ObjectDetector::LoadModel(const std::string& model_dir,
       printf("TensorRT int8 mode is not supported now, "
              "please use 'trt_fp32' or 'trt_fp16' instead");
     } else {
-      if (run_mode != "trt_32") {
+      if (run_mode != "trt_fp32") {
        printf("run_mode should be 'fluid', 'trt_fp32' or 'trt_fp16'");
       }
     }
@@ -56,6 +56,7 @@ void ObjectDetector::LoadModel(const std::string& model_dir,
   }
   config.SwitchUseFeedFetchOps(false);
   config.SwitchSpecifyInputNames(true);
+  config.DisableGlogInfo();
   // Memory optimization
   config.EnableMemoryOptim();
   predictor_ = std::move(CreatePaddlePredictor(config));
@@ -155,7 +156,11 @@ void ObjectDetector::Postprocess(
 }
 
 void ObjectDetector::Predict(const cv::Mat& im,
-                             std::vector<ObjectResult>* result) {
+                             const double threshold,
+                             const int warmup,
+                             const int repeats,
+                             const bool run_benchmark,
+                             std::vector<ObjectResult>* result) {
   // Preprocess image
   Preprocess(im);
   // Prepare input tensor
@@ -182,24 +187,53 @@ void ObjectDetector::Predict(const cv::Mat& im,
     }
   }
   // Run predictor
-  predictor_->ZeroCopyRun();
-  // Get output tensor
-  auto output_names = predictor_->GetOutputNames();
-  auto out_tensor = predictor_->GetOutputTensor(output_names[0]);
-  std::vector<int> output_shape = out_tensor->shape();
-  // Calculate output length
-  int output_size = 1;
-  for (int j = 0; j < output_shape.size(); ++j) {
-    output_size *= output_shape[j];
+  for (int i = 0; i < warmup; i++)
+  {
+    predictor_->ZeroCopyRun();
+    // Get output tensor
+    auto output_names = predictor_->GetOutputNames();
+    auto out_tensor = predictor_->GetOutputTensor(output_names[0]);
+    std::vector<int> output_shape = out_tensor->shape();
+    // Calculate output length
+    int output_size = 1;
+    for (int j = 0; j < output_shape.size(); ++j) {
+      output_size *= output_shape[j];
+    }
+
+    if (output_size < 6) {
+      std::cerr << "[WARNING] No object detected." << std::endl;
+    }
+    output_data_.resize(output_size);
+    out_tensor->copy_to_cpu(output_data_.data());
   }
 
-  if (output_size < 6) {
-    std::cerr << "[WARNING] No object detected." << std::endl;
+  std::clock_t start = clock();
+  for (int i = 0; i < repeats; i++)
+  {
+    predictor_->ZeroCopyRun();
+    // Get output tensor
+    auto output_names = predictor_->GetOutputNames();
+    auto out_tensor = predictor_->GetOutputTensor(output_names[0]);
+    std::vector<int> output_shape = out_tensor->shape();
+    // Calculate output length
+    int output_size = 1;
+    for (int j = 0; j < output_shape.size(); ++j) {
+      output_size *= output_shape[j];
+    }
+
+    if (output_size < 6) {
+      std::cerr << "[WARNING] No object detected." << std::endl;
+    }
+    output_data_.resize(output_size);
+    out_tensor->copy_to_cpu(output_data_.data());
  }
-  output_data_.resize(output_size);
-  out_tensor->copy_to_cpu(output_data_.data());
+  std::clock_t end = clock();
+  float ms = static_cast<float>(end - start) / CLOCKS_PER_SEC / repeats * 1000.;
+  printf("Inference: %f ms per batch image\n", ms);
   // Postprocessing result
-  Postprocess(im, result);
+  if(!run_benchmark) {
+    Postprocess(im, result);
+  }
 }
 
 std::vector<int> GenerateColorMap(int num_class) {
-- 
GitLab
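
For quick verification, here is a minimal caller sketch (not part of the commit)
that exercises the new Predict() signature introduced by this patch. The model
directory, image path, and include path are placeholders assumed from the
deploy/cpp layout; adjust them to your setup.

    // Sketch: benchmark a single image with the new Predict() interface.
    // Mirrors the --run_benchmark branch of PredictImage: 100 warmup runs,
    // 100 timed repeats, postprocessing skipped when run_benchmark is true.
    #include <vector>
    #include <opencv2/opencv.hpp>
    #include "include/object_detector.h"  // placeholder include path

    int main() {
      // Placeholder exported-model directory; CPU, default "fluid" run mode.
      PaddleDetection::ObjectDetector det("inference_model", /*use_gpu=*/false,
                                          /*run_mode=*/"fluid", /*gpu_id=*/0);
      cv::Mat im = cv::imread("demo.jpg", 1);  // placeholder test image
      std::vector<PaddleDetection::ObjectResult> result;
      // Prints "Inference: ... ms per batch image" averaged over the repeats.
      det.Predict(im, /*threshold=*/0.5, /*warmup=*/100, /*repeats=*/100,
                  /*run_benchmark=*/true, &result);
      return 0;
    }

The equivalent command-line benchmark run using the flags added in main.cc
would look like (paths are placeholders):

    ./main --model_dir=inference_model --image_path=demo.jpg --run_benchmark=true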