From b511d2dbe705a7b083daa093372a5ceec779af14 Mon Sep 17 00:00:00 2001
From: Channingss
Date: Fri, 15 May 2020 02:30:08 +0000
Subject: [PATCH] add demo

---
 deploy/cpp/demo/classifier.cpp |  75 ++++++++++++++++++++++
 deploy/cpp/demo/detector.cpp   | 110 +++++++++++++++++++++++++++++++++
 deploy/cpp/demo/segmenter.cpp  |  87 ++++++++++++++++++++++++++
 3 files changed, 272 insertions(+)
 create mode 100644 deploy/cpp/demo/classifier.cpp
 create mode 100644 deploy/cpp/demo/detector.cpp
 create mode 100644 deploy/cpp/demo/segmenter.cpp

diff --git a/deploy/cpp/demo/classifier.cpp b/deploy/cpp/demo/classifier.cpp
new file mode 100644
index 0000000..badb835
--- /dev/null
+++ b/deploy/cpp/demo/classifier.cpp
@@ -0,0 +1,75 @@
+// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <glog/logging.h>
+
+#include <fstream>
+#include <iostream>
+#include <string>
+#include <vector>
+
+#include "include/paddlex/paddlex.h"
+
+DEFINE_string(model_dir, "", "Path of inference model");
+DEFINE_bool(use_gpu, false, "Inferring with GPU or CPU");
+DEFINE_bool(use_trt, false, "Inferring with TensorRT");
+DEFINE_int32(gpu_id, 0, "GPU card id");
+DEFINE_string(key, "", "Key of encryption");
+DEFINE_string(image, "", "Path of test image file");
+DEFINE_string(image_list, "", "Path of test image list file");
+
+int main(int argc, char** argv) {
+  // Parse command-line flags
+  google::ParseCommandLineFlags(&argc, &argv, true);
+
+  if (FLAGS_model_dir == "") {
+    std::cerr << "--model_dir needs to be defined" << std::endl;
+    return -1;
+  }
+  if (FLAGS_image == "" && FLAGS_image_list == "") {
+    std::cerr << "--image or --image_list needs to be defined" << std::endl;
+    return -1;
+  }
+
+  // Load the model
+  PaddleX::Model model;
+  model.Init(FLAGS_model_dir, FLAGS_use_gpu, FLAGS_use_trt, FLAGS_gpu_id, FLAGS_key);
+
+  // Run prediction
+  if (FLAGS_image_list != "") {
+    std::ifstream inf(FLAGS_image_list);
+    if (!inf) {
+      std::cerr << "Fail to open file " << FLAGS_image_list << std::endl;
+      return -1;
+    }
+    std::string image_path;
+    while (getline(inf, image_path)) {
+      PaddleX::ClsResult result;
+      cv::Mat im = cv::imread(image_path, 1);
+      model.predict(im, &result);
+      std::cout << "Predict label: " << result.category
+                << ", label_id:" << result.category_id
+                << ", score: " << result.score << std::endl;
+    }
+  } else {
+    PaddleX::ClsResult result;
+    cv::Mat im = cv::imread(FLAGS_image, 1);
+    model.predict(im, &result);
+    std::cout << "Predict label: " << result.category
+              << ", label_id:" << result.category_id
+              << ", score: " << result.score << std::endl;
+  }
+
+  return 0;
+}
diff --git a/deploy/cpp/demo/detector.cpp b/deploy/cpp/demo/detector.cpp
new file mode 100644
index 0000000..e5fc280
--- /dev/null
+++ b/deploy/cpp/demo/detector.cpp
@@ -0,0 +1,110 @@
+// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <glog/logging.h>
+
+#include <fstream>
+#include <iostream>
+#include <string>
+#include <vector>
+
+#include "include/paddlex/paddlex.h"
+#include "include/paddlex/visualize.h"
+
+DEFINE_string(model_dir, "", "Path of inference model");
+DEFINE_bool(use_gpu, false, "Inferring with GPU or CPU");
+DEFINE_bool(use_trt, false, "Inferring with TensorRT");
+DEFINE_int32(gpu_id, 0, "GPU card id");
+DEFINE_string(key, "", "Key of encryption");
+DEFINE_string(image, "", "Path of test image file");
+DEFINE_string(image_list, "", "Path of test image list file");
+DEFINE_string(save_dir, "output", "Path to save visualized image");
+
+int main(int argc, char** argv) {
+  // Parse command-line flags
+  google::ParseCommandLineFlags(&argc, &argv, true);
+
+  if (FLAGS_model_dir == "") {
+    std::cerr << "--model_dir needs to be defined" << std::endl;
+    return -1;
+  }
+  if (FLAGS_image == "" && FLAGS_image_list == "") {
+    std::cerr << "--image or --image_list needs to be defined" << std::endl;
+    return -1;
+  }
+
+  // Load the model
+  PaddleX::Model model;
+  model.Init(FLAGS_model_dir, FLAGS_use_gpu, FLAGS_use_trt, FLAGS_gpu_id, FLAGS_key);
+
+  auto colormap = PaddleX::GenerateColorMap(model.labels.size());
+  std::string save_dir = "output";
+  // Run prediction
+  if (FLAGS_image_list != "") {
+    std::ifstream inf(FLAGS_image_list);
+    if (!inf) {
+      std::cerr << "Fail to open file " << FLAGS_image_list << std::endl;
+      return -1;
+    }
+    std::string image_path;
+    while (getline(inf, image_path)) {
+      PaddleX::DetResult result;
+      cv::Mat im = cv::imread(image_path, 1);
+      model.predict(im, &result);
+      for (int i = 0; i < result.boxes.size(); ++i) {
+        std::cout << "image file: " << image_path
+                  << ", predict label: " << result.boxes[i].category
+                  << ", label_id:" << result.boxes[i].category_id
+                  << ", score: " << result.boxes[i].score << ", box:("
+                  << result.boxes[i].coordinate[0] << ", "
+                  << result.boxes[i].coordinate[1] << ", "
+                  << result.boxes[i].coordinate[2] << ", "
+                  << result.boxes[i].coordinate[3] << ")" << std::endl;
+      }
+
+      // Visualize the detection result and save it
+      cv::Mat vis_img =
+          PaddleX::Visualize(im, result, model.labels, colormap, 0.5);
+      std::string save_path =
+          PaddleX::generate_save_path(FLAGS_save_dir, image_path);
+      cv::imwrite(save_path, vis_img);
+      result.clear();
+      std::cout << "Visualized output saved as " << save_path << std::endl;
+    }
+  } else {
+    PaddleX::DetResult result;
+    cv::Mat im = cv::imread(FLAGS_image, 1);
+    model.predict(im, &result);
+    for (int i = 0; i < result.boxes.size(); ++i) {
+      std::cout << "Predict label: " << result.boxes[i].category
+                << ", label_id:" << result.boxes[i].category_id
+                << ", score: " << result.boxes[i].score << ", box:("
+                << result.boxes[i].coordinate[0] << ", "
+                << result.boxes[i].coordinate[1] << ", "
+                << result.boxes[i].coordinate[2] << ", "
+                << result.boxes[i].coordinate[3] << ")" << std::endl;
+    }
+
+    // Visualize the detection result and save it
+    cv::Mat vis_img =
+        PaddleX::Visualize(im, result, model.labels, colormap, 0.5);
+    std::string save_path =
+        PaddleX::generate_save_path(FLAGS_save_dir, FLAGS_image);
+    cv::imwrite(save_path, vis_img);
+    result.clear();
+    std::cout << "Visualized output saved as " << save_path << std::endl;
+  }
+
+  return 0;
+}
diff --git a/deploy/cpp/demo/segmenter.cpp b/deploy/cpp/demo/segmenter.cpp
new file mode 100644
index 0000000..0492ef8
--- /dev/null
+++ b/deploy/cpp/demo/segmenter.cpp
@@ -0,0 +1,87 @@
+// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <glog/logging.h>
+
+#include <fstream>
+#include <iostream>
+#include <string>
+#include <vector>
+
+#include "include/paddlex/paddlex.h"
+#include "include/paddlex/visualize.h"
+
+DEFINE_string(model_dir, "", "Path of inference model");
+DEFINE_bool(use_gpu, false, "Inferring with GPU or CPU");
+DEFINE_bool(use_trt, false, "Inferring with TensorRT");
+DEFINE_int32(gpu_id, 0, "GPU card id");
+DEFINE_string(key, "", "Key of encryption");
+DEFINE_string(image, "", "Path of test image file");
+DEFINE_string(image_list, "", "Path of test image list file");
+DEFINE_string(save_dir, "output", "Path to save visualized image");
+
+int main(int argc, char** argv) {
+  // Parse command-line flags
+  google::ParseCommandLineFlags(&argc, &argv, true);
+
+  if (FLAGS_model_dir == "") {
+    std::cerr << "--model_dir needs to be defined" << std::endl;
+    return -1;
+  }
+  if (FLAGS_image == "" && FLAGS_image_list == "") {
+    std::cerr << "--image or --image_list needs to be defined" << std::endl;
+    return -1;
+  }
+
+  // Load the model
+  PaddleX::Model model;
+  model.Init(FLAGS_model_dir, FLAGS_use_gpu, FLAGS_use_trt, FLAGS_gpu_id, FLAGS_key);
+
+  auto colormap = PaddleX::GenerateColorMap(model.labels.size());
+  // Run prediction
+  if (FLAGS_image_list != "") {
+    std::ifstream inf(FLAGS_image_list);
+    if (!inf) {
+      std::cerr << "Fail to open file " << FLAGS_image_list << std::endl;
+      return -1;
+    }
+    std::string image_path;
+    while (getline(inf, image_path)) {
+      PaddleX::SegResult result;
+      cv::Mat im = cv::imread(image_path, 1);
+      model.predict(im, &result);
+      // Visualize the segmentation result and save it
+      cv::Mat vis_img =
+          PaddleX::Visualize(im, result, model.labels, colormap);
+      std::string save_path =
+          PaddleX::generate_save_path(FLAGS_save_dir, image_path);
+      cv::imwrite(save_path, vis_img);
+      result.clear();
+      std::cout << "Visualized output saved as " << save_path << std::endl;
+    }
+  } else {
+    PaddleX::SegResult result;
+    cv::Mat im = cv::imread(FLAGS_image, 1);
+    model.predict(im, &result);
+    // Visualize the segmentation result and save it
+    cv::Mat vis_img = PaddleX::Visualize(im, result, model.labels, colormap);
+    std::string save_path =
+        PaddleX::generate_save_path(FLAGS_save_dir, FLAGS_image);
+    cv::imwrite(save_path, vis_img);
+    result.clear();
+    std::cout << "Visualized output saved as " << save_path << std::endl;
+  }

+  return 0;
+}
--
GitLab
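
Example usage (a minimal sketch, not part of the patch): once the demos are compiled, each binary is driven by the gflags defined in its source file. The binary names and paths below are assumptions for illustration; only the flag names come from the code above.

    # Classify a single image (hypothetical model and image paths)
    ./classifier --model_dir=/path/to/inference_model --image=/path/to/test.jpg

    # Run detection over an image list on GPU and save visualized results
    ./detector --model_dir=/path/to/inference_model --image_list=/path/to/image_list.txt --use_gpu=true --gpu_id=0 --save_dir=output

    # Run segmentation on a single image and save the visualization
    ./segmenter --model_dir=/path/to/inference_model --image=/path/to/test.jpg --save_dir=output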