diff --git a/deploy/openvino/demo/classifier.cpp b/deploy/openvino/demo/classifier.cpp index 4dfbfac030d719f55c72b662461989e3873d9c66..50742dbf33ef48d257624e714933654b8476aa69 100755 --- a/deploy/openvino/demo/classifier.cpp +++ b/deploy/openvino/demo/classifier.cpp @@ -62,7 +62,7 @@ int main(int argc, char** argv) { model.predict(im, &result); std::cout << "Predict label: " << result.category << ", label_id:" << result.category_id - << ", score: " << result.score + << ", score: " << result.score << ", num_img: " << model.count_num_ << std::endl; } } else { diff --git a/deploy/openvino/demo/detector.cpp b/deploy/openvino/demo/detector.cpp index 35a84db8c55bdd82f0400367e8189fb876da8580..a1a4db7a42dcbadfca157d0a0c0da3a9805b420e 100644 --- a/deploy/openvino/demo/detector.cpp +++ b/deploy/openvino/demo/detector.cpp @@ -1,3 +1,17 @@ +// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ #include #include @@ -41,7 +55,7 @@ int main(int argc, char** argv) { } // - PaddleX::Model model; + PaddleX::Model model; model.Init(FLAGS_model_dir, FLAGS_cfg_dir, FLAGS_device); int imgs = 1; @@ -49,7 +63,7 @@ int main(int argc, char** argv) { // 进行预测 if (FLAGS_image_list != "") { std::ifstream inf(FLAGS_image_list); - if(!inf){ + if (!inf) { std::cerr << "Fail to open file " << FLAGS_image_list << std::endl; return -1; } @@ -58,16 +72,16 @@ int main(int argc, char** argv) { PaddleX::DetResult result; cv::Mat im = cv::imread(image_path, 1); model.predict(im, &result); - if(FLAGS_save_dir != ""){ - cv::Mat vis_img = - PaddleX::Visualize(im, result, model.labels, colormap, FLAGS_threshold); + if (FLAGS_save_dir != "") { + cv::Mat vis_img = PaddleX::Visualize( + im, result, model.labels, colormap, FLAGS_threshold); std::string save_path = - PaddleX::generate_save_path(FLAGS_save_dir, FLAGS_image); + PaddleX::generate_save_path(FLAGS_save_dir, FLAGS_image); cv::imwrite(save_path, vis_img); std::cout << "Visualized output saved as " << save_path << std::endl; } } - }else { + } else { PaddleX::DetResult result; cv::Mat im = cv::imread(FLAGS_image, 1); model.predict(im, &result); @@ -81,10 +95,10 @@ int main(int argc, char** argv) { << result.boxes[i].coordinate[2] << ", " << result.boxes[i].coordinate[3] << ")" << std::endl; } - if(FLAGS_save_dir != ""){ + if (FLAGS_save_dir != "") { // 可视化 - cv::Mat vis_img = - PaddleX::Visualize(im, result, model.labels, colormap, FLAGS_threshold); + cv::Mat vis_img = PaddleX::Visualize( + im, result, model.labels, colormap, FLAGS_threshold); std::string save_path = PaddleX::generate_save_path(FLAGS_save_dir, FLAGS_image); cv::imwrite(save_path, vis_img); @@ -94,5 +108,3 @@ int main(int argc, char** argv) { } return 0; } - - diff --git a/deploy/openvino/demo/segmenter.cpp b/deploy/openvino/demo/segmenter.cpp index 389c920dc7045baea75d0cd99fa3d0d3eb965bbe..05b935aa0c55c4f0bb48c261c2633297f1f57ed4 100644 --- 
a/deploy/openvino/demo/segmenter.cpp +++ b/deploy/openvino/demo/segmenter.cpp @@ -50,12 +50,12 @@ int main(int argc, char** argv) { // std::cout << "init start" << std::endl; - PaddleX::Model model; + PaddleX::Model model; model.Init(FLAGS_model_dir, FLAGS_cfg_dir, FLAGS_device); std::cout << "init done" << std::endl; int imgs = 1; auto colormap = PaddleX::GenerateColorMap(model.labels.size()); - + if (FLAGS_image_list != "") { std::ifstream inf(FLAGS_image_list); if (!inf) { @@ -69,7 +69,7 @@ int main(int argc, char** argv) { PaddleX::SegResult result; cv::Mat im = cv::imread(image_path, 1); model.predict(im, &result); - if(FLAGS_save_dir != ""){ + if (FLAGS_save_dir != "") { cv::Mat vis_img = PaddleX::Visualize(im, result, model.labels, colormap); std::string save_path = PaddleX::generate_save_path(FLAGS_save_dir, image_path); @@ -77,13 +77,11 @@ int main(int argc, char** argv) { std::cout << "Visualized output saved as " << save_path << std::endl; } } - }else{ + } else { PaddleX::SegResult result; cv::Mat im = cv::imread(FLAGS_image, 1); - std::cout << "predict start" << std::endl; model.predict(im, &result); - std::cout << "predict done" << std::endl; - if(FLAGS_save_dir != ""){ + if (FLAGS_save_dir != "") { cv::Mat vis_img = PaddleX::Visualize(im, result, model.labels, colormap); std::string save_path = PaddleX::generate_save_path(FLAGS_save_dir, FLAGS_image); diff --git a/deploy/openvino/include/paddlex/config_parser.h b/deploy/openvino/include/paddlex/config_parser.h index 5303e4da7ac0eb3de73bc57059617d361065f136..850e46656d9efdb5374e3086757cb5350f0457b2 100755 --- a/deploy/openvino/include/paddlex/config_parser.h +++ b/deploy/openvino/include/paddlex/config_parser.h @@ -54,4 +54,4 @@ class ConfigPaser { YAML::Node Transforms_; }; -} // namespace PaddleDetection +} // namespace PaddleX diff --git a/deploy/openvino/include/paddlex/paddlex.h b/deploy/openvino/include/paddlex/paddlex.h index 
2ae6d57e2381fb08b9baa62397a78ec927db5961..082f178923d34c1e615f52bd391dbe0829599e44 100755 --- a/deploy/openvino/include/paddlex/paddlex.h +++ b/deploy/openvino/include/paddlex/paddlex.h @@ -17,7 +17,8 @@ #include #include #include -#include +#include +#include #include "yaml-cpp/yaml.h" @@ -31,7 +32,7 @@ #include "include/paddlex/config_parser.h" #include "include/paddlex/results.h" #include "include/paddlex/transforms.h" -using namespace InferenceEngine; + namespace PaddleX { @@ -52,8 +53,8 @@ class Model { bool preprocess(cv::Mat* input_im, ImageBlob* inputs); bool predict(const cv::Mat& im, ClsResult* result); - - bool predict(const cv::Mat& im, DetResult* result); + + bool predict(const cv::Mat& im, DetResult* result); bool predict(const cv::Mat& im, SegResult* result); @@ -63,8 +64,8 @@ class Model { std::map labels; Transforms transforms_; ImageBlob inputs_; - Blob::Ptr output_; - CNNNetwork network_; - ExecutableNetwork executable_network_; + InferenceEngine::Blob::Ptr output_; + InferenceEngine::CNNNetwork network_; + InferenceEngine::ExecutableNetwork executable_network_; }; -} // namespce of PaddleX +} // namespace PaddleX diff --git a/deploy/openvino/include/paddlex/results.h b/deploy/openvino/include/paddlex/results.h index e7df0769cc438cb66d560742bf791dc70ceeb8f1..7a77e0e2df4dbe4889f7be176df173b00dc454fa 100755 --- a/deploy/openvino/include/paddlex/results.h +++ b/deploy/openvino/include/paddlex/results.h @@ -68,4 +68,4 @@ class SegResult : public BaseResult { score_map.clear(); } }; -} // namespce of PaddleX +} // namespace PaddleX diff --git a/deploy/openvino/include/paddlex/transforms.h b/deploy/openvino/include/paddlex/transforms.h index 11cde81a20ce0219d95c716f1632e20538e9868d..25c15d7c361523c207cc5d54271a420e21cf68e2 100755 --- a/deploy/openvino/include/paddlex/transforms.h +++ b/deploy/openvino/include/paddlex/transforms.h @@ -16,18 +16,17 @@ #include -#include -#include #include #include +#include +#include #include #include #include #include - 
#include -using namespace InferenceEngine; + namespace PaddleX { @@ -38,8 +37,7 @@ namespace PaddleX { class ImageBlob { public: // Original image height and width - //std::vector ori_im_size_ = std::vector(2); - Blob::Ptr ori_im_size_; + InferenceEngine::Blob::Ptr ori_im_size_; // Newest image height and width after process std::vector new_im_size_ = std::vector(2); @@ -50,7 +48,7 @@ class ImageBlob { // Resize scale float scale = 1.0; // Buffer for image data after preprocessing - Blob::Ptr blob; + InferenceEngine::Blob::Ptr blob; void clear() { im_size_before_resize_.clear(); @@ -90,7 +88,7 @@ class ResizeByShort : public Transform { } else { max_size_ = -1; } - }; + } virtual bool Run(cv::Mat* im, ImageBlob* data); private: @@ -196,12 +194,11 @@ class Padding : public Transform { } if (item["im_padding_value"].IsDefined()) { im_value_ = item["im_padding_value"].as>(); - } - else { + } else { im_value_ = {0, 0, 0}; } } - + virtual bool Run(cv::Mat* im, ImageBlob* data); private: diff --git a/deploy/openvino/src/paddlex.cpp b/deploy/openvino/src/paddlex.cpp index 465a64bcec540b92bdabcdbf9aba8675189921f2..c7a9a2636134e696fb3aea494de6844a018934b1 100755 --- a/deploy/openvino/src/paddlex.cpp +++ b/deploy/openvino/src/paddlex.cpp @@ -16,36 +16,37 @@ #include #include -using namespace InferenceEngine; namespace PaddleX { void Model::create_predictor(const std::string& model_dir, const std::string& cfg_dir, std::string device) { - Core ie; - network_ = ie.ReadNetwork(model_dir, model_dir.substr(0, model_dir.size() - 4) + ".bin"); + InferenceEngine::Core ie; + network_ = ie.ReadNetwork( + model_dir, model_dir.substr(0, model_dir.size() - 4) + ".bin"); network_.setBatchSize(1); - InputsDataMap inputInfo(network_.getInputsInfo()); + InferenceEngine::InputsDataMap inputInfo(network_.getInputsInfo()); std::string imageInputName; for (const auto & inputInfoItem : inputInfo) { - if (inputInfoItem.second->getTensorDesc().getDims().size() == 4){ + if 
(inputInfoItem.second->getTensorDesc().getDims().size() == 4) { imageInputName = inputInfoItem.first; inputInfoItem.second->setPrecision(Precision::FP32); - inputInfoItem.second->getPreProcess().setResizeAlgorithm(RESIZE_BILINEAR); + inputInfoItem.second->getPreProcess().setResizeAlgorithm( + RESIZE_BILINEAR); inputInfoItem.second->setLayout(Layout::NCHW); } - if (inputInfoItem.second->getTensorDesc().getDims().size() == 2){ + if (inputInfoItem.second->getTensorDesc().getDims().size() == 2) { imageInputName = inputInfoItem.first; inputInfoItem.second->setPrecision(Precision::FP32); } } - if(device == "MYRIAD"){ + if (device == "MYRIAD") { std::map networkConfig; networkConfig["VPU_HW_STAGES_OPTIMIZATION"] = "ON"; executable_network_ = ie.LoadNetwork(network_, device, networkConfig); - }else{ + } else { executable_network_ = ie.LoadNetwork(network_, device); } load_config(cfg_dir); @@ -98,7 +99,8 @@ bool Model::predict(const cv::Mat& im, ClsResult* result) { return false; } // 处理输入图像 - InferRequest infer_request = executable_network_.CreateInferRequest(); + InferenceEngine::InferRequest infer_request = + executable_network_.CreateInferRequest(); std::string input_name = network_.getInputsInfo().begin()->first; inputs_.blob = infer_request.GetBlob(input_name); cv::Mat im_clone = im.clone(); @@ -106,14 +108,13 @@ bool Model::predict(const cv::Mat& im, ClsResult* result) { std::cerr << "Preprocess failed!" 
<< std::endl; return false; } - infer_request.Infer(); - std::string output_name = network_.getOutputsInfo().begin()->first; output_ = infer_request.GetBlob(output_name); - MemoryBlob::CPtr moutput = as(output_); + InferenceEngine::MemoryBlob::CPtr moutput = + as(output_); auto moutputHolder = moutput->rmap(); float* outputs_data = moutputHolder.as(); @@ -122,7 +123,7 @@ bool Model::predict(const cv::Mat& im, ClsResult* result) { result->category_id = std::distance(outputs_data, ptr); result->score = *ptr; result->category = labels[result->category_id]; - + return true; } bool Model::predict(const cv::Mat& im, DetResult* result) { @@ -137,46 +138,45 @@ bool Model::predict(const cv::Mat& im, DetResult* result) { "to function predict()!" << std::endl; return false; } - InferRequest infer_request = executable_network_.CreateInferRequest(); - InputsDataMap input_maps = network_.getInputsInfo(); + InferenceEngine::InferRequest infer_request = + executable_network_.CreateInferRequest(); + InferenceEngine::InputsDataMap input_maps = network_.getInputsInfo(); std::string inputName; for (const auto & input_map : input_maps) { - if (input_map.second->getTensorDesc().getDims().size() == 4){ + if (input_map.second->getTensorDesc().getDims().size() == 4) { inputName = input_map.first; inputs_.blob = infer_request.GetBlob(inputName); } - if (input_map.second->getTensorDesc().getDims().size() == 2){ + if (input_map.second->getTensorDesc().getDims().size() == 2) { inputName = input_map.first; inputs_.ori_im_size_ = infer_request.GetBlob(inputName); } - } + } cv::Mat im_clone = im.clone(); if (!preprocess(&im_clone, &inputs_)) { std::cerr << "Preprocess failed!" 
<< std::endl; return false; } - infer_request.Infer(); - - OutputsDataMap out_map = network_.getOutputsInfo(); + InferenceEngine::OutputsDataMap out_map = network_.getOutputsInfo(); auto iter = out_map.begin(); std::string outputName = iter->first; - std::cout << "output: " << outputName << std::endl; - Blob::Ptr output = infer_request.GetBlob(outputName); - MemoryBlob::CPtr moutput = as(output); - TensorDesc blob_output = moutput->getTensorDesc(); + InferenceEngine::Blob::Ptr output = infer_request.GetBlob(outputName); + InferenceEngine::MemoryBlob::CPtr moutput = + as(output); + InferenceEngine::TensorDesc blob_output = moutput->getTensorDesc(); std::vector output_shape = blob_output.getDims(); auto moutputHolder = moutput->rmap(); float* data = moutputHolder.as(); int size = 1; - for (auto& i : output_shape){ + for (auto& i : output_shape) { size *= static_cast(i); } int num_boxes = size / 6; - for (int i = 0; i < num_boxes; ++i){ - if(data[i * 6] > 0){ + for (int i = 0; i < num_boxes; ++i) { + if (data[i * 6] > 0) { Box box; box.category_id = static_cast(data[i * 6]); box.category = labels[box.category_id]; @@ -207,10 +207,11 @@ bool Model::predict(const cv::Mat& im, SegResult* result) { return false; } // - InferRequest infer_request = executable_network_.CreateInferRequest(); + InferenceEngine::InferRequest infer_request = + executable_network_.CreateInferRequest(); std::string input_name = network_.getInputsInfo().begin()->first; inputs_.blob = infer_request.GetBlob(input_name); - + // cv::Mat im_clone = im.clone(); if (!preprocess(&im_clone, &inputs_)) { @@ -220,19 +221,19 @@ bool Model::predict(const cv::Mat& im, SegResult* result) { // infer_request.Infer(); - if (count_num_ >= 20){ + if (count_num_ >= 20) { total_time_ = total_time_ + time_used.count(); } - - - OutputsDataMap out_map = network_.getOutputsInfo(); + + InferenceEngine::OutputsDataMap out_map = network_.getOutputsInfo(); auto iter = out_map.begin(); iter++; std::string output_name_score =
iter->first; - std::cout << iter->first << std::endl; - Blob::Ptr output_score = infer_request.GetBlob(output_name_score); - MemoryBlob::CPtr moutput_score = as(output_score); - TensorDesc blob_score = moutput_score->getTensorDesc(); + InferenceEngine::Blob::Ptr output_score = + infer_request.GetBlob(output_name_score); + InferenceEngine::MemoryBlob::CPtr moutput_score = + as(output_score); + InferenceEngine::TensorDesc blob_score = moutput_score->getTensorDesc(); std::vector output_score_shape = blob_score.getDims(); int size = 1; for (auto& i : output_score_shape) { @@ -242,14 +243,15 @@ bool Model::predict(const cv::Mat& im, SegResult* result) { result->score_map.data.resize(size); auto moutputHolder_score = moutput_score->rmap(); float* score_data = moutputHolder_score.as(); - memcpy(result->score_map.data.data(),score_data,moutput_score->byteSize()); + memcpy(result->score_map.data.data(), score_data, moutput_score->byteSize()); iter++; std::string output_name_label = iter->first; - std::cout << iter->first << std::endl; - Blob::Ptr output_label = infer_request.GetBlob(output_name_label); - MemoryBlob::CPtr moutput_label = as(output_label); - TensorDesc blob_label = moutput_label->getTensorDesc(); + InferenceEngine::Blob::Ptr output_label = + infer_request.GetBlob(output_name_label); + InferenceEngine::MemoryBlob::CPtr moutput_label = + as(output_label); + InferenceEngine::TensorDesc blob_label = moutput_label->getTensorDesc(); std::vector output_label_shape = blob_label.getDims(); size = 1; for (auto& i : output_label_shape) { @@ -259,7 +261,7 @@ bool Model::predict(const cv::Mat& im, SegResult* result) { result->label_map.data.resize(size); auto moutputHolder_label = moutput_label->rmap(); int* label_data = moutputHolder_label.as(); - memcpy(result->label_map.data.data(),label_data,moutput_label->byteSize()); + memcpy(result->label_map.data.data(), label_data, moutput_label->byteSize()); @@ -315,5 +317,4 @@ bool Model::predict(const cv::Mat& im, SegResult* 
result) { result->score_map.shape = {mask_score.rows, mask_score.cols}; return true; } - -} // namespce of PaddleX +} // namespace PaddleX diff --git a/deploy/openvino/src/transforms.cpp b/deploy/openvino/src/transforms.cpp index f68ac32aabe5ec897f26dad8e9ebbfe4ae544b8b..0bbb21a1899edb07bd82f26c912241069864bead 100755 --- a/deploy/openvino/src/transforms.cpp +++ b/deploy/openvino/src/transforms.cpp @@ -12,13 +12,15 @@ // See the License for the specific language governing permissions and // limitations under the License. +#include "include/paddlex/transforms.h" + +#include + #include #include #include #include -#include -#include "include/paddlex/transforms.h" namespace PaddleX { @@ -28,7 +30,7 @@ std::map interpolations = {{"LINEAR", cv::INTER_LINEAR}, {"CUBIC", cv::INTER_CUBIC}, {"LANCZOS4", cv::INTER_LANCZOS4}}; -bool Normalize::Run(cv::Mat* im, ImageBlob* data){ +bool Normalize::Run(cv::Mat* im, ImageBlob* data) { for (int h = 0; h < im->rows; h++) { for (int w = 0; w < im->cols; w++) { im->at(h, w)[0] = @@ -62,16 +64,13 @@ float ResizeByShort::GenerateScale(const cv::Mat& im) { bool ResizeByShort::Run(cv::Mat* im, ImageBlob* data) { data->im_size_before_resize_.push_back({im->rows, im->cols}); data->reshape_order_.push_back("resize"); - float scale = GenerateScale(*im); int width = static_cast(round(scale * im->cols)); int height = static_cast(round(scale * im->rows)); cv::resize(*im, *im, cv::Size(width, height), 0, 0, cv::INTER_LINEAR); - data->new_im_size_[0] = im->rows; data->new_im_size_[1] = im->cols; data->scale = scale; - return true; } @@ -166,7 +165,8 @@ bool Resize::Run(cv::Mat* im, ImageBlob* data) { return true; } -void Transforms::Init(const YAML::Node& transforms_node, std::string type, bool to_rgb) { +void Transforms::Init( + const YAML::Node& transforms_node, std::string type, bool to_rgb) { transforms_.clear(); to_rgb_ = to_rgb; type_ = type; @@ -206,20 +206,18 @@ bool Transforms::Run(cv::Mat* im, ImageBlob* data) { cv::cvtColor(*im, *im, 
cv::COLOR_BGR2RGB); } (*im).convertTo(*im, CV_32FC3); - - if(type_ == "detector" ){ - LockedMemory input2Mapped = as(data->ori_im_size_)->wmap(); + if (type_ == "detector") { + LockedMemory input2Mapped = as( + data->ori_im_size_)->wmap(); float *p = input2Mapped.as(); p[0] = im->rows; p[1] = im->cols; } - //data->ori_im_size_[0] = im->rows; - //data->ori_im_size_[1] = im->cols; data->new_im_size_[0] = im->rows; data->new_im_size_[1] = im->cols; for (int i = 0; i < transforms_.size(); ++i) { - if (!transforms_[i]->Run(im,data)) { + if (!transforms_[i]->Run(im, data)) { std::cerr << "Apply transforms to image failed!" << std::endl; return false; } @@ -227,15 +225,14 @@ bool Transforms::Run(cv::Mat* im, ImageBlob* data) { // 将图像由NHWC转为NCHW格式 // 同时转为连续的内存块存储到Blob - - SizeVector blobSize = data->blob->getTensorDesc().getDims(); + InferenceEngine::SizeVector blobSize = data->blob->getTensorDesc().getDims(); const size_t width = blobSize[3]; const size_t height = blobSize[2]; const size_t channels = blobSize[1]; - MemoryBlob::Ptr mblob = InferenceEngine::as(data->blob); + MemoryBlob::Ptr mblob = InferenceEngine::as( + data->blob); auto mblobHolder = mblob->wmap(); float *blob_data = mblobHolder.as(); - for (size_t c = 0; c < channels; c++) { for (size_t h = 0; h < height; h++) { for (size_t w = 0; w < width; w++) { diff --git a/deploy/raspberry/demo/classifier.cpp b/deploy/raspberry/demo/classifier.cpp index 710e109f89c635aaef4c8d9620d2f7ecd9a4653b..c5c365162141ef0e5f0e2f4488a08271fd3dd520 100755 --- a/deploy/raspberry/demo/classifier.cpp +++ b/deploy/raspberry/demo/classifier.cpp @@ -63,7 +63,7 @@ int main(int argc, char** argv) { model.predict(im, &result); std::cout << "Predict label: " << result.category << ", label_id:" << result.category_id - << ", score: " << result.score + << ", score: " << result.score << ", num_img: " << model.count_num_ << std::endl; } } else { diff --git a/deploy/raspberry/demo/detector.cpp b/deploy/raspberry/demo/detector.cpp index 
70ef3d937d16be67c385d11dfd68134854bb112f..7887dcb342968b6da5e197f224f0d1c2bc3b0be5 100755 --- a/deploy/raspberry/demo/detector.cpp +++ b/deploy/raspberry/demo/detector.cpp @@ -55,7 +55,7 @@ int main(int argc, char** argv) { } // - PaddleX::Model model; + PaddleX::Model model; model.Init(FLAGS_model_dir, FLAGS_cfg_dir, FLAGS_thread_num); int imgs = 1; @@ -63,7 +63,7 @@ int main(int argc, char** argv) { // 进行预测 if (FLAGS_image_list != "") { std::ifstream inf(FLAGS_image_list); - if(!inf){ + if (!inf) { std::cerr << "Fail to open file " << FLAGS_image_list << std::endl; return -1; } @@ -73,16 +73,16 @@ int main(int argc, char** argv) { PaddleX::DetResult result; cv::Mat im = cv::imread(image_path, 1); model.predict(im, &result); - if(FLAGS_save_dir != ""){ - cv::Mat vis_img = - PaddleX::Visualize(im, result, model.labels, colormap, FLAGS_threshold); + if (FLAGS_save_dir != "") { + cv::Mat vis_img = PaddleX::Visualize( + im, result, model.labels, colormap, FLAGS_threshold); std::string save_path = - PaddleX::generate_save_path(FLAGS_save_dir, FLAGS_image); + PaddleX::generate_save_path(FLAGS_save_dir, FLAGS_image); cv::imwrite(save_path, vis_img); std::cout << "Visualized output saved as " << save_path << std::endl; } } - }else { + } else { PaddleX::DetResult result; cv::Mat im = cv::imread(FLAGS_image, 1); model.predict(im, &result); @@ -96,10 +96,10 @@ int main(int argc, char** argv) { << result.boxes[i].coordinate[2] << ", " << result.boxes[i].coordinate[3] << ")" << std::endl; } - if(FLAGS_save_dir != ""){ + if (FLAGS_save_dir != "") { // 可视化 - cv::Mat vis_img = - PaddleX::Visualize(im, result, model.labels, colormap, FLAGS_threshold); + cv::Mat vis_img = PaddleX::Visualize( + im, result, model.labels, colormap, FLAGS_threshold); std::string save_path = PaddleX::generate_save_path(FLAGS_save_dir, FLAGS_image); cv::imwrite(save_path, vis_img); @@ -109,5 +109,3 @@ int main(int argc, char** argv) { } return 0; } - - diff --git a/deploy/raspberry/demo/segmenter.cpp 
b/deploy/raspberry/demo/segmenter.cpp index 2cd1ba7a6b8dfde09a91bc70aa654cc91dca32e4..2f6ede99f558db99b1c2daabfdb7161c441ef240 100755 --- a/deploy/raspberry/demo/segmenter.cpp +++ b/deploy/raspberry/demo/segmenter.cpp @@ -49,12 +49,11 @@ int main(int argc, char** argv) { // std::cout << "init start" << std::endl; - PaddleX::Model model; + PaddleX::Model model; model.Init(FLAGS_model_dir, FLAGS_cfg_dir, FLAGS_thread_num); std::cout << "init done" << std::endl; int imgs = 1; auto colormap = PaddleX::GenerateColorMap(model.labels.size()); - if (FLAGS_image_list != "") { std::ifstream inf(FLAGS_image_list); if (!inf) { @@ -67,23 +66,19 @@ int main(int argc, char** argv) { PaddleX::SegResult result; cv::Mat im = cv::imread(image_path, 1); model.predict(im, &result); - if(FLAGS_save_dir != ""){ + if (FLAGS_save_dir != "") { cv::Mat vis_img = PaddleX::Visualize(im, result, model.labels, colormap); std::string save_path = PaddleX::generate_save_path(FLAGS_save_dir, image_path); cv::imwrite(save_path, vis_img); std::cout << "Visualized output saved as " << save_path << std::endl; } - } - - }else{ + } else { PaddleX::SegResult result; cv::Mat im = cv::imread(FLAGS_image, 1); - std::cout << "predict start" << std::endl; model.predict(im, &result); - std::cout << "predict done" << std::endl; - if(FLAGS_save_dir != ""){ + if (FLAGS_save_dir != "") { cv::Mat vis_img = PaddleX::Visualize(im, result, model.labels, colormap); std::string save_path = PaddleX::generate_save_path(FLAGS_save_dir, FLAGS_image); diff --git a/deploy/raspberry/include/paddlex/config_parser.h b/deploy/raspberry/include/paddlex/config_parser.h index 5303e4da7ac0eb3de73bc57059617d361065f136..850e46656d9efdb5374e3086757cb5350f0457b2 100755 --- a/deploy/raspberry/include/paddlex/config_parser.h +++ b/deploy/raspberry/include/paddlex/config_parser.h @@ -54,4 +54,4 @@ class ConfigPaser { YAML::Node Transforms_; }; -} // namespace PaddleDetection +} // namespace PaddleX diff --git 
a/deploy/raspberry/include/paddlex/paddlex.h b/deploy/raspberry/include/paddlex/paddlex.h index cac9ee2a7dc4967c2966623232ebbb2ffba8111b..2fc5070e401a3e31d62fc548b98f853c6dee7166 100755 --- a/deploy/raspberry/include/paddlex/paddlex.h +++ b/deploy/raspberry/include/paddlex/paddlex.h @@ -14,26 +14,35 @@ #pragma once +#include +#include + #include #include #include -#include +#include +#include +#include + +#include "include/paddlex/config_parser.h" +#include "include/paddlex/results.h" +#include "include/paddlex/transforms.h" + + #include "yaml-cpp/yaml.h" + + + #ifdef _WIN32 #define OS_PATH_SEP "\\" #else #define OS_PATH_SEP "/" #endif -#include "paddle_api.h" -#include -#include "include/paddlex/config_parser.h" -#include "include/paddlex/results.h" -#include "include/paddlex/transforms.h" -using namespace paddle::lite_api; + namespace PaddleX { @@ -54,8 +63,8 @@ class Model { bool preprocess(cv::Mat* input_im, ImageBlob* inputs); bool predict(const cv::Mat& im, ClsResult* result); - - bool predict(const cv::Mat& im, DetResult* result); + + bool predict(const cv::Mat& im, DetResult* result); bool predict(const cv::Mat& im, SegResult* result); @@ -65,7 +74,6 @@ class Model { std::map labels; Transforms transforms_; ImageBlob inputs_; - std::shared_ptr predictor_; - + std::shared_ptr predictor_; }; -} // namespce of PaddleX +} // namespace PaddleX diff --git a/deploy/raspberry/include/paddlex/results.h b/deploy/raspberry/include/paddlex/results.h index de90c4a85130f42c0201f0d671fd3e2d53b0f37d..099e2c98b4b99c68b48c4dd99c8fbdfa1d2cf4fa 100755 --- a/deploy/raspberry/include/paddlex/results.h +++ b/deploy/raspberry/include/paddlex/results.h @@ -68,4 +68,4 @@ class SegResult : public BaseResult { score_map.clear(); } }; -} // namespce of PaddleX +} // namespace PaddleX diff --git a/deploy/raspberry/include/paddlex/transforms.h b/deploy/raspberry/include/paddlex/transforms.h index 3a4636643fb77327e1c9f701cc73532fd152b773..d9b4d66b7fc064a7a92296f454cba58c7f2a7e75 100755 
--- a/deploy/raspberry/include/paddlex/transforms.h +++ b/deploy/raspberry/include/paddlex/transforms.h @@ -15,6 +15,7 @@ #pragma once #include +#include #include #include @@ -26,9 +27,7 @@ #include #include -#include "paddle_api.h" -using namespace paddle::lite_api; namespace PaddleX { @@ -90,7 +89,7 @@ class ResizeByShort : public Transform { } else { max_size_ = -1; } - }; + } virtual bool Run(cv::Mat* im, ImageBlob* data); private: @@ -196,12 +195,11 @@ class Padding : public Transform { } if (item["im_padding_value"].IsDefined()) { im_value_ = item["im_padding_value"].as>(); - } - else { + } else { im_value_ = {0, 0, 0}; } } - + virtual bool Run(cv::Mat* im, ImageBlob* data); private: diff --git a/deploy/raspberry/src/paddlex.cpp b/deploy/raspberry/src/paddlex.cpp index 5d56ad20857cdcc3861a62f69a6c2bbefa1d1b89..5b6cd2ec5c6a64a76751a97cb2a58d73af957fdf 100755 --- a/deploy/raspberry/src/paddlex.cpp +++ b/deploy/raspberry/src/paddlex.cpp @@ -16,18 +16,17 @@ #include #include -using namespace paddle::lite_api; namespace PaddleX { void Model::create_predictor(const std::string& model_dir, const std::string& cfg_dir, int thread_num) { - MobileConfig config; + paddle::lite_api::MobileConfig config; config.set_model_from_file(model_dir); - config.set_threads(thread_num); + config.set_threads(thread_num); load_config(cfg_dir); - predictor_ = CreatePaddlePredictor(config); + predictor_ = CreatePaddlePredictor(config); } bool Model::load_config(const std::string& cfg_dir) { @@ -83,12 +82,11 @@ bool Model::predict(const cv::Mat& im, ClsResult* result) { std::cerr << "Preprocess failed!" 
<< std::endl; return false; } - - ; - predictor_->Run(); + predictor_->Run(); - std::unique_ptr output_tensor(std::move(predictor_->GetOutput(0))); + std::unique_ptr output_tensor( + std::move(predictor_->GetOutput(0))); const float *outputs_data = output_tensor->mutable_data(); @@ -97,10 +95,6 @@ bool Model::predict(const cv::Mat& im, ClsResult* result) { result->category_id = std::distance(outputs_data, ptr); result->score = *ptr; result->category = labels[result->category_id]; - //for (int i=0;iGetInput(0)); cv::Mat im_clone = im.clone(); @@ -126,18 +119,14 @@ bool Model::predict(const cv::Mat& im, DetResult* result) { int h = inputs_.new_im_size_[0]; int w = inputs_.new_im_size_[1]; if (name == "YOLOv3") { - std::unique_ptr im_size_tensor(std::move(predictor_->GetInput(1))); - const std::vector IM_SIZE_SHAPE = {1,2}; + std::unique_ptr im_size_tensor( + std::move(predictor_->GetInput(1))); + const std::vector IM_SIZE_SHAPE = {1, 2}; im_size_tensor->Resize(IM_SIZE_SHAPE); auto *im_size_data = im_size_tensor->mutable_data(); memcpy(im_size_data, inputs_.ori_im_size_.data(), 1*2*sizeof(int)); } - - predictor_->Run(); - - - auto output_names = predictor_->GetOutputNames(); auto output_box_tensor = predictor_->GetTensor(output_names[0]); const float *output_box = output_box_tensor->mutable_data(); @@ -177,27 +166,17 @@ bool Model::predict(const cv::Mat& im, SegResult* result) { "function predict()!" << std::endl; return false; } - inputs_.input_tensor_ = std::move(predictor_->GetInput(0)); - - cv::Mat im_clone = im.clone(); if (!preprocess(&im_clone, &inputs_)) { std::cerr << "Preprocess failed!" 
<< std::endl; return false; } std::cout << "Preprocess is done" << std::endl; - - predictor_->Run(); - - - auto output_names = predictor_->GetOutputNames(); auto output_label_tensor = predictor_->GetTensor(output_names[0]); - std::cout << "output0" << output_names[0] << std::endl; - std::cout << "output1" << output_names[1] << std::endl; const int64_t *label_data = output_label_tensor->mutable_data(); std::vector output_label_shape = output_label_tensor->shape(); int size = 1; @@ -272,5 +251,4 @@ bool Model::predict(const cv::Mat& im, SegResult* result) { result->score_map.shape = {mask_score.rows, mask_score.cols}; return true; } - -} // namespce of PaddleX +} // namespace PaddleX diff --git a/deploy/raspberry/src/transforms.cpp b/deploy/raspberry/src/transforms.cpp index b7605a6a5745124aeb81a5bc8478f2e58833f901..c51ad29f76c43355a7557876f687dee0f1943cd7 100755 --- a/deploy/raspberry/src/transforms.cpp +++ b/deploy/raspberry/src/transforms.cpp @@ -12,12 +12,16 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+ +#include "include/paddlex/transforms.h" + +#include + #include #include #include -#include -#include "include/paddlex/transforms.h" + namespace PaddleX { @@ -27,7 +31,7 @@ std::map interpolations = {{"LINEAR", cv::INTER_LINEAR}, {"CUBIC", cv::INTER_CUBIC}, {"LANCZOS4", cv::INTER_LANCZOS4}}; -bool Normalize::Run(cv::Mat* im, ImageBlob* data){ +bool Normalize::Run(cv::Mat* im, ImageBlob* data) { for (int h = 0; h < im->rows; h++) { for (int w = 0; w < im->cols; w++) { im->at(h, w)[0] = @@ -66,11 +70,9 @@ bool ResizeByShort::Run(cv::Mat* im, ImageBlob* data) { int width = static_cast(round(scale * im->cols)); int height = static_cast(round(scale * im->rows)); cv::resize(*im, *im, cv::Size(width, height), 0, 0, cv::INTER_LINEAR); - data->new_im_size_[0] = im->rows; data->new_im_size_[1] = im->cols; data->scale = scale; - return true; } @@ -204,7 +206,6 @@ bool Transforms::Run(cv::Mat* im, ImageBlob* data) { cv::cvtColor(*im, *im, cv::COLOR_BGR2RGB); } (*im).convertTo(*im, CV_32FC3); - data->ori_im_size_[0] = im->rows; data->ori_im_size_[1] = im->cols; data->new_im_size_[0] = im->rows; @@ -219,14 +220,12 @@ bool Transforms::Run(cv::Mat* im, ImageBlob* data) { // 将图像由NHWC转为NCHW格式 // 同时转为连续的内存块存储到Blob - int height = im->rows; int width = im->cols; - int channels = im->channels(); + int channels = im->channels(); const std::vector INPUT_SHAPE = {1, channels, height, width}; data->input_tensor_->Resize(INPUT_SHAPE); auto *input_data = data->input_tensor_->mutable_data(); - for (size_t c = 0; c < channels; c++) { for (size_t h = 0; h < height; h++) { for (size_t w = 0; w < width; w++) {