diff --git a/deploy/openvino/include/paddlex/paddlex.h b/deploy/openvino/include/paddlex/paddlex.h
index e09ff615e160b233925c24f8a9faeb2269e8463b..3d5dba109221d872fa595996bb5e849370dc4dfb 100644
--- a/deploy/openvino/include/paddlex/paddlex.h
+++ b/deploy/openvino/include/paddlex/paddlex.h
@@ -56,9 +56,8 @@ class Model {
   std::string name;
   std::vector<std::string> labels;
   Transforms transforms_;
-  ImageBlob inputs_;
-  float* outputs_;
-  ExecutableNetwork executable_network_;
+  Blob::Ptr inputs_;
+  Blob::Ptr output_;
   CNNNetwork network_;
 };
 }  // namespce of PaddleX
diff --git a/deploy/openvino/include/paddlex/transforms.h b/deploy/openvino/include/paddlex/transforms.h
index df8cff59afacbe313f1eec5bc9835a736442583e..23bdac1593ee20fda2183d383d846837df24c0fb 100644
--- a/deploy/openvino/include/paddlex/transforms.h
+++ b/deploy/openvino/include/paddlex/transforms.h
@@ -26,32 +26,10 @@
 #include
 #include
 
-namespace PaddleX {
+#include <inference_engine.hpp>
+using namespace InferenceEngine;
 
-// Object for storing all preprocessed data
-class ImageBlob {
- public:
-  // Original image height and width
-  std::vector<int> ori_im_size_ = std::vector<int>(2);
-  // Newest image height and width after process
-  std::vector<int> new_im_size_ = std::vector<int>(2);
-  // Image height and width before resize
-  std::vector<std::vector<int>> im_size_before_resize_;
-  // Reshape order
-  std::vector<std::string> reshape_order_;
-  // Resize scale
-  float scale = 1.0;
-  // Buffer for image data after preprocessing
-  std::vector<float> im_data_;
-
-  void clear() {
-    ori_im_size_.clear();
-    new_im_size_.clear();
-    im_size_before_resize_.clear();
-    reshape_order_.clear();
-    im_data_.clear();
-  }
-};
+namespace PaddleX {
 
 // Abstraction of preprocessing opration class
 class Transform {
@@ -74,34 +52,6 @@ class Normalize : public Transform {
   std::vector<float> std_;
 };
 
-class ResizeByShort : public Transform {
- public:
-  virtual void Init(const YAML::Node& item) {
-    short_size_ = item["short_size"].as<int>();
-    if (item["max_size"].IsDefined()) {
-      max_size_ = item["max_size"].as<int>();
-    } else {
-      max_size_ = -1;
-    }
-  };
-  virtual bool Run(cv::Mat* im, ImageBlob* data);
-
- private:
-  float GenerateScale(const cv::Mat& im);
-  int short_size_;
-  int max_size_;
-};
-
-class ResizeByLong : public Transform {
- public:
-  virtual void Init(const YAML::Node& item) {
-    long_size_ = item["long_size"].as<int>();
-  };
-  virtual bool Run(cv::Mat* im, ImageBlob* data);
-
- private:
-  int long_size_;
-};
 
 class Resize : public Transform {
  public:
@@ -147,44 +97,11 @@ class CenterCrop : public Transform {
   int width_;
 };
 
-class Padding : public Transform {
- public:
-  virtual void Init(const YAML::Node& item) {
-    if (item["coarsest_stride"].IsDefined()) {
-      coarsest_stride_ = item["coarsest_stride"].as<int>();
-      if (coarsest_stride_ < 1) {
-        std::cerr << "[Padding] coarest_stride should greater than 0"
-                  << std::endl;
-        exit(-1);
-      }
-    }
-    if (item["target_size"].IsDefined()) {
-      if (item["target_size"].IsScalar()) {
-        width_ = item["target_size"].as<int>();
-        height_ = item["target_size"].as<int>();
-      } else if (item["target_size"].IsSequence()) {
-        width_ = item["target_size"].as<std::vector<int>>()[0];
-        height_ = item["target_size"].as<std::vector<int>>()[1];
-      }
-    }
-    if (item["im_padding_value"].IsDefined()) {
-      value_ = item["im_padding_value"].as<std::vector<float>>();
-    }
-  }
-  virtual bool Run(cv::Mat* im, ImageBlob* data);
-
- private:
-  int coarsest_stride_ = -1;
-  int width_ = 0;
-  int height_ = 0;
-  std::vector<float> value_;
-};
-
 class Transforms {
  public:
   void Init(const YAML::Node& node, bool to_rgb = true);
   std::shared_ptr<Transform> CreateTransform(const std::string& name);
-  bool Run(cv::Mat* im, ImageBlob* data);
+  bool Run(cv::Mat* im, Blob::Ptr data);
 
  private:
   std::vector<std::shared_ptr<Transform>> transforms_;
diff --git a/deploy/openvino/src/paddlex.cpp b/deploy/openvino/src/paddlex.cpp
index 1fdd004f6b92eedc716e7187a044f57e99a8ea4c..07d49913ae48ad5eece97f4dcfeed48ce68bda4d 100644
--- a/deploy/openvino/src/paddlex.cpp
+++ b/deploy/openvino/src/paddlex.cpp
@@ -29,42 +29,8 @@ void Model::create_predictor(const std::string& model_dir,
   input_info->getPreProcess().setResizeAlgorithm(RESIZE_BILINEAR);
   input_info->setLayout(Layout::NCHW);
   input_info->setPrecision(Precision::FP32);
-  executable_network_ = ie.LoadNetwork(network_, device);
-  load_config(cfg_dir);
-}
-
-template <typename T>
-void matU8ToBlob(const cv::Mat& orig_image, InferenceEngine::Blob::Ptr& blob, int batchIndex = 0) {
-  InferenceEngine::SizeVector blobSize = blob->getTensorDesc().getDims();
-  const size_t width = blobSize[3];
-  const size_t height = blobSize[2];
-  const size_t channels = blobSize[1];
-  InferenceEngine::MemoryBlob::Ptr mblob = InferenceEngine::as<InferenceEngine::MemoryBlob>(blob);
-  if (!mblob) {
-    THROW_IE_EXCEPTION << "We expect blob to be inherited from MemoryBlob in matU8ToBlob, "
-                       << "but by fact we were not able to cast inputBlob to MemoryBlob";
-  }
-  // locked memory holder should be alive all time while access to its buffer happens
-  auto mblobHolder = mblob->wmap();
-
-  T* blob_data = mblobHolder.as<T*>();
-  cv::Mat resized_image(orig_image);
-  if (static_cast<int>(width) != orig_image.size().width ||
-      static_cast<int>(height) != orig_image.size().height) {
-    cv::resize(orig_image, resized_image, cv::Size(width, height));
-  }
-
-  int batchOffset = batchIndex * width * height * channels;
-
-  for (size_t c = 0; c < channels; c++) {
-    for (size_t h = 0; h < height; h++) {
-      for (size_t w = 0; w < width; w++) {
-        blob_data[batchOffset + c * width * height + h * width + w] =
-            resized_image.at<cv::Vec3b>(h, w)[c];
-      }
-    }
-  }
+  load_config(cfg_dir);
 }
 
 bool Model::load_config(const std::string& cfg_dir) {
@@ -97,7 +63,7 @@ bool Model::preprocess(cv::Mat* input_im, ImageBlob* blob) {
   return true;
 }
 
-bool Model::predict(cv::Mat* im, ClsResult* result) {
+bool Model::predict(const cv::Mat& im, ClsResult* result) {
   inputs_.clear();
   if (type == "detector") {
     std::cerr << "Loading model is a 'detector', DetResult should be passed to "
@@ -111,31 +77,33 @@ bool Model::predict(cv::Mat* im, ClsResult* result) {
     return false;
   }
   // Preprocess the input image
-
-  if (!preprocess(im, &inputs_)) {
+
+  executable_network = ie.LoadNetwork(network_, device);
+  InferRequest infer_request = executable_network.CreateInferRequest();
+  std::string input_name = network_.getInputsInfo().begin()->first;
+  inputs_ = infer_request.GetBlob(input_name);
+
+  auto im_clone = im.clone();
+  if (!preprocess(&im_clone, inputs_)) {
    std::cerr << "Preprocess failed!" << std::endl;
    return false;
  }
-  InferRequest infer_request = executable_network_.CreateInferRequest();
-  std::string input_name = network_.getInputsInfo().begin()->first;
-  //im->convertTo(*im, CV_8UC3);
-  Blob::Ptr input = infer_request.GetBlob(input_name);
-  matU8ToBlob<uint8_t>(*im, input, 0);
+
+  infer_request.Infer();
+
   std::string output_name = network_.getOutputsInfo().begin()->first;
-  Blob::Ptr output = infer_request.GetBlob(output_name);
+  output_ = infer_request.GetBlob(output_name);
   MemoryBlob::CPtr moutput = as<MemoryBlob>(output);
   auto moutputHolder = moutput->rmap();
-  outputs_ = moutputHolder.as<float*>();
-
-  std::cout << sizeof(outputs_) << std::endl;
+  float* outputs_data = moutputHolder.as<float*>();
+
   // Post-process the model output
-  auto ptr = std::max_element(outputs_, outputs_ + sizeof(outputs_));
-  result->category_id = std::distance(outputs_, ptr);
+  auto ptr = std::max_element(outputs_data, outputs_data + moutput->size());
+  result->category_id = std::distance(outputs_data, ptr);
   result->score = *ptr;
   result->category = labels[result->category_id];
-  //for (int i=0;i
   return true;
 }
diff --git a/deploy/openvino/src/transforms.cpp b/deploy/openvino/src/transforms.cpp
--- a/deploy/openvino/src/transforms.cpp
+++ b/deploy/openvino/src/transforms.cpp
@@ ... @@
-float ResizeByShort::GenerateScale(const cv::Mat& im) {
-  int origin_w = im.cols;
-  int origin_h = im.rows;
-  int im_size_max = std::max(origin_w, origin_h);
-  int im_size_min = std::min(origin_w, origin_h);
-  float scale =
-      static_cast<float>(short_size_) / static_cast<float>(im_size_min);
-  if (max_size_ > 0) {
-    if (round(scale * im_size_max) > max_size_) {
-      scale = static_cast<float>(max_size_) / static_cast<float>(im_size_max);
-    }
-  }
-  return scale;
-}
-
-bool ResizeByShort::Run(cv::Mat* im, ImageBlob* data) {
-  data->im_size_before_resize_.push_back({im->rows, im->cols});
-  data->reshape_order_.push_back("resize");
-
-  float scale = GenerateScale(*im);
-  int width = static_cast<int>(scale * im->cols);
-  int height = static_cast<int>(scale * im->rows);
-  cv::resize(*im, *im, cv::Size(width, height), 0, 0, cv::INTER_LINEAR);
-
-  data->new_im_size_[0] = im->rows;
-  data->new_im_size_[1] = im->cols;
-  data->scale = scale;
-  return true;
-}
-
 bool CenterCrop::Run(cv::Mat* im, ImageBlob* data) {
   int height = static_cast<int>(im->rows);
   int width = static_cast<int>(im->cols);
@@ -86,55 +56,6 @@ bool CenterCrop::Run(cv::Mat* im, ImageBlob* data) {
   return true;
 }
 
-bool Padding::Run(cv::Mat* im, ImageBlob* data) {
-  data->im_size_before_resize_.push_back({im->rows, im->cols});
-  data->reshape_order_.push_back("padding");
-
-  int padding_w = 0;
-  int padding_h = 0;
-  if (width_ > 1 & height_ > 1) {
-    padding_w = width_ - im->cols;
-    padding_h = height_ - im->rows;
-  } else if (coarsest_stride_ > 1) {
-    padding_h =
-        ceil(im->rows * 1.0 / coarsest_stride_) * coarsest_stride_ - im->rows;
-    padding_w =
-        ceil(im->cols * 1.0 / coarsest_stride_) * coarsest_stride_ - im->cols;
-  }
-
-  if (padding_h < 0 || padding_w < 0) {
-    std::cerr << "[Padding] Computed padding_h=" << padding_h
-              << ", padding_w=" << padding_w
-              << ", but they should be greater than 0." << std::endl;
-    return false;
-  }
-  cv::copyMakeBorder(
-      *im, *im, 0, padding_h, 0, padding_w, cv::BORDER_CONSTANT, cv::Scalar(0));
-  data->new_im_size_[0] = im->rows;
-  data->new_im_size_[1] = im->cols;
-  return true;
-}
-
-bool ResizeByLong::Run(cv::Mat* im, ImageBlob* data) {
-  if (long_size_ <= 0) {
-    std::cerr << "[ResizeByLong] long_size should be greater than 0"
-              << std::endl;
-    return false;
-  }
-  data->im_size_before_resize_.push_back({im->rows, im->cols});
-  data->reshape_order_.push_back("resize");
-  int origin_w = im->cols;
-  int origin_h = im->rows;
-
-  int im_size_max = std::max(origin_w, origin_h);
-  float scale =
-      static_cast<float>(long_size_) / static_cast<float>(im_size_max);
-  cv::resize(*im, *im, cv::Size(), scale, scale, cv::INTER_NEAREST);
-  data->new_im_size_[0] = im->rows;
-  data->new_im_size_[1] = im->cols;
-  data->scale = scale;
-  return true;
-}
 
 bool Resize::Run(cv::Mat* im, ImageBlob* data) {
   if (width_ <= 0 || height_ <= 0) {
@@ -173,16 +94,10 @@ std::shared_ptr<Transform> Transforms::CreateTransform(
     const std::string& transform_name) {
   if (transform_name == "Normalize") {
     return std::make_shared<Normalize>();
-  } else if (transform_name == "ResizeByShort") {
-    return std::make_shared<ResizeByShort>();
   } else if (transform_name == "CenterCrop") {
     return std::make_shared<CenterCrop>();
   } else if (transform_name == "Resize") {
     return std::make_shared<Resize>();
-  } else if (transform_name == "Padding") {
-    return std::make_shared<Padding>();
-  } else if (transform_name == "ResizeByLong") {
-    return std::make_shared<ResizeByLong>();
   } else {
     std::cerr << "There's unexpected transform(name='" << transform_name
               << "')." << std::endl;
@@ -190,16 +105,13 @@ std::shared_ptr<Transform> Transforms::CreateTransform(
   }
 }
 
-bool Transforms::Run(cv::Mat* im, ImageBlob* data) {
+bool Transforms::Run(cv::Mat* im, Blob::Ptr data) {
   // Apply the preprocessing operators in the order given in transforms
   if (to_rgb_) {
     cv::cvtColor(*im, *im, cv::COLOR_BGR2RGB);
   }
   (*im).convertTo(*im, CV_32FC3);
-  data->ori_im_size_[0] = im->rows;
-  data->ori_im_size_[1] = im->cols;
-  data->new_im_size_[0] = im->rows;
-  data->new_im_size_[1] = im->cols;
+
   for (int i = 0; i < transforms_.size(); ++i) {
     if (!transforms_[i]->Run(im, data)) {
       std::cerr << "Apply transforms to image failed!" << std::endl;
@@ -208,14 +120,21 @@ bool Transforms::Run(cv::Mat* im, ImageBlob* data) {
   }
 
   // Convert the image from NHWC to NCHW format
-  // and store it as a contiguous memory block in the ImageBlob
-  int h = im->rows;
-  int w = im->cols;
-  int c = im->channels();
-  (data->im_data_).resize(c * h * w);
-  float* ptr = (data->im_data_).data();
-  for (int i = 0; i < c; ++i) {
-    cv::extractChannel(*im, cv::Mat(h, w, CV_32FC1, ptr + i * h * w), i);
+  // and store it as a contiguous memory block in the Blob
+  SizeVector blobSize = data->getTensorDesc().getDims();
+  const size_t width = blobSize[3];
+  const size_t height = blobSize[2];
+  const size_t channels = blobSize[1];
+  MemoryBlob::Ptr mblob = InferenceEngine::as<MemoryBlob>(data);
+  auto mblobHolder = mblob->wmap();
+  float* blob_data = mblobHolder.as<float*>();
+  for (size_t c = 0; c < channels; c++) {
+    for (size_t h = 0; h < height; h++) {
+      for (size_t w = 0; w < width; w++) {
+        blob_data[c * width * height + h * width + w] =
+            im->at<cv::Vec3f>(h, w)[c];
+      }
+    }
   }
   return true;
 }
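
Note: the patch above writes the preprocessed CV_32FC3 image directly into the network's NCHW FP32 input Blob and reads the classification scores back out of the output Blob. The following is a minimal standalone sketch of that write/read pattern, assuming only the public InferenceEngine API from <inference_engine.hpp>; it is not part of this patch, the "model.xml" path, the "CPU" device string and the 224x224 dummy image are placeholders, and the argmax is bounded by the output blob's element count (Blob::size()) rather than by sizeof on a pointer.

#include <algorithm>
#include <iostream>
#include <string>

#include <inference_engine.hpp>
#include <opencv2/opencv.hpp>

using namespace InferenceEngine;

// Copy a CV_32FC3 image (HWC) into an NCHW FP32 blob.
// Assumes the image was already resized to the network input size.
static void MatToBlob(const cv::Mat& im, const Blob::Ptr& blob) {
  SizeVector dims = blob->getTensorDesc().getDims();  // {N, C, H, W}
  const size_t channels = dims[1], height = dims[2], width = dims[3];
  MemoryBlob::Ptr mblob = as<MemoryBlob>(blob);
  auto holder = mblob->wmap();  // locked write access to the blob buffer
  float* dst = holder.as<float*>();
  for (size_t c = 0; c < channels; ++c)
    for (size_t h = 0; h < height; ++h)
      for (size_t w = 0; w < width; ++w)
        dst[c * height * width + h * width + w] =
            im.at<cv::Vec3f>(static_cast<int>(h), static_cast<int>(w))[c];
}

int main() {
  Core ie;
  CNNNetwork network = ie.ReadNetwork("model.xml");  // placeholder model path

  // Mirror the patch's create_predictor(): FP32, NCHW input.
  InputInfo::Ptr input_info = network.getInputsInfo().begin()->second;
  input_info->setPrecision(Precision::FP32);
  input_info->setLayout(Layout::NCHW);

  ExecutableNetwork exec = ie.LoadNetwork(network, "CPU");
  InferRequest request = exec.CreateInferRequest();

  const std::string input_name = network.getInputsInfo().begin()->first;
  const std::string output_name = network.getOutputsInfo().begin()->first;

  // Dummy preprocessed image; a real caller would run Transforms::Run first.
  cv::Mat im(224, 224, CV_32FC3, cv::Scalar(0, 0, 0));
  MatToBlob(im, request.GetBlob(input_name));
  request.Infer();

  // Argmax over the real element count of the output blob.
  MemoryBlob::CPtr out = as<MemoryBlob>(request.GetBlob(output_name));
  auto out_holder = out->rmap();
  const float* scores = out_holder.as<const float*>();
  auto best = std::max_element(scores, scores + out->size());
  std::cout << "top-1 class: " << std::distance(scores, best)
            << " score: " << *best << std::endl;
  return 0;
}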