提交 26466dcd 编写于 作者: S syyxsxx

fix code style

上级 bf07b81c
......@@ -62,7 +62,7 @@ int main(int argc, char** argv) {
model.predict(im, &result);
std::cout << "Predict label: " << result.category
<< ", label_id:" << result.category_id
<< ", score: " << result.score
<< ", score: " << result.score
<< ", num_img: " << model.count_num_ << std::endl;
}
} else {
......
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <glog/logging.h>
#include <omp.h>
......@@ -41,7 +55,7 @@ int main(int argc, char** argv) {
}
//
PaddleX::Model model;
PaddleX::Model model;
model.Init(FLAGS_model_dir, FLAGS_cfg_dir, FLAGS_device);
int imgs = 1;
......@@ -49,7 +63,7 @@ int main(int argc, char** argv) {
// 进行预测
if (FLAGS_image_list != "") {
std::ifstream inf(FLAGS_image_list);
if(!inf){
if (!inf) {
std::cerr << "Fail to open file " << FLAGS_image_list << std::endl;
return -1;
}
......@@ -58,16 +72,16 @@ int main(int argc, char** argv) {
PaddleX::DetResult result;
cv::Mat im = cv::imread(image_path, 1);
model.predict(im, &result);
if(FLAGS_save_dir != ""){
cv::Mat vis_img =
PaddleX::Visualize(im, result, model.labels, colormap, FLAGS_threshold);
if (FLAGS_save_dir != "") {
cv::Mat vis_img = PaddleX::Visualize(
im, result, model.labels, colormap, FLAGS_threshold);
std::string save_path =
PaddleX::generate_save_path(FLAGS_save_dir, FLAGS_image);
PaddleX::generate_save_path(FLAGS_save_dir, FLAGS_image);
cv::imwrite(save_path, vis_img);
std::cout << "Visualized output saved as " << save_path << std::endl;
}
}
}else {
} else {
PaddleX::DetResult result;
cv::Mat im = cv::imread(FLAGS_image, 1);
model.predict(im, &result);
......@@ -81,10 +95,10 @@ int main(int argc, char** argv) {
<< result.boxes[i].coordinate[2] << ", "
<< result.boxes[i].coordinate[3] << ")" << std::endl;
}
if(FLAGS_save_dir != ""){
if (FLAGS_save_dir != "") {
// 可视化
cv::Mat vis_img =
PaddleX::Visualize(im, result, model.labels, colormap, FLAGS_threshold);
cv::Mat vis_img = PaddleX::Visualize(
im, result, model.labels, colormap, FLAGS_threshold);
std::string save_path =
PaddleX::generate_save_path(FLAGS_save_dir, FLAGS_image);
cv::imwrite(save_path, vis_img);
......@@ -94,5 +108,3 @@ int main(int argc, char** argv) {
}
return 0;
}
......@@ -50,12 +50,12 @@ int main(int argc, char** argv) {
//
std::cout << "init start" << std::endl;
PaddleX::Model model;
PaddleX::Model model;
model.Init(FLAGS_model_dir, FLAGS_cfg_dir, FLAGS_device);
std::cout << "init done" << std::endl;
int imgs = 1;
auto colormap = PaddleX::GenerateColorMap(model.labels.size());
if (FLAGS_image_list != "") {
std::ifstream inf(FLAGS_image_list);
if (!inf) {
......@@ -69,7 +69,7 @@ int main(int argc, char** argv) {
PaddleX::SegResult result;
cv::Mat im = cv::imread(image_path, 1);
model.predict(im, &result);
if(FLAGS_save_dir != ""){
if (FLAGS_save_dir != "") {
cv::Mat vis_img = PaddleX::Visualize(im, result, model.labels, colormap);
std::string save_path =
PaddleX::generate_save_path(FLAGS_save_dir, image_path);
......@@ -77,13 +77,11 @@ int main(int argc, char** argv) {
std::cout << "Visualized output saved as " << save_path << std::endl;
}
}
}else{
} else {
PaddleX::SegResult result;
cv::Mat im = cv::imread(FLAGS_image, 1);
std::cout << "predict start" << std::endl;
model.predict(im, &result);
std::cout << "predict done" << std::endl;
if(FLAGS_save_dir != ""){
if (FLAGS_save_dir != "") {
cv::Mat vis_img = PaddleX::Visualize(im, result, model.labels, colormap);
std::string save_path =
PaddleX::generate_save_path(FLAGS_save_dir, FLAGS_image);
......
......@@ -54,4 +54,4 @@ class ConfigPaser {
YAML::Node Transforms_;
};
} // namespace PaddleDetection
} // namespace PaddleX
......@@ -17,7 +17,8 @@
#include <functional>
#include <iostream>
#include <numeric>
#include <chrono>
#include <map>
#include <string>
#include "yaml-cpp/yaml.h"
......@@ -31,7 +32,7 @@
#include "include/paddlex/config_parser.h"
#include "include/paddlex/results.h"
#include "include/paddlex/transforms.h"
using namespace InferenceEngine;
namespace PaddleX {
......@@ -52,8 +53,8 @@ class Model {
bool preprocess(cv::Mat* input_im, ImageBlob* inputs);
bool predict(const cv::Mat& im, ClsResult* result);
bool predict(const cv::Mat& im, DetResult* result);
bool predict(const cv::Mat& im, DetResult* result);
bool predict(const cv::Mat& im, SegResult* result);
......@@ -63,8 +64,8 @@ class Model {
std::map<int, std::string> labels;
Transforms transforms_;
ImageBlob inputs_;
Blob::Ptr output_;
CNNNetwork network_;
ExecutableNetwork executable_network_;
InferenceEngine::Blob::Ptr output_;
InferenceEngine::CNNNetwork network_;
InferenceEngine::ExecutableNetwork executable_network_;
};
} // namespce of PaddleX
} // namespace PaddleX
......@@ -68,4 +68,4 @@ class SegResult : public BaseResult {
score_map.clear();
}
};
} // namespce of PaddleX
} // namespace PaddleX
......@@ -16,18 +16,17 @@
#include <yaml-cpp/yaml.h>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include <memory>
#include <string>
#include <vector>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <inference_engine.hpp>
using namespace InferenceEngine;
namespace PaddleX {
......@@ -38,8 +37,7 @@ namespace PaddleX {
class ImageBlob {
public:
// Original image height and width
//std::vector<int> ori_im_size_ = std::vector<int>(2);
Blob::Ptr ori_im_size_;
InferenceEngine::Blob::Ptr ori_im_size_;
// Newest image height and width after process
std::vector<int> new_im_size_ = std::vector<int>(2);
......@@ -50,7 +48,7 @@ class ImageBlob {
// Resize scale
float scale = 1.0;
// Buffer for image data after preprocessing
Blob::Ptr blob;
InferenceEngine::Blob::Ptr blob;
void clear() {
im_size_before_resize_.clear();
......@@ -90,7 +88,7 @@ class ResizeByShort : public Transform {
} else {
max_size_ = -1;
}
};
}
virtual bool Run(cv::Mat* im, ImageBlob* data);
private:
......@@ -196,12 +194,11 @@ class Padding : public Transform {
}
if (item["im_padding_value"].IsDefined()) {
im_value_ = item["im_padding_value"].as<std::vector<float>>();
}
else {
} else {
im_value_ = {0, 0, 0};
}
}
virtual bool Run(cv::Mat* im, ImageBlob* data);
private:
......
......@@ -16,36 +16,37 @@
#include <iostream>
#include <fstream>
using namespace InferenceEngine;
namespace PaddleX {
void Model::create_predictor(const std::string& model_dir,
const std::string& cfg_dir,
std::string device) {
Core ie;
network_ = ie.ReadNetwork(model_dir, model_dir.substr(0, model_dir.size() - 4) + ".bin");
InferenceEngine::Core ie;
network_ = ie.ReadNetwork(
model_dir, model_dir.substr(0, model_dir.size() - 4) + ".bin");
network_.setBatchSize(1);
InputsDataMap inputInfo(network_.getInputsInfo());
InferenceEngine::InputsDataMap inputInfo(network_.getInputsInfo());
std::string imageInputName;
for (const auto & inputInfoItem : inputInfo) {
if (inputInfoItem.second->getTensorDesc().getDims().size() == 4){
if (inputInfoItem.second->getTensorDesc().getDims().size() == 4) {
imageInputName = inputInfoItem.first;
inputInfoItem.second->setPrecision(Precision::FP32);
inputInfoItem.second->getPreProcess().setResizeAlgorithm(RESIZE_BILINEAR);
inputInfoItem.second->getPreProcess().setResizeAlgorithm(
RESIZE_BILINEAR);
inputInfoItem.second->setLayout(Layout::NCHW);
}
if (inputInfoItem.second->getTensorDesc().getDims().size() == 2){
if (inputInfoItem.second->getTensorDesc().getDims().size() == 2) {
imageInputName = inputInfoItem.first;
inputInfoItem.second->setPrecision(Precision::FP32);
}
}
if(device == "MYRIAD"){
if (device == "MYRIAD") {
std::map<std::string, std::string> networkConfig;
networkConfig["VPU_HW_STAGES_OPTIMIZATION"] = "ON";
executable_network_ = ie.LoadNetwork(network_, device, networkConfig);
}else{
} else {
executable_network_ = ie.LoadNetwork(network_, device);
}
load_config(cfg_dir);
......@@ -98,7 +99,8 @@ bool Model::predict(const cv::Mat& im, ClsResult* result) {
return false;
}
// 处理输入图像
InferRequest infer_request = executable_network_.CreateInferRequest();
InferenceEngine::InferRequest infer_request =
executable_network_.CreateInferRequest();
std::string input_name = network_.getInputsInfo().begin()->first;
inputs_.blob = infer_request.GetBlob(input_name);
cv::Mat im_clone = im.clone();
......@@ -106,14 +108,13 @@ bool Model::predict(const cv::Mat& im, ClsResult* result) {
std::cerr << "Preprocess failed!" << std::endl;
return false;
}
infer_request.Infer();
std::string output_name = network_.getOutputsInfo().begin()->first;
output_ = infer_request.GetBlob(output_name);
MemoryBlob::CPtr moutput = as<MemoryBlob>(output_);
InferenceEngine::MemoryBlob::CPtr moutput =
as<InferenceEngine::MemoryBlob>(output_);
auto moutputHolder = moutput->rmap();
float* outputs_data = moutputHolder.as<float *>();
......@@ -122,7 +123,7 @@ bool Model::predict(const cv::Mat& im, ClsResult* result) {
result->category_id = std::distance(outputs_data, ptr);
result->score = *ptr;
result->category = labels[result->category_id];
return true;
}
bool Model::predict(const cv::Mat& im, DetResult* result) {
......@@ -137,46 +138,45 @@ bool Model::predict(const cv::Mat& im, DetResult* result) {
"to function predict()!" << std::endl;
return false;
}
InferRequest infer_request = executable_network_.CreateInferRequest();
InputsDataMap input_maps = network_.getInputsInfo();
InferenceEngine::InferRequest infer_request =
executable_network_.CreateInferRequest();
InferenceEngine::InputsDataMap input_maps = network_.getInputsInfo();
std::string inputName;
for (const auto & input_map : input_maps) {
if (input_map.second->getTensorDesc().getDims().size() == 4){
if (input_map.second->getTensorDesc().getDims().size() == 4) {
inputName = input_map.first;
inputs_.blob = infer_request.GetBlob(inputName);
}
if (input_map.second->getTensorDesc().getDims().size() == 2){
if (input_map.second->getTensorDesc().getDims().size() == 2) {
inputName = input_map.first;
inputs_.ori_im_size_ = infer_request.GetBlob(inputName);
}
}
}
cv::Mat im_clone = im.clone();
if (!preprocess(&im_clone, &inputs_)) {
std::cerr << "Preprocess failed!" << std::endl;
return false;
}
infer_request.Infer();
OutputsDataMap out_map = network_.getOutputsInfo();
InferenceEngine::OutputsDataMap out_map = network_.getOutputsInfo();
auto iter = out_map.begin();
std::string outputName = iter->first;
std::cout << "output: " << outputName << std::endl;
Blob::Ptr output = infer_request.GetBlob(outputName);
MemoryBlob::CPtr moutput = as<MemoryBlob>(output);
TensorDesc blob_output = moutput->getTensorDesc();
InferenceEngine::Blob::Ptr output = infer_request.GetBlob(outputName);
InferenceEngine::MemoryBlob::CPtr moutput =
as<InferenceEngine::MemoryBlob>(output);
InferenceEngine::TensorDesc blob_output = moutput->getTensorDesc();
std::vector<size_t> output_shape = blob_output.getDims();
auto moutputHolder = moutput->rmap();
float* data = moutputHolder.as<float *>();
int size = 1;
for (auto& i : output_shape){
for (auto& i : output_shape) {
size *= static_cast<int>(i);
}
int num_boxes = size / 6;
for (int i = 0; i < num_boxes; ++i){
if(data[i * 6] > 0){
for (int i = 0; i < num_boxes; ++i) {
if (data[i * 6] > 0) {
Box box;
box.category_id = static_cast<int>(data[i * 6]);
box.category = labels[box.category_id];
......@@ -207,10 +207,11 @@ bool Model::predict(const cv::Mat& im, SegResult* result) {
return false;
}
//
InferRequest infer_request = executable_network_.CreateInferRequest();
InferenceEngine::InferRequest infer_request =
executable_network_.CreateInferRequest();
std::string input_name = network_.getInputsInfo().begin()->first;
inputs_.blob = infer_request.GetBlob(input_name);
//
cv::Mat im_clone = im.clone();
if (!preprocess(&im_clone, &inputs_)) {
......@@ -220,19 +221,19 @@ bool Model::predict(const cv::Mat& im, SegResult* result) {
//
infer_request.Infer();
if (count_num_ >= 20){
if (count_num_ >= 20) {
total_time_ = total_time_ + time_used.count();
}
OutputsDataMap out_map = network_.getOutputsInfo();
InferenceEngine::OutputsDataMap out_map = network_.getOutputsInfo();
auto iter = out_map.begin();
iter++;
std::string output_name_score = iter->first;
std::cout << iter->first << std::endl;
Blob::Ptr output_score = infer_request.GetBlob(output_name_score);
MemoryBlob::CPtr moutput_score = as<MemoryBlob>(output_score);
TensorDesc blob_score = moutput_score->getTensorDesc();
InferenceEngine::Blob::Ptr output_score =
infer_request.GetBlob(output_name_score);
InferenceEngine::MemoryBlob::CPtr moutput_score =
as<InferenceEngine::MemoryBlob>(output_score);
InferenceEngine::TensorDesc blob_score = moutput_score->getTensorDesc();
std::vector<size_t> output_score_shape = blob_score.getDims();
int size = 1;
for (auto& i : output_score_shape) {
......@@ -242,14 +243,15 @@ bool Model::predict(const cv::Mat& im, SegResult* result) {
result->score_map.data.resize(size);
auto moutputHolder_score = moutput_score->rmap();
float* score_data = moutputHolder_score.as<float *>();
memcpy(result->score_map.data.data(),score_data,moutput_score->byteSize());
memcpy(result->score_map.data.data(), score_data, moutput_score->byteSize());
iter++;
std::string output_name_label = iter->first;
std::cout << iter->first << std::endl;
Blob::Ptr output_label = infer_request.GetBlob(output_name_label);
MemoryBlob::CPtr moutput_label = as<MemoryBlob>(output_label);
TensorDesc blob_label = moutput_label->getTensorDesc();
InferenceEngine::Blob::Ptr output_label =
infer_request.GetBlob(output_name_label);
InferenceEngine::MemoryBlob::CPtr moutput_label =
as<InferenceEngine::MemoryBlob>(output_label);
InferenceEngine::TensorDesc blob_label = moutput_label->getTensorDesc();
std::vector<size_t> output_label_shape = blob_label.getDims();
size = 1;
for (auto& i : output_label_shape) {
......@@ -259,7 +261,7 @@ bool Model::predict(const cv::Mat& im, SegResult* result) {
result->label_map.data.resize(size);
auto moutputHolder_label = moutput_label->rmap();
int* label_data = moutputHolder_label.as<int *>();
memcpy(result->label_map.data.data(),label_data,moutput_label->byteSize());
memcpy(result->label_map.data.data(), label_data, moutput_label->byteSize());
......@@ -315,5 +317,4 @@ bool Model::predict(const cv::Mat& im, SegResult* result) {
result->score_map.shape = {mask_score.rows, mask_score.cols};
return true;
}
} // namespce of PaddleX
} // namespace PaddleX
......@@ -12,13 +12,15 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include "include/paddlex/transforms.h"
#include <math.h>
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
#include <math.h>
#include "include/paddlex/transforms.h"
namespace PaddleX {
......@@ -28,7 +30,7 @@ std::map<std::string, int> interpolations = {{"LINEAR", cv::INTER_LINEAR},
{"CUBIC", cv::INTER_CUBIC},
{"LANCZOS4", cv::INTER_LANCZOS4}};
bool Normalize::Run(cv::Mat* im, ImageBlob* data){
bool Normalize::Run(cv::Mat* im, ImageBlob* data) {
for (int h = 0; h < im->rows; h++) {
for (int w = 0; w < im->cols; w++) {
im->at<cv::Vec3f>(h, w)[0] =
......@@ -62,16 +64,13 @@ float ResizeByShort::GenerateScale(const cv::Mat& im) {
bool ResizeByShort::Run(cv::Mat* im, ImageBlob* data) {
data->im_size_before_resize_.push_back({im->rows, im->cols});
data->reshape_order_.push_back("resize");
float scale = GenerateScale(*im);
int width = static_cast<int>(round(scale * im->cols));
int height = static_cast<int>(round(scale * im->rows));
cv::resize(*im, *im, cv::Size(width, height), 0, 0, cv::INTER_LINEAR);
data->new_im_size_[0] = im->rows;
data->new_im_size_[1] = im->cols;
data->scale = scale;
return true;
}
......@@ -166,7 +165,8 @@ bool Resize::Run(cv::Mat* im, ImageBlob* data) {
return true;
}
void Transforms::Init(const YAML::Node& transforms_node, std::string type, bool to_rgb) {
void Transforms::Init(
const YAML::Node& transforms_node, std::string type, bool to_rgb) {
transforms_.clear();
to_rgb_ = to_rgb;
type_ = type;
......@@ -206,20 +206,18 @@ bool Transforms::Run(cv::Mat* im, ImageBlob* data) {
cv::cvtColor(*im, *im, cv::COLOR_BGR2RGB);
}
(*im).convertTo(*im, CV_32FC3);
if(type_ == "detector" ){
LockedMemory<void> input2Mapped = as<MemoryBlob>(data->ori_im_size_)->wmap();
if (type_ == "detector") {
LockedMemory<void> input2Mapped = as<MemoryBlob>(
data->ori_im_size_)->wmap();
float *p = input2Mapped.as<float*>();
p[0] = im->rows;
p[1] = im->cols;
}
//data->ori_im_size_[0] = im->rows;
//data->ori_im_size_[1] = im->cols;
data->new_im_size_[0] = im->rows;
data->new_im_size_[1] = im->cols;
for (int i = 0; i < transforms_.size(); ++i) {
if (!transforms_[i]->Run(im,data)) {
if (!transforms_[i]->Run(im, data)) {
std::cerr << "Apply transforms to image failed!" << std::endl;
return false;
}
......@@ -227,15 +225,14 @@ bool Transforms::Run(cv::Mat* im, ImageBlob* data) {
// 将图像由NHWC转为NCHW格式
// 同时转为连续的内存块存储到Blob
SizeVector blobSize = data->blob->getTensorDesc().getDims();
InferenceEngine::SizeVector blobSize = data->blob->getTensorDesc().getDims();
const size_t width = blobSize[3];
const size_t height = blobSize[2];
const size_t channels = blobSize[1];
MemoryBlob::Ptr mblob = InferenceEngine::as<MemoryBlob>(data->blob);
MemoryBlob::Ptr mblob = InferenceEngine::as<InferenceEngine::MemoryBlob>(
data->blob);
auto mblobHolder = mblob->wmap();
float *blob_data = mblobHolder.as<float *>();
for (size_t c = 0; c < channels; c++) {
for (size_t h = 0; h < height; h++) {
for (size_t w = 0; w < width; w++) {
......
......@@ -63,7 +63,7 @@ int main(int argc, char** argv) {
model.predict(im, &result);
std::cout << "Predict label: " << result.category
<< ", label_id:" << result.category_id
<< ", score: " << result.score
<< ", score: " << result.score
<< ", num_img: " << model.count_num_ << std::endl;
}
} else {
......
......@@ -55,7 +55,7 @@ int main(int argc, char** argv) {
}
//
PaddleX::Model model;
PaddleX::Model model;
model.Init(FLAGS_model_dir, FLAGS_cfg_dir, FLAGS_thread_num);
int imgs = 1;
......@@ -63,7 +63,7 @@ int main(int argc, char** argv) {
// 进行预测
if (FLAGS_image_list != "") {
std::ifstream inf(FLAGS_image_list);
if(!inf){
if (!inf) {
std::cerr << "Fail to open file " << FLAGS_image_list << std::endl;
return -1;
}
......@@ -73,16 +73,16 @@ int main(int argc, char** argv) {
PaddleX::DetResult result;
cv::Mat im = cv::imread(image_path, 1);
model.predict(im, &result);
if(FLAGS_save_dir != ""){
cv::Mat vis_img =
PaddleX::Visualize(im, result, model.labels, colormap, FLAGS_threshold);
if (FLAGS_save_dir != "") {
cv::Mat vis_img = PaddleX::Visualize(
im, result, model.labels, colormap, FLAGS_threshold);
std::string save_path =
PaddleX::generate_save_path(FLAGS_save_dir, FLAGS_image);
PaddleX::generate_save_path(FLAGS_save_dir, FLAGS_image);
cv::imwrite(save_path, vis_img);
std::cout << "Visualized output saved as " << save_path << std::endl;
}
}
}else {
} else {
PaddleX::DetResult result;
cv::Mat im = cv::imread(FLAGS_image, 1);
model.predict(im, &result);
......@@ -96,10 +96,10 @@ int main(int argc, char** argv) {
<< result.boxes[i].coordinate[2] << ", "
<< result.boxes[i].coordinate[3] << ")" << std::endl;
}
if(FLAGS_save_dir != ""){
if (FLAGS_save_dir != "") {
// 可视化
cv::Mat vis_img =
PaddleX::Visualize(im, result, model.labels, colormap, FLAGS_threshold);
cv::Mat vis_img = PaddleX::Visualize(
im, result, model.labels, colormap, FLAGS_threshold);
std::string save_path =
PaddleX::generate_save_path(FLAGS_save_dir, FLAGS_image);
cv::imwrite(save_path, vis_img);
......@@ -109,5 +109,3 @@ int main(int argc, char** argv) {
}
return 0;
}
......@@ -49,12 +49,11 @@ int main(int argc, char** argv) {
//
std::cout << "init start" << std::endl;
PaddleX::Model model;
PaddleX::Model model;
model.Init(FLAGS_model_dir, FLAGS_cfg_dir, FLAGS_thread_num);
std::cout << "init done" << std::endl;
int imgs = 1;
auto colormap = PaddleX::GenerateColorMap(model.labels.size());
if (FLAGS_image_list != "") {
std::ifstream inf(FLAGS_image_list);
if (!inf) {
......@@ -67,23 +66,19 @@ int main(int argc, char** argv) {
PaddleX::SegResult result;
cv::Mat im = cv::imread(image_path, 1);
model.predict(im, &result);
if(FLAGS_save_dir != ""){
if (FLAGS_save_dir != "") {
cv::Mat vis_img = PaddleX::Visualize(im, result, model.labels, colormap);
std::string save_path =
PaddleX::generate_save_path(FLAGS_save_dir, image_path);
cv::imwrite(save_path, vis_img);
std::cout << "Visualized output saved as " << save_path << std::endl;
}
}
}else{
} else {
PaddleX::SegResult result;
cv::Mat im = cv::imread(FLAGS_image, 1);
std::cout << "predict start" << std::endl;
model.predict(im, &result);
std::cout << "predict done" << std::endl;
if(FLAGS_save_dir != ""){
if (FLAGS_save_dir != "") {
cv::Mat vis_img = PaddleX::Visualize(im, result, model.labels, colormap);
std::string save_path =
PaddleX::generate_save_path(FLAGS_save_dir, FLAGS_image);
......
......@@ -54,4 +54,4 @@ class ConfigPaser {
YAML::Node Transforms_;
};
} // namespace PaddleDetection
} // namespace PaddleX
......@@ -14,26 +14,35 @@
#pragma once
#include <arm_neon.h>
#include <paddle_api.h>
#include <functional>
#include <iostream>
#include <numeric>
#include <chrono>
#include <map>
#include <string>
#include <memory>
#include "include/paddlex/config_parser.h"
#include "include/paddlex/results.h"
#include "include/paddlex/transforms.h"
#include "yaml-cpp/yaml.h"
#ifdef _WIN32
#define OS_PATH_SEP "\\"
#else
#define OS_PATH_SEP "/"
#endif
#include "paddle_api.h"
#include <arm_neon.h>
#include "include/paddlex/config_parser.h"
#include "include/paddlex/results.h"
#include "include/paddlex/transforms.h"
using namespace paddle::lite_api;
namespace PaddleX {
......@@ -54,8 +63,8 @@ class Model {
bool preprocess(cv::Mat* input_im, ImageBlob* inputs);
bool predict(const cv::Mat& im, ClsResult* result);
bool predict(const cv::Mat& im, DetResult* result);
bool predict(const cv::Mat& im, DetResult* result);
bool predict(const cv::Mat& im, SegResult* result);
......@@ -65,7 +74,6 @@ class Model {
std::map<int, std::string> labels;
Transforms transforms_;
ImageBlob inputs_;
std::shared_ptr<PaddlePredictor> predictor_;
std::shared_ptr<paddle::lite_api::PaddlePredictor> predictor_;
};
} // namespce of PaddleX
} // namespace PaddleX
......@@ -68,4 +68,4 @@ class SegResult : public BaseResult {
score_map.clear();
}
};
} // namespce of PaddleX
} // namespace PaddleX
......@@ -15,6 +15,7 @@
#pragma once
#include <yaml-cpp/yaml.h>
#include <paddle_api.h>
#include <memory>
#include <string>
......@@ -26,9 +27,7 @@
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include "paddle_api.h"
using namespace paddle::lite_api;
namespace PaddleX {
......@@ -90,7 +89,7 @@ class ResizeByShort : public Transform {
} else {
max_size_ = -1;
}
};
}
virtual bool Run(cv::Mat* im, ImageBlob* data);
private:
......@@ -196,12 +195,11 @@ class Padding : public Transform {
}
if (item["im_padding_value"].IsDefined()) {
im_value_ = item["im_padding_value"].as<std::vector<float>>();
}
else {
} else {
im_value_ = {0, 0, 0};
}
}
virtual bool Run(cv::Mat* im, ImageBlob* data);
private:
......
......@@ -16,18 +16,17 @@
#include <iostream>
#include <fstream>
using namespace paddle::lite_api;
namespace PaddleX {
void Model::create_predictor(const std::string& model_dir,
const std::string& cfg_dir,
int thread_num) {
MobileConfig config;
paddle::lite_api::MobileConfig config;
config.set_model_from_file(model_dir);
config.set_threads(thread_num);
config.set_threads(thread_num);
load_config(cfg_dir);
predictor_ = CreatePaddlePredictor<MobileConfig>(config);
predictor_ = CreatePaddlePredictor<paddle::lite_api::MobileConfig>(config);
}
bool Model::load_config(const std::string& cfg_dir) {
......@@ -83,12 +82,11 @@ bool Model::predict(const cv::Mat& im, ClsResult* result) {
std::cerr << "Preprocess failed!" << std::endl;
return false;
}
;
predictor_->Run();
predictor_->Run();
std::unique_ptr<const Tensor> output_tensor(std::move(predictor_->GetOutput(0)));
std::unique_ptr<const paddle::lite_api::Tensor> output_tensor(
std::move(predictor_->GetOutput(0)));
const float *outputs_data = output_tensor->mutable_data<float>();
......@@ -97,10 +95,6 @@ bool Model::predict(const cv::Mat& im, ClsResult* result) {
result->category_id = std::distance(outputs_data, ptr);
result->score = *ptr;
result->category = labels[result->category_id];
//for (int i=0;i<sizeof(outputs_data);i++){
// std::cout << labels[i] << std::endl;
// std::cout << outputs_[i] << std::endl;
// }
}
bool Model::predict(const cv::Mat& im, DetResult* result) {
......@@ -115,7 +109,6 @@ bool Model::predict(const cv::Mat& im, DetResult* result) {
"to function predict()!" << std::endl;
return false;
}
inputs_.input_tensor_ = std::move(predictor_->GetInput(0));
cv::Mat im_clone = im.clone();
......@@ -126,18 +119,14 @@ bool Model::predict(const cv::Mat& im, DetResult* result) {
int h = inputs_.new_im_size_[0];
int w = inputs_.new_im_size_[1];
if (name == "YOLOv3") {
std::unique_ptr<Tensor> im_size_tensor(std::move(predictor_->GetInput(1)));
const std::vector<int64_t> IM_SIZE_SHAPE = {1,2};
std::unique_ptr<paddle::lite_api::Tensor> im_size_tensor(
std::move(predictor_->GetInput(1)));
const std::vector<int64_t> IM_SIZE_SHAPE = {1, 2};
im_size_tensor->Resize(IM_SIZE_SHAPE);
auto *im_size_data = im_size_tensor->mutable_data<int>();
memcpy(im_size_data, inputs_.ori_im_size_.data(), 1*2*sizeof(int));
}
predictor_->Run();
auto output_names = predictor_->GetOutputNames();
auto output_box_tensor = predictor_->GetTensor(output_names[0]);
const float *output_box = output_box_tensor->mutable_data<float>();
......@@ -177,27 +166,17 @@ bool Model::predict(const cv::Mat& im, SegResult* result) {
"function predict()!" << std::endl;
return false;
}
inputs_.input_tensor_ = std::move(predictor_->GetInput(0));
cv::Mat im_clone = im.clone();
if (!preprocess(&im_clone, &inputs_)) {
std::cerr << "Preprocess failed!" << std::endl;
return false;
}
std::cout << "Preprocess is done" << std::endl;
predictor_->Run();
auto output_names = predictor_->GetOutputNames();
auto output_label_tensor = predictor_->GetTensor(output_names[0]);
std::cout << "output0" << output_names[0] << std::endl;
std::cout << "output1" << output_names[1] << std::endl;
const int64_t *label_data = output_label_tensor->mutable_data<int64_t>();
std::vector<int64_t> output_label_shape = output_label_tensor->shape();
int size = 1;
......@@ -272,5 +251,4 @@ bool Model::predict(const cv::Mat& im, SegResult* result) {
result->score_map.shape = {mask_score.rows, mask_score.cols};
return true;
}
} // namespce of PaddleX
} // namespace PaddleX
......@@ -12,12 +12,16 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include "include/paddlex/transforms.h"
#include <math.h>
#include <iostream>
#include <string>
#include <vector>
#include <math.h>
#include "include/paddlex/transforms.h"
namespace PaddleX {
......@@ -27,7 +31,7 @@ std::map<std::string, int> interpolations = {{"LINEAR", cv::INTER_LINEAR},
{"CUBIC", cv::INTER_CUBIC},
{"LANCZOS4", cv::INTER_LANCZOS4}};
bool Normalize::Run(cv::Mat* im, ImageBlob* data){
bool Normalize::Run(cv::Mat* im, ImageBlob* data) {
for (int h = 0; h < im->rows; h++) {
for (int w = 0; w < im->cols; w++) {
im->at<cv::Vec3f>(h, w)[0] =
......@@ -66,11 +70,9 @@ bool ResizeByShort::Run(cv::Mat* im, ImageBlob* data) {
int width = static_cast<int>(round(scale * im->cols));
int height = static_cast<int>(round(scale * im->rows));
cv::resize(*im, *im, cv::Size(width, height), 0, 0, cv::INTER_LINEAR);
data->new_im_size_[0] = im->rows;
data->new_im_size_[1] = im->cols;
data->scale = scale;
return true;
}
......@@ -204,7 +206,6 @@ bool Transforms::Run(cv::Mat* im, ImageBlob* data) {
cv::cvtColor(*im, *im, cv::COLOR_BGR2RGB);
}
(*im).convertTo(*im, CV_32FC3);
data->ori_im_size_[0] = im->rows;
data->ori_im_size_[1] = im->cols;
data->new_im_size_[0] = im->rows;
......@@ -219,14 +220,12 @@ bool Transforms::Run(cv::Mat* im, ImageBlob* data) {
// 将图像由NHWC转为NCHW格式
// 同时转为连续的内存块存储到Blob
int height = im->rows;
int width = im->cols;
int channels = im->channels();
int channels = im->channels();
const std::vector<int64_t> INPUT_SHAPE = {1, channels, height, width};
data->input_tensor_->Resize(INPUT_SHAPE);
auto *input_data = data->input_tensor_->mutable_data<float>();
for (size_t c = 0; c < channels; c++) {
for (size_t h = 0; h < height; h++) {
for (size_t w = 0; w < width; w++) {
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册