Unverified commit e7007668, authored by Maxim Pashchenkov, committed by GitHub

Merge pull request #19792 from mpashchenkov:mp/ie-add-int32

G-API: IE. Adding support for INT32 type.

* Added support for int32

* Added sample for semantic-segmentation-adas-0001

* Alignment

* Alignment 2

* Restart build

* Removed test for sem seg
Parent 105b57c8
#include <algorithm>
#include <cctype>

#include <opencv2/imgproc.hpp>
#include <opencv2/gapi/infer/ie.hpp>
#include <opencv2/gapi/cpu/gcpukernel.hpp>
#include <opencv2/gapi/streaming/cap.hpp>
#include <opencv2/highgui.hpp>
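// A hypothetical invocation of this sample; the binary and file names below are
// placeholders, only the flags come from the 'keys' string defined next:
//   ./example_gapi_semantic_segmentation --input=road.mp4 --output=overlay.avi \
//       --ssm=semantic-segmentation-adas-0001.xml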
const std::string keys =
    "{ h help | | Print this help message }"
    "{ input | | Path to the input video file }"
    "{ output | | Path to the output video file }"
    "{ ssm | semantic-segmentation-adas-0001.xml | Path to OpenVINO IE semantic segmentation model (.xml) }";
// 20 colors for 20 classes of semantic-segmentation-adas-0001
const std::vector<cv::Vec3b> colors = {
    { 128,  64, 128 },
    { 232,  35, 244 },
    {  70,  70,  70 },
    { 156, 102, 102 },
    { 153, 153, 190 },
    { 153, 153, 153 },
    {  30, 170, 250 },
    {   0, 220, 220 },
    {  35, 142, 107 },
    { 152, 251, 152 },
    { 180, 130,  70 },
    {  60,  20, 220 },
    {   0,   0, 255 },
    { 142,   0,   0 },
    {  70,   0,   0 },
    { 100,  60,   0 },
    {  90,   0,   0 },
    { 230,   0,   0 },
    {  32,  11, 119 },
    {   0,  74, 111 },
};
namespace {
std::string get_weights_path(const std::string &model_path) {
    const auto EXT_LEN = 4u;
    const auto sz = model_path.size();
    CV_Assert(sz > EXT_LEN);

    auto ext = model_path.substr(sz - EXT_LEN);
    std::transform(ext.begin(), ext.end(), ext.begin(), [](unsigned char c){
        return static_cast<unsigned char>(std::tolower(c));
    });
    CV_Assert(ext == ".xml");
    return model_path.substr(0u, sz - EXT_LEN) + ".bin";
}
} // anonymous namespace
namespace custom {
G_API_OP(PostProcessing, <cv::GMat(cv::GMat, cv::GMat)>, "sample.custom.post_processing") {
    static cv::GMatDesc outMeta(const cv::GMatDesc &in, const cv::GMatDesc &) {
        return in;
    }
};

GAPI_OCV_KERNEL(OCVPostProcessing, PostProcessing) {
    static void run(const cv::Mat &in, const cv::Mat &detected_classes, cv::Mat &out) {
        // This kernel builds the output image from the class-index table and the
        // colors vector. semantic-segmentation-adas-0001 outputs a blob with the
        // shape [B, C=1, H=1024, W=2048]
        const int outHeight = 1024;
        const int outWidth  = 2048;
        cv::Mat maskImg(outHeight, outWidth, CV_8UC3);
        const int* const classes = detected_classes.ptr<int>();
        for (int rowId = 0; rowId < outHeight; ++rowId) {
            for (int colId = 0; colId < outWidth; ++colId) {
                size_t classId = static_cast<size_t>(classes[rowId * outWidth + colId]);
                maskImg.at<cv::Vec3b>(rowId, colId) =
                    classId < colors.size()
                        ? colors[classId]
                        : cv::Vec3b{0, 0, 0}; // the sample detects 20 classes
            }
        }
        cv::resize(maskImg, out, in.size());
        const float blending = 0.3f;
        out = in * blending + out * (1 - blending);
    }
};
} // namespace custom
int main(int argc, char *argv[]) {
    cv::CommandLineParser cmd(argc, argv, keys);
    if (cmd.has("help")) {
        cmd.printMessage();
        return 0;
    }

    // Prepare parameters first
    const std::string input = cmd.get<std::string>("input");
    const std::string output = cmd.get<std::string>("output");
    const auto model_path = cmd.get<std::string>("ssm");
    const auto weights_path = get_weights_path(model_path);
    const auto device = "CPU";
    G_API_NET(SemSegmNet, <cv::GMat(cv::GMat)>, "semantic-segmentation");
    const auto net = cv::gapi::ie::Params<SemSegmNet> {
        model_path, weights_path, device
    };
    const auto kernels = cv::gapi::kernels<custom::OCVPostProcessing>();
    const auto networks = cv::gapi::networks(net);

    // Now build the graph
    cv::GMat in;
    cv::GMat detected_classes = cv::gapi::infer<SemSegmNet>(in);
    cv::GMat out = custom::PostProcessing::on(in, detected_classes);

    cv::GStreamingCompiled pipeline = cv::GComputation(cv::GIn(in), cv::GOut(out))
        .compileStreaming(cv::compile_args(kernels, networks));
    auto inputs = cv::gin(cv::gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(input));

    // The execution part
    pipeline.setSource(std::move(inputs));
    pipeline.start();

    cv::VideoWriter writer;
    cv::Mat outMat;
    while (pipeline.pull(cv::gout(outMat))) {
        cv::imshow("Out", outMat);
        cv::waitKey(1);
        if (!output.empty()) {
            if (!writer.isOpened()) {
                const auto sz = cv::Size{outMat.cols, outMat.rows};
                writer.open(output, cv::VideoWriter::fourcc('M','J','P','G'), 25.0, sz);
                CV_Assert(writer.isOpened());
            }
            writer << outMat;
        }
    }
    return 0;
}
@@ -104,6 +104,7 @@ inline IE::Layout toIELayout(const std::size_t ndims) {
 inline IE::Precision toIE(int depth) {
     switch (depth) {
     case CV_8U: return IE::Precision::U8;
+    case CV_32S: return IE::Precision::I32;
     case CV_32F: return IE::Precision::FP32;
     default: GAPI_Assert(false && "Unsupported data type");
     }
@@ -113,6 +114,7 @@ inline int toCV(IE::Precision prec) {
     switch (prec) {
     case IE::Precision::U8: return CV_8U;
     case IE::Precision::FP32: return CV_32F;
+    case IE::Precision::I32: return CV_32S;
     default: GAPI_Assert(false && "Unsupported data type");
     }
     return -1;
@@ -154,6 +156,7 @@ inline IE::Blob::Ptr wrapIE(const cv::Mat &mat, cv::gapi::ie::TraitAs hint) {
         case CV_##E: return IE::make_shared_blob<T>(tDesc, const_cast<T*>(mat.ptr<T>()))
         HANDLE(8U, uint8_t);
         HANDLE(32F, float);
+        HANDLE(32S, int);
 #undef HANDLE
         default: GAPI_Assert(false && "Unsupported data type");
     }
@@ -189,6 +192,7 @@ inline void copyFromIE(const IE::Blob::Ptr &blob, MatType &mat) {
             break;
         HANDLE(U8, uint8_t);
         HANDLE(FP32, float);
+        HANDLE(I32, int);
 #undef HANDLE
         default: GAPI_Assert(false && "Unsupported data type");
     }
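Taken together, these hunks add the CV_32S / IE::Precision::I32 mapping to each conversion path (toIE, toCV, wrapIE, copyFromIE). As a minimal standalone sketch of the user-visible effect (illustrative only, not part of the patch): with the mapping in place, an INT32 network output reaches user code as a CV_32S cv::Mat, so class indices can be read directly as int, which is what the sample's PostProcessing kernel relies on. The Mat below is a hand-made stand-in for what cv::gapi::infer<> would deliver.

#include <iostream>
#include <opencv2/core.hpp>

int main() {
    // Hand-made stand-in for an INT32 inference output: a single-channel
    // class-index map. With I32 mapped to CV_32S, this is the depth user code sees.
    cv::Mat detected_classes(4, 4, CV_32SC1, cv::Scalar(7));
    CV_Assert(detected_classes.depth() == CV_32S);

    // Indices are read directly as int; no explicit convertTo() is required.
    const int* const classes = detected_classes.ptr<int>();
    std::cout << "class at (0,0): " << classes[0] << std::endl;
    return 0;
}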