Unverified commit 5df6b4a7 authored by Dmitry Kurtaev, committed by GitHub

Merge pull request #23325 from dkurt:dnn_input_info

Propagate inputs info for ONNX and TFLite models

### Pull Request Readiness Checklist

Needed for generic applications such as benchmarking pipelines, so that OpenCV can report the default input shapes specified in the models.
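For illustration, here is a minimal sketch of how an application could read those defaults after this change. The model path is a placeholder, and input layer id 0 follows the convention used in the tests below:

```cpp
#include <opencv2/dnn.hpp>
#include <iostream>

int main()
{
    using namespace cv::dnn;
    // Placeholder path; ONNX and TFLite models are handled the same way.
    Net net = readNet("model.onnx");

    // Ask the network for the input/output shapes it knows about without
    // providing any data. An empty shape means the input is dynamic.
    std::vector<MatShape> inLayerShapes, outLayerShapes;
    net.getLayerShapes(MatShape(), 0, inLayerShapes, outLayerShapes);

    for (size_t i = 0; i < inLayerShapes.size(); ++i)
    {
        std::cout << "input " << i << ":";
        for (int d : inLayerShapes[i])
            std::cout << " " << d;
        std::cout << std::endl;
    }
    return 0;
}
```

A benchmarking pipeline could then allocate dummy blobs of exactly these shapes instead of hard-coding them per model.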

See details at https://github.com/opencv/opencv/wiki/How_to_contribute#making-a-good-pull-request

- [x] I agree to contribute to the project under Apache 2 License.
- [x] To the best of my knowledge, the proposed patch is not based on code under GPL or another license that is incompatible with OpenCV
- [x] The PR is proposed to the proper branch
- [x] There is a reference to the original bug report and related work
- [x] There is accuracy test, performance test and test data in opencv_extra repository, if applicable
      Patch to opencv_extra has the same branch name.
- [x] The feature is well documented and sample code can be built with the project CMake
Parent a60408cd
@@ -1400,6 +1400,7 @@ void Net::Impl::setInput(InputArray blob, const String& name, double scalefactor
Mat blob_ = blob.getMat(); // can't use InputArray directly due MatExpr stuff
MatShape blobShape = shape(blob_);
#if 0 // TODO: DNNTestNetwork.MobileNet_SSD_Caffe_Different_Width_Height/0
if (pin.lid == 0)
{
CV_Assert(!netInputLayer.empty());
@@ -1411,7 +1412,6 @@ void Net::Impl::setInput(InputArray blob, const String& name, double scalefactor
if (!inputShapeLimitation.empty())
{
CV_CheckEQ(inputShapeLimitation.size(), blobShape.size(), "");
#if 0 // TODO: DNNTestNetwork.MobileNet_SSD_Caffe_Different_Width_Height/0
const size_t dims = inputShapeLimitation.size();
for (size_t dim = 0; dim < dims; dim++)
{
@@ -1419,10 +1419,10 @@ void Net::Impl::setInput(InputArray blob, const String& name, double scalefactor
continue; // don't limit batch
CV_CheckEQ(inputShapeLimitation[dim], blobShape[dim], "");
}
#endif
}
}
}
#endif
LayerData& ld = layers[pin.lid];
const int numInputs = std::max(pin.oid + 1, (int)ld.requiredOutputs.size());
......
@@ -891,6 +891,11 @@ void ONNXImporter::populateNet()
}
dstNet.setInputsNames(netInputs);
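// Propagate the model's declared input shapes to the network; skipped when the shapes are dynamic.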
if (!hasDynamicShapes)
{
for (int i = 0; i < netInputs.size(); ++i)
dstNet.setInputShape(netInputs[i], outShapes[netInputs[i]]);
}
// dump outputs
for (int i = 0; i < graph_proto.output_size(); ++i)
......
@@ -163,6 +163,8 @@ void TFLiteImporter::populateNet()
CV_Assert(modelTensors);
layouts.resize(modelTensors->size(), DATA_LAYOUT_UNKNOWN);
size_t subgraph_inputs_size = subgraph_inputs->size();
std::vector<std::string> inputsNames(subgraph_inputs_size);
std::vector<MatShape> inputsShapes(subgraph_inputs_size);
for (size_t i = 0; i < subgraph_inputs_size; ++i)
{
int idx = subgraph_inputs->Get(i);
@@ -171,7 +173,24 @@ void TFLiteImporter::populateNet()
if (!tensor)
CV_Error(Error::StsError, cv::format("DNN/TFLite: subgraph input %d (%d) is NULL", (int)i, idx));
layouts[idx] = estimateLayout(*tensor);
// Keep info about the original input names and shapes
inputsNames[i] = tensor->name()->str();
std::vector<int> shape(tensor->shape()->begin(), tensor->shape()->end());
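// TFLite stores NHWC shapes; the two swaps below reorder them to OpenCV's NCHW convention.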
if (layouts[idx] == DATA_LAYOUT_NHWC) {
CV_CheckEQ(shape.size(), (size_t)4, "");
std::swap(shape[2], shape[3]);
std::swap(shape[1], shape[2]);
}
inputsShapes[i] = shape;
}
dstNet.setInputsNames(inputsNames);
for (size_t i = 0; i < subgraph_inputs_size; ++i)
{
dstNet.setInputShape(inputsNames[i], inputsShapes[i]);
}
const auto& all_operators = *subgraph_operators;
const size_t all_operators_size = all_operators.size();
for (size_t op_idx = 0; op_idx < all_operators_size; ++op_idx)
......
@@ -30,6 +30,27 @@ public:
pb
};
void testInputShapes(const Net& net, const std::vector<Mat>& inps)
{
std::vector<MatShape> inLayerShapes;
std::vector<MatShape> outLayerShapes;
net.getLayerShapes(MatShape(), 0, inLayerShapes, outLayerShapes);
ASSERT_EQ(inLayerShapes.size(), inps.size());
for (int i = 0; i < inps.size(); ++i) {
bool hasDynamicShapes = inLayerShapes[i].empty();
if (hasDynamicShapes)
continue;
if (inLayerShapes[i].size() == 1) { // 1D input
ASSERT_EQ(shape(inLayerShapes[i][0], 1), shape(inps[i]));
} else {
// Compare all axes except batch dimension which is variable.
inLayerShapes[i][0] = inps[i].size[0];
ASSERT_EQ(inLayerShapes[i], shape(inps[i]));
}
}
}
void testONNXModels(const String& basename, const Extension ext = npy,
const double l1 = 0, const float lInf = 0, const bool useSoftmax = false,
bool checkNoFallbacks = true, int numInps = 1)
@@ -54,6 +75,8 @@ public:
Net net = readNetFromONNX(onnxmodel);
ASSERT_FALSE(net.empty());
testInputShapes(net, inps);
net.setPreferableBackend(backend);
net.setPreferableTarget(target);
@@ -2315,6 +2338,8 @@ TEST_P(Test_ONNX_nets, Resnet34_kinetics)
lInf = 0.06;
}
testInputShapes(net, {input0});
checkBackend(&input0, &ref0);
net.setInput(input0);
Mat out = net.forward().clone();
......
@@ -11,6 +11,7 @@ Test for TFLite models loading
#include <opencv2/dnn/layer.details.hpp> // CV_DNN_REGISTER_LAYER_CLASS
#include <opencv2/dnn/utils/debug_utils.hpp>
#include <opencv2/dnn/shape_utils.hpp>
#ifdef OPENCV_TEST_DNN_TFLITE
@@ -19,9 +20,21 @@ namespace opencv_test { namespace {
using namespace cv;
using namespace cv::dnn;
void testInputShapes(const Net& net, const std::vector<Mat>& inps) {
std::vector<MatShape> inLayerShapes;
std::vector<MatShape> outLayerShapes;
net.getLayerShapes(MatShape(), 0, inLayerShapes, outLayerShapes);
ASSERT_EQ(inLayerShapes.size(), inps.size());
for (int i = 0; i < inps.size(); ++i) {
ASSERT_EQ(inLayerShapes[i], shape(inps[i]));
}
}
void testModel(const std::string& modelName, const Mat& input, double l1 = 1e-5, double lInf = 1e-4)
{
Net net = readNet(findDataFile("dnn/tflite/" + modelName + ".tflite", false));
testInputShapes(net, {input});
net.setInput(input);
std::vector<String> outNames = net.getUnconnectedOutLayersNames();
@@ -72,6 +85,7 @@ TEST(Test_TFLite, max_unpooling)
cvtColor(input, input, COLOR_BGR2RGBA);
input = input.mul(Scalar(1, 1, 1, 0));
input = blobFromImage(input, 1.0 / 255);
testInputShapes(net, {input});
net.setInput(input);
std::vector<std::vector<Mat> > outs;
......