From bef6b6282ce91f7dbec25142b76fa4dfdbb958c0 Mon Sep 17 00:00:00 2001 From: AshihsKrShrivastava Date: Sun, 5 Apr 2020 11:02:12 +0530 Subject: [PATCH] ReflectionPad2d and ZeroPad2d Subgraph fusion added --- modules/dnn/src/layers/permute_layer.cpp | 2 +- .../dnn/src/onnx/onnx_graph_simplifier.hpp | 6 +- modules/dnn/src/onnx/onnx_importer.cpp | 84 ++++++++++++++----- modules/dnn/test/test_onnx_importer.cpp | 6 ++ 4 files changed, 72 insertions(+), 26 deletions(-) diff --git a/modules/dnn/src/layers/permute_layer.cpp b/modules/dnn/src/layers/permute_layer.cpp index 1931a01b5f..d8e5f66678 100644 --- a/modules/dnn/src/layers/permute_layer.cpp +++ b/modules/dnn/src/layers/permute_layer.cpp @@ -342,7 +342,7 @@ public: CV_Assert(out.dims == numAxes && out.size == outputs[0].size); CV_Assert(inp.isContinuous() && out.isContinuous()); - CV_Assert(inp.type() == CV_32F && out.type() == CV_32F); + // CV_Assert(inp.type() == CV_32F && out.type() == CV_32F); if( numAxes == 4 ) { diff --git a/modules/dnn/src/onnx/onnx_graph_simplifier.hpp b/modules/dnn/src/onnx/onnx_graph_simplifier.hpp index 34924164e5..b4497adb75 100644 --- a/modules/dnn/src/onnx/onnx_graph_simplifier.hpp +++ b/modules/dnn/src/onnx/onnx_graph_simplifier.hpp @@ -27,10 +27,8 @@ void simplifySubgraphs(opencv_onnx::GraphProto& net); template void convertInt64ToInt32(const T1& src, T2& dst, int size) { - for (int i = 0; i < size; i++) { - if (src[i] < std::numeric_limits::min() || src[i] > std::numeric_limits::max()) { - CV_Error(Error::StsOutOfRange, "Input is out of OpenCV 32S range"); - } + for (int i = 0; i < size; i++) + { dst[i] = saturate_cast(src[i]); } } diff --git a/modules/dnn/src/onnx/onnx_importer.cpp b/modules/dnn/src/onnx/onnx_importer.cpp index 1beea181f2..9c60dcaae9 100644 --- a/modules/dnn/src/onnx/onnx_importer.cpp +++ b/modules/dnn/src/onnx/onnx_importer.cpp @@ -39,7 +39,7 @@ class ONNXImporter struct LayerInfo { int layerId; int outputId; - LayerInfo(int _layerId, int _outputId) : 
layerId(_layerId), outputId(_outputId) {} + LayerInfo(int _layerId = 0, int _outputId = 0) : layerId(_layerId), outputId(_outputId) {} }; std::map getGraphTensors( @@ -300,6 +300,15 @@ void ONNXImporter::addLayer(Net& dstNet, LayerParams& layerParams, } } +static void addConstant(const std::string& name, + const Mat& blob, + std::map& constBlobs, + std::map& outShapes) +{ + constBlobs.insert(std::make_pair(name, blob)); + outShapes.insert(std::make_pair(name, shape(blob))); +} + void ONNXImporter::populateNet(Net dstNet) { CV_Assert(model_proto.has_graph()); @@ -533,6 +542,23 @@ void ONNXImporter::populateNet(Net dstNet) if (inp_size == 5) { CV_Assert(constBlobs.find(node_proto.input(4)) != constBlobs.end()); Mat step_blob = getBlob(node_proto, constBlobs, 4); + + // Very strange application for Slice op with tensor reversing. + // We just workaround it for 2d constants. + if (constBlobs.find(node_proto.input(0)) != constBlobs.end() && + axis == 0 && + start_blob.at(0) == -1 && step_blob.at(0) == -1 && + end_blob.at(0) == std::numeric_limits::min()) + { + Mat inp = getBlob(node_proto, constBlobs, 0); + if (inp.dims == 2) + { + Mat flipped; + flip(inp, flipped, 0); + addConstant(layerParams.name, flipped, constBlobs, outShapes); + continue; + } + } CV_CheckEQ(countNonZero(step_blob != 1), 0, "Slice layer only supports steps = 1"); } } @@ -547,8 +573,7 @@ void ONNXImporter::populateNet(Net dstNet) inputs.push_back(inp); runLayer(layerParams, inputs, sliced); CV_Assert(sliced.size() == 1); - constBlobs.insert(std::make_pair(layerParams.name, sliced[0])); - outShapes[layerParams.name] = shape(sliced[0]); + addConstant(layerParams.name, sliced[0], constBlobs, outShapes); continue; } } @@ -585,7 +610,7 @@ void ONNXImporter::populateNet(Net dstNet) Mat blob_1 = getBlob(node_proto, constBlobs, 1); CV_Assert(blob_0.size == blob_1.size); Mat output = isSub ? 
(blob_0 - blob_1) : (blob_0 + blob_1); - constBlobs.insert(std::make_pair(layerParams.name, output)); + addConstant(layerParams.name, output, constBlobs, outShapes); continue; } else if (is_const_0 || is_const_1) @@ -670,7 +695,7 @@ void ONNXImporter::populateNet(Net dstNet) { CV_Assert(node_proto.input_size() == 0); CV_Assert(layerParams.blobs.size() == 1); - constBlobs.insert(std::make_pair(layerParams.name, layerParams.blobs[0])); + addConstant(layerParams.name, layerParams.blobs[0], constBlobs, outShapes); continue; } else if (layer_type == "LSTM") @@ -965,7 +990,7 @@ void ONNXImporter::populateNet(Net dstNet) out = out.reshape(1, inp0.dims, inp0.size); out.dims = inp0.dims; // to workaround dims == 1 - constBlobs.insert(std::make_pair(layerParams.name, out)); + addConstant(layerParams.name, out, constBlobs, outShapes); continue; } } @@ -1033,7 +1058,7 @@ void ONNXImporter::populateNet(Net dstNet) std::vector inputs(1, getBlob(node_proto, constBlobs, 0)), transposed; runLayer(layerParams, inputs, transposed); CV_Assert(transposed.size() == 1); - constBlobs.insert(std::make_pair(layerParams.name, transposed[0])); + addConstant(layerParams.name, transposed[0], constBlobs, outShapes); continue; } } @@ -1069,8 +1094,7 @@ void ONNXImporter::populateNet(Net dstNet) Mat inp = getBlob(node_proto, constBlobs, 0); Mat out = inp.reshape(1, outShape); out.dims = outShape.size(); // to workaround dims == 1 - constBlobs.insert(std::make_pair(layerParams.name, out)); - outShapes[layerParams.name] = shape(out); + addConstant(layerParams.name, out, constBlobs, outShapes); continue; } } @@ -1085,7 +1109,7 @@ void ONNXImporter::populateNet(Net dstNet) std::vector out_size(&input.size[0], &input.size[0] + axis); out_size.push_back(input.total(axis)); Mat output = input.reshape(1, out_size); - constBlobs.insert(std::make_pair(layerParams.name, output)); + addConstant(layerParams.name, output, constBlobs, outShapes); continue; } } @@ -1108,7 +1132,7 @@ void 
ONNXImporter::populateNet(Net dstNet) } Mat out = input.reshape(0, dims); - constBlobs.insert(std::make_pair(layerParams.name, out)); + addConstant(layerParams.name, out, constBlobs, outShapes); continue; } @@ -1210,7 +1234,7 @@ void ONNXImporter::populateNet(Net dstNet) if (layer_id.find(node_proto.input(0)) == layer_id.end()) { std::vector inputs(1, getBlob(node_proto, constBlobs, 0)), outputs; runLayer(layerParams, inputs, outputs); - constBlobs.insert(std::make_pair(layerParams.name, outputs[0])); + addConstant(layerParams.name, outputs[0], constBlobs, outShapes); continue; } } @@ -1224,7 +1248,7 @@ void ONNXImporter::populateNet(Net dstNet) if (layer_id.find(node_proto.input(0)) == layer_id.end()) { Mat input = getBlob(node_proto, constBlobs, 0); Mat out = input.reshape(0, dim); - constBlobs.insert(std::make_pair(layerParams.name, out)); + addConstant(layerParams.name, out, constBlobs, outShapes); continue; } replaceLayerParam(layerParams, "shape", "dim"); @@ -1233,6 +1257,21 @@ void ONNXImporter::populateNet(Net dstNet) else if (layer_type == "Pad") { layerParams.type = "Padding"; + replaceLayerParam(layerParams, "mode", "type"); + if (node_proto.input_size() == 3 || node_proto.input_size() == 2) + { + // Paddings are in order begin0, begin1, .. beginN, end0, end1, ..., endN. + // We need to shuffle it to begin0, end0, begin1, end1, ... 
+ Mat paddings = getBlob(node_proto, constBlobs, 1).reshape(1, 2); + paddings = paddings.t(); + layerParams.set("paddings", DictValue::arrayInt(paddings.ptr(), paddings.total())); + + if (node_proto.input_size() == 3) + { + Mat value = getBlob(node_proto, constBlobs, 2); + layerParams.set("value", value.at(0)); + } + } } else if (layer_type == "Shape") { @@ -1246,7 +1285,7 @@ void ONNXImporter::populateNet(Net dstNet) shapeMat.at(j) = inpShape[j]; shapeMat.dims = 1; - constBlobs.insert(std::make_pair(layerParams.name, shapeMat)); + addConstant(layerParams.name, shapeMat, constBlobs, outShapes); continue; } else if (layer_type == "Cast") @@ -1268,7 +1307,7 @@ void ONNXImporter::populateNet(Net dstNet) default: type = blob.type(); } blob.convertTo(blob, type); - constBlobs.insert(std::make_pair(layerParams.name, blob)); + addConstant(layerParams.name, blob, constBlobs, outShapes); continue; } else @@ -1276,11 +1315,15 @@ void ONNXImporter::populateNet(Net dstNet) } else if (layer_type == "ConstantOfShape" || layer_type == "ConstantFill") { + int depth = CV_32F; float fill_value; if (!layerParams.blobs.empty()) { CV_Assert(!layerParams.has("value")); - fill_value = layerParams.blobs[0].at(0, 0); + depth = layerParams.blobs[0].depth(); + Mat floats; + layerParams.blobs[0].convertTo(floats, CV_32F); + fill_value = floats.at(0, 0); } else fill_value = layerParams.get("value", 0); @@ -1288,9 +1331,8 @@ void ONNXImporter::populateNet(Net dstNet) MatShape inpShape = getBlob(node_proto, constBlobs, 0); for (int i = 0; i < inpShape.size(); i++) CV_CheckGT(inpShape[i], 0, ""); - Mat tensor(inpShape.size(), &inpShape[0], CV_32F, Scalar(fill_value)); - constBlobs.insert(std::make_pair(layerParams.name, tensor)); - outShapes[node_proto.output(0)] = shape(tensor); + Mat tensor(inpShape.size(), &inpShape[0], depth, Scalar(fill_value)); + addConstant(layerParams.name, tensor, constBlobs, outShapes); continue; } else if (layer_type == "Gather") @@ -1320,7 +1362,7 @@ void 
ONNXImporter::populateNet(Net dstNet) out = input.reshape(1, 1).colRange(index, index + 1); out.dims = dims; } - constBlobs.insert(std::make_pair(layerParams.name, out)); + addConstant(layerParams.name, out, constBlobs, outShapes); continue; } else if (layer_type == "Concat") @@ -1345,7 +1387,7 @@ void ONNXImporter::populateNet(Net dstNet) runLayer(layerParams, inputs, concatenated); CV_Assert(concatenated.size() == 1); - constBlobs.insert(std::make_pair(layerParams.name, concatenated[0])); + addConstant(layerParams.name, concatenated[0], constBlobs, outShapes); continue; } } diff --git a/modules/dnn/test/test_onnx_importer.cpp b/modules/dnn/test/test_onnx_importer.cpp index 9743f5d2a1..6f36f8d3d1 100644 --- a/modules/dnn/test/test_onnx_importer.cpp +++ b/modules/dnn/test/test_onnx_importer.cpp @@ -490,6 +490,12 @@ TEST_P(Test_ONNX_layers, LSTM_bidirectional) testONNXModels("lstm_bidirectional", npy, 0, 0, false, false); } +TEST_P(Test_ONNX_layers, Pad2d_Unfused) +{ + testONNXModels("ReflectionPad2d"); + testONNXModels("ZeroPad2d"); +} + INSTANTIATE_TEST_CASE_P(/*nothing*/, Test_ONNX_layers, dnnBackendsAndTargets()); class Test_ONNX_nets : public Test_ONNX_layers -- GitLab