Commit bef6b628 authored by AshihsKrShrivastava

ReflectionPad2d and ZeroPad2d subgraph fusion added

Parent e1f14fca
......@@ -342,7 +342,7 @@ public:
CV_Assert(out.dims == numAxes && out.size == outputs[0].size);
CV_Assert(inp.isContinuous() && out.isContinuous());
- CV_Assert(inp.type() == CV_32F && out.type() == CV_32F);
+ // CV_Assert(inp.type() == CV_32F && out.type() == CV_32F);
if( numAxes == 4 )
{
......
......@@ -27,10 +27,8 @@ void simplifySubgraphs(opencv_onnx::GraphProto& net);
template<typename T1, typename T2>
void convertInt64ToInt32(const T1& src, T2& dst, int size)
{
- for (int i = 0; i < size; i++) {
-     if (src[i] < std::numeric_limits<int32_t>::min() || src[i] > std::numeric_limits<int32_t>::max()) {
-         CV_Error(Error::StsOutOfRange, "Input is out of OpenCV 32S range");
-     }
+ for (int i = 0; i < size; i++)
+ {
dst[i] = saturate_cast<int32_t>(src[i]);
}
}
......
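For context: the simplified loop above relies on cv::saturate_cast clamping, so int64 values outside the CV_32S range no longer raise an error but saturate silently. A minimal standalone sketch of that behavior (the example values are assumptions, not from the patch):

    #include <opencv2/core.hpp>
    #include <cstdint>
    #include <iostream>

    int main()
    {
        const int64_t src[] = { 42, (int64_t)1 << 40, -((int64_t)1 << 40) };
        int32_t dst[3];
        for (int i = 0; i < 3; i++)
            dst[i] = cv::saturate_cast<int32_t>(src[i]); // clamps instead of throwing
        std::cout << dst[0] << ' ' << dst[1] << ' ' << dst[2] << std::endl;
        // prints: 42 2147483647 -2147483648
        return 0;
    }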
......@@ -39,7 +39,7 @@ class ONNXImporter
struct LayerInfo {
int layerId;
int outputId;
- LayerInfo(int _layerId, int _outputId) : layerId(_layerId), outputId(_outputId) {}
+ LayerInfo(int _layerId = 0, int _outputId = 0) : layerId(_layerId), outputId(_outputId) {}
};
std::map<std::string, Mat> getGraphTensors(
......@@ -300,6 +300,15 @@ void ONNXImporter::addLayer(Net& dstNet, LayerParams& layerParams,
}
}
+ static void addConstant(const std::string& name,
+                         const Mat& blob,
+                         std::map<std::string, Mat>& constBlobs,
+                         std::map<std::string, MatShape>& outShapes)
+ {
+     constBlobs.insert(std::make_pair(name, blob));
+     outShapes.insert(std::make_pair(name, shape(blob)));
+ }
void ONNXImporter::populateNet(Net dstNet)
{
CV_Assert(model_proto.has_graph());
......@@ -533,6 +542,23 @@ void ONNXImporter::populateNet(Net dstNet)
if (inp_size == 5) {
CV_Assert(constBlobs.find(node_proto.input(4)) != constBlobs.end());
Mat step_blob = getBlob(node_proto, constBlobs, 4);
+ // Very strange application of the Slice op: tensor reversing.
+ // We just work around it for 2-D constants.
+ if (constBlobs.find(node_proto.input(0)) != constBlobs.end() &&
+     axis == 0 &&
+     start_blob.at<int>(0) == -1 && step_blob.at<int>(0) == -1 &&
+     end_blob.at<int>(0) == std::numeric_limits<int32_t>::min())
+ {
+     Mat inp = getBlob(node_proto, constBlobs, 0);
+     if (inp.dims == 2)
+     {
+         Mat flipped;
+         flip(inp, flipped, 0);
+         addConstant(layerParams.name, flipped, constBlobs, outShapes);
+         continue;
+     }
+ }
CV_CheckEQ(countNonZero(step_blob != 1), 0, "Slice layer only supports steps = 1");
}
}
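The special case above folds an ONNX reversal idiom at import time: Slice with start = -1, step = -1 and end = INT32_MIN along axis 0 means "reverse the tensor", which cv::flip reproduces for 2-D constants. A standalone sketch under that assumption (the helper name is hypothetical):

    #include <opencv2/core.hpp>

    // Reverse the row order of a 2-D constant, as the workaround above does.
    cv::Mat reverseRows(const cv::Mat& inp)
    {
        CV_Assert(inp.dims == 2);
        cv::Mat flipped;
        cv::flip(inp, flipped, 0); // flipCode 0: flip around the x-axis
        return flipped;
    }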
......@@ -547,8 +573,7 @@ void ONNXImporter::populateNet(Net dstNet)
inputs.push_back(inp);
runLayer(layerParams, inputs, sliced);
CV_Assert(sliced.size() == 1);
- constBlobs.insert(std::make_pair(layerParams.name, sliced[0]));
- outShapes[layerParams.name] = shape(sliced[0]);
+ addConstant(layerParams.name, sliced[0], constBlobs, outShapes);
continue;
}
}
......@@ -585,7 +610,7 @@ void ONNXImporter::populateNet(Net dstNet)
Mat blob_1 = getBlob(node_proto, constBlobs, 1);
CV_Assert(blob_0.size == blob_1.size);
Mat output = isSub ? (blob_0 - blob_1) : (blob_0 + blob_1);
- constBlobs.insert(std::make_pair(layerParams.name, output));
+ addConstant(layerParams.name, output, constBlobs, outShapes);
continue;
}
else if (is_const_0 || is_const_1)
......@@ -670,7 +695,7 @@ void ONNXImporter::populateNet(Net dstNet)
{
CV_Assert(node_proto.input_size() == 0);
CV_Assert(layerParams.blobs.size() == 1);
- constBlobs.insert(std::make_pair(layerParams.name, layerParams.blobs[0]));
+ addConstant(layerParams.name, layerParams.blobs[0], constBlobs, outShapes);
continue;
}
else if (layer_type == "LSTM")
......@@ -965,7 +990,7 @@ void ONNXImporter::populateNet(Net dstNet)
out = out.reshape(1, inp0.dims, inp0.size);
out.dims = inp0.dims; // to workaround dims == 1
- constBlobs.insert(std::make_pair(layerParams.name, out));
+ addConstant(layerParams.name, out, constBlobs, outShapes);
continue;
}
}
......@@ -1033,7 +1058,7 @@ void ONNXImporter::populateNet(Net dstNet)
std::vector<Mat> inputs(1, getBlob(node_proto, constBlobs, 0)), transposed;
runLayer(layerParams, inputs, transposed);
CV_Assert(transposed.size() == 1);
- constBlobs.insert(std::make_pair(layerParams.name, transposed[0]));
+ addConstant(layerParams.name, transposed[0], constBlobs, outShapes);
continue;
}
}
......@@ -1069,8 +1094,7 @@ void ONNXImporter::populateNet(Net dstNet)
Mat inp = getBlob(node_proto, constBlobs, 0);
Mat out = inp.reshape(1, outShape);
out.dims = outShape.size(); // to workaround dims == 1
- constBlobs.insert(std::make_pair(layerParams.name, out));
- outShapes[layerParams.name] = shape(out);
+ addConstant(layerParams.name, out, constBlobs, outShapes);
continue;
}
}
......@@ -1085,7 +1109,7 @@ void ONNXImporter::populateNet(Net dstNet)
std::vector<int> out_size(&input.size[0], &input.size[0] + axis);
out_size.push_back(input.total(axis));
Mat output = input.reshape(1, out_size);
- constBlobs.insert(std::make_pair(layerParams.name, output));
+ addConstant(layerParams.name, output, constBlobs, outShapes);
continue;
}
}
......@@ -1108,7 +1132,7 @@ void ONNXImporter::populateNet(Net dstNet)
}
Mat out = input.reshape(0, dims);
- constBlobs.insert(std::make_pair(layerParams.name, out));
+ addConstant(layerParams.name, out, constBlobs, outShapes);
continue;
}
......@@ -1210,7 +1234,7 @@ void ONNXImporter::populateNet(Net dstNet)
if (layer_id.find(node_proto.input(0)) == layer_id.end()) {
std::vector<Mat> inputs(1, getBlob(node_proto, constBlobs, 0)), outputs;
runLayer(layerParams, inputs, outputs);
- constBlobs.insert(std::make_pair(layerParams.name, outputs[0]));
+ addConstant(layerParams.name, outputs[0], constBlobs, outShapes);
continue;
}
}
......@@ -1224,7 +1248,7 @@ void ONNXImporter::populateNet(Net dstNet)
if (layer_id.find(node_proto.input(0)) == layer_id.end()) {
Mat input = getBlob(node_proto, constBlobs, 0);
Mat out = input.reshape(0, dim);
- constBlobs.insert(std::make_pair(layerParams.name, out));
+ addConstant(layerParams.name, out, constBlobs, outShapes);
continue;
}
replaceLayerParam(layerParams, "shape", "dim");
......@@ -1233,6 +1257,21 @@ void ONNXImporter::populateNet(Net dstNet)
else if (layer_type == "Pad")
{
layerParams.type = "Padding";
replaceLayerParam(layerParams, "mode", "type");
if (node_proto.input_size() == 3 || node_proto.input_size() == 2)
{
// Paddings are in order begin0, begin1, .. beginN, end0, end1, ..., endN.
// We need to shuffle it to begin0, end0, begin1, end1, ...
Mat paddings = getBlob(node_proto, constBlobs, 1).reshape(1, 2);
paddings = paddings.t();
layerParams.set("paddings", DictValue::arrayInt(paddings.ptr<int>(), paddings.total()));
if (node_proto.input_size() == 3)
{
Mat value = getBlob(node_proto, constBlobs, 2);
layerParams.set("value", value.at<float>(0));
}
}
}
else if (layer_type == "Shape")
{
......@@ -1246,7 +1285,7 @@ void ONNXImporter::populateNet(Net dstNet)
shapeMat.at<int>(j) = inpShape[j];
shapeMat.dims = 1;
- constBlobs.insert(std::make_pair(layerParams.name, shapeMat));
+ addConstant(layerParams.name, shapeMat, constBlobs, outShapes);
continue;
}
else if (layer_type == "Cast")
......@@ -1268,7 +1307,7 @@ void ONNXImporter::populateNet(Net dstNet)
default: type = blob.type();
}
blob.convertTo(blob, type);
- constBlobs.insert(std::make_pair(layerParams.name, blob));
+ addConstant(layerParams.name, blob, constBlobs, outShapes);
continue;
}
else
......@@ -1276,11 +1315,15 @@ void ONNXImporter::populateNet(Net dstNet)
}
else if (layer_type == "ConstantOfShape" || layer_type == "ConstantFill")
{
+ int depth = CV_32F;
float fill_value;
if (!layerParams.blobs.empty())
{
CV_Assert(!layerParams.has("value"));
- fill_value = layerParams.blobs[0].at<float>(0, 0);
+ depth = layerParams.blobs[0].depth();
+ Mat floats;
+ layerParams.blobs[0].convertTo(floats, CV_32F);
+ fill_value = floats.at<float>(0, 0);
}
else
fill_value = layerParams.get("value", 0);
......@@ -1288,9 +1331,8 @@ void ONNXImporter::populateNet(Net dstNet)
MatShape inpShape = getBlob(node_proto, constBlobs, 0);
for (int i = 0; i < inpShape.size(); i++)
CV_CheckGT(inpShape[i], 0, "");
- Mat tensor(inpShape.size(), &inpShape[0], CV_32F, Scalar(fill_value));
- constBlobs.insert(std::make_pair(layerParams.name, tensor));
- outShapes[node_proto.output(0)] = shape(tensor);
+ Mat tensor(inpShape.size(), &inpShape[0], depth, Scalar(fill_value));
+ addConstant(layerParams.name, tensor, constBlobs, outShapes);
continue;
}
else if (layer_type == "Gather")
......@@ -1320,7 +1362,7 @@ void ONNXImporter::populateNet(Net dstNet)
out = input.reshape(1, 1).colRange(index, index + 1);
out.dims = dims;
}
- constBlobs.insert(std::make_pair(layerParams.name, out));
+ addConstant(layerParams.name, out, constBlobs, outShapes);
continue;
}
else if (layer_type == "Concat")
......@@ -1345,7 +1387,7 @@ void ONNXImporter::populateNet(Net dstNet)
runLayer(layerParams, inputs, concatenated);
CV_Assert(concatenated.size() == 1);
- constBlobs.insert(std::make_pair(layerParams.name, concatenated[0]));
+ addConstant(layerParams.name, concatenated[0], constBlobs, outShapes);
continue;
}
}
......
......@@ -490,6 +490,12 @@ TEST_P(Test_ONNX_layers, LSTM_bidirectional)
testONNXModels("lstm_bidirectional", npy, 0, 0, false, false);
}
+ TEST_P(Test_ONNX_layers, Pad2d_Unfused)
+ {
+     testONNXModels("ReflectionPad2d");
+     testONNXModels("ZeroPad2d");
+ }
INSTANTIATE_TEST_CASE_P(/*nothing*/, Test_ONNX_layers, dnnBackendsAndTargets());
class Test_ONNX_nets : public Test_ONNX_layers
......