Commit cfc78194 authored by Lubov Batanina, committed by Alexander Alekhin

Merge pull request #15811 from l-bat:eltwise_div

Supported ONNX Squeeze, ReduceL2 and Eltwise::DIV

* Support eltwise div

* Fix test

* OpenCL support added

* refactoring

* fix code style

* Only squeeze with axes supported
Parent af233753
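
For reference, a minimal standalone sketch (not part of the patch) of what the new Eltwise "div" operation computes: the first input blob is divided element-wise by each subsequent input, out = in0 / in1 / ... / in(n-1). It mirrors the DIV case added to the plain CPU forward path in the diff below; the helper name is illustrative only.

    // Sketch only: fold element-wise division across n same-shaped inputs.
    #include <opencv2/core.hpp>
    #include <vector>

    cv::Mat eltwiseDiv(const std::vector<cv::Mat>& inputs)
    {
        CV_Assert(inputs.size() >= 2);
        cv::Mat out;
        cv::divide(inputs[0], inputs[1], out);    // out = in0 / in1
        for (size_t i = 2; i < inputs.size(); ++i)
            cv::divide(out, inputs[i], out);      // out /= in_i
        return out;
    }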
@@ -62,6 +62,7 @@ public:
         PROD = 0,
         SUM = 1,
-        MAX = 2
+        MAX = 2,
+        DIV = 3
     } op;
     std::vector<float> coeffs;
     bool variableChannels;
@@ -79,6 +80,8 @@ public:
             op = SUM;
         else if (operation == "max")
             op = MAX;
+        else if (operation == "div")
+            op = DIV;
         else
             CV_Error(cv::Error::StsBadArg, "Unknown operation type \"" + operation + "\"");
     }
@@ -271,6 +274,18 @@ public:
                         srcptr0 = (const float*)dstptr;
                     }
                 }
+                else if( op == DIV )
+                {
+                    for( k = 1; k < n; k++ )
+                    {
+                        const float* srcptr1 = srcs[k]->ptr<float>() + globalDelta;
+                        for( j = 0; j < blockSize; j++ )
+                        {
+                            dstptr[j] = srcptr0[j]/srcptr1[j];
+                        }
+                        srcptr0 = (const float*)dstptr;
+                    }
+                }
                 else if( op == MAX )
                 {
                     for( k = 1; k < n; k++ )
@@ -393,6 +408,11 @@ public:
             for (int i = 2; i < inputs.size(); ++i)
                 multiply(inputs[i], outputs[0], outputs[0]);
             break;
+        case DIV:
+            divide(inputs[0], inputs[1], outputs[0]);
+            for (int i = 2; i < inputs.size(); ++i)
+                divide(outputs[0], inputs[i], outputs[0]);
+            break;
         case MAX:
             max(inputs[0], inputs[1], outputs[0]);
             for (int i = 2; i < inputs.size(); ++i)
@@ -486,6 +506,8 @@ public:
             ieLayer.setEltwiseType(InferenceEngine::Builder::EltwiseLayer::EltwiseType::SUM);
         else if (op == PROD)
             ieLayer.setEltwiseType(InferenceEngine::Builder::EltwiseLayer::EltwiseType::MUL);
+        else if (op == DIV)
+            ieLayer.setEltwiseType(InferenceEngine::Builder::EltwiseLayer::EltwiseType::DIV);
         else if (op == MAX)
             ieLayer.setEltwiseType(InferenceEngine::Builder::EltwiseLayer::EltwiseType::MAX);
         else
......
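
In the ONNX importer changes below, Div now dispatches on whether the divisor is a constant initializer: a non-constant second input becomes an Eltwise layer with operation "div", a constant scalar becomes a Power layer with scale = 1/b, and a constant tensor becomes a Scale layer whose weights are the element-wise reciprocal of the blob. A sketch of that reciprocal trick, assuming a CV_32F divisor (illustration, not patch code):

    // Sketch only: dividing by a constant blob b equals scaling by 1/b,
    // so the importer stores 1/b as the Scale layer's weights.
    #include <opencv2/core.hpp>

    cv::Mat reciprocalWeights(cv::Mat b)   // b: CV_32F constant divisor
    {
        cv::divide(1.0, b, b);             // b := 1 / b, element-wise
        return b;
    }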
@@ -520,19 +520,27 @@ void ONNXImporter::populateNet(Net dstNet)
         }
         else if (layer_type == "Div")
         {
-            Mat blob = getBlob(node_proto, constBlobs, 1);
-            CV_Assert_N(blob.type() == CV_32F, blob.total());
-            if (blob.total() == 1)
+            if (constBlobs.find(node_proto.input(1)) == constBlobs.end())
             {
-                layerParams.set("scale", 1.0f / blob.at<float>(0));
-                layerParams.type = "Power";
+                layerParams.type = "Eltwise";
+                layerParams.set("operation", "div");
             }
             else
             {
-                layerParams.type = "Scale";
-                divide(1.0, blob, blob);
-                layerParams.blobs.push_back(blob);
-                layerParams.set("bias_term", false);
+                Mat blob = getBlob(node_proto, constBlobs, 1);
+                CV_Assert_N(blob.type() == CV_32F, blob.total());
+                if (blob.total() == 1)
+                {
+                    layerParams.set("scale", 1.0f / blob.at<float>(0));
+                    layerParams.type = "Power";
+                }
+                else
+                {
+                    layerParams.type = "Scale";
+                    divide(1.0, blob, blob);
+                    layerParams.blobs.push_back(blob);
+                    layerParams.set("bias_term", false);
+                }
             }
         }
         else if (layer_type == "Neg")
@@ -771,6 +779,32 @@ void ONNXImporter::populateNet(Net dstNet)
                 continue;
             }
         }
+        else if (layer_type == "ReduceL2")
+        {
+            CV_Assert_N(node_proto.input_size() == 1, layerParams.has("axes"));
+            CV_Assert(graph_proto.node_size() > li + 1 && graph_proto.node(li + 1).op_type() == "Div");
+            ++li;
+            layerParams.type = "Normalize";
+            DictValue axes_dict = layerParams.get("axes");
+            if (axes_dict.size() != 1)
+                CV_Error(Error::StsNotImplemented, "Multidimensional reduceL2");
+            int axis = axes_dict.getIntValue(0);
+            layerParams.set("axis",axis);
+            layerParams.set("end_axis", axis);
+        }
+        else if (layer_type == "Squeeze")
+        {
+            CV_Assert_N(node_proto.input_size() == 1, layerParams.has("axes"));
+            DictValue axes_dict = layerParams.get("axes");
+            if (axes_dict.size() != 1)
+                CV_Error(Error::StsNotImplemented, "Multidimensional squeeze");
+            int axis = axes_dict.getIntValue(0);
+            layerParams.set("axis", axis - 1);
+            layerParams.set("end_axis", axis);
+            layerParams.type = "Flatten";
+        }
         else if (layer_type == "Unsqueeze")
         {
             CV_Assert(node_proto.input_size() == 1);
......
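
Two mappings above deserve a note. A ReduceL2 node that is immediately followed by a Div is fused into a single Normalize (L2) layer over the given axis. Squeeze is implemented with Flatten, using the fact that removing a singleton axis k is the same as flattening axes k-1..k into one; a shape-only sketch under that assumption (k >= 1, illustrative helper name):

    // Sketch only: squeezing singleton axis k == flattening axes [k-1, k],
    // e.g. (2, 3, 1, 4) with k = 2 becomes (2, 3, 4).
    #include <cassert>
    #include <vector>

    std::vector<int> squeezeViaFlatten(std::vector<int> shape, int k)
    {
        assert(k >= 1 && shape[k] == 1);
        shape[k - 1] *= shape[k];              // merge the two axes
        shape.erase(shape.begin() + k);
        return shape;
    }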
@@ -318,6 +318,28 @@ TEST_P(Test_ONNX_layers, MultyInputs)
     expectNoFallbacksFromIE(net);
 }
 
+TEST_P(Test_ONNX_layers, Div)
+{
+    const String model = _tf("models/div.onnx");
+    Net net = readNetFromONNX(model);
+    ASSERT_FALSE(net.empty());
+
+    net.setPreferableBackend(backend);
+    net.setPreferableTarget(target);
+
+    Mat inp1 = blobFromNPY(_tf("data/input_div_0.npy"));
+    Mat inp2 = blobFromNPY(_tf("data/input_div_1.npy"));
+    Mat ref = blobFromNPY(_tf("data/output_div.npy"));
+    checkBackend(&inp1, &ref);
+
+    net.setInput(inp1, "0");
+    net.setInput(inp2, "1");
+    Mat out = net.forward();
+
+    normAssert(ref, out, "", default_l1, default_lInf);
+    expectNoFallbacksFromIE(net);
+}
+
 TEST_P(Test_ONNX_layers, DynamicReshape)
 {
     if (backend == DNN_BACKEND_INFERENCE_ENGINE)
@@ -333,6 +355,16 @@ TEST_P(Test_ONNX_layers, Reshape)
     testONNXModels("unsqueeze");
 }
 
+TEST_P(Test_ONNX_layers, Squeeze)
+{
+    testONNXModels("squeeze");
+}
+
+TEST_P(Test_ONNX_layers, ReduceL2)
+{
+    testONNXModels("reduceL2");
+}
+
 TEST_P(Test_ONNX_layers, Slice)
 {
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
......