// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.

// Copyright (C) 2018, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.

#include "../precomp.hpp"
#include <opencv2/dnn/shape_utils.hpp>

#ifdef HAVE_PROTOBUF

#include <iostream>
#include <fstream>
#include <string>
#include <limits>
#include <algorithm>


#if defined(__GNUC__) && __GNUC__ >= 5
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wsuggest-override"
#endif
#include "opencv-onnx.pb.h"
#if defined(__GNUC__) && __GNUC__ >= 5
#pragma GCC diagnostic pop
#endif

#include "onnx_graph_simplifier.hpp"

namespace cv {
namespace dnn {
CV__DNN_EXPERIMENTAL_NS_BEGIN


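// Parses a serialized ONNX ModelProto and translates its graph into OpenCV dnn layers.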
class ONNXImporter
{
    opencv_onnx::ModelProto model_proto;
    struct LayerInfo {
        int layerId;
        int outputId;
        LayerInfo(int _layerId, int _outputId) : layerId(_layerId), outputId(_outputId) {}
    };

    std::map<std::string, Mat> getGraphTensors(
                                    const opencv_onnx::GraphProto& graph_proto);
    Mat getBlob(const opencv_onnx::NodeProto& node_proto, const std::map<std::string, Mat>& constBlobs, int index);

    LayerParams getLayerParams(const opencv_onnx::NodeProto& node_proto);
    bool isCeilMode(const LayerParams& layerParams);

public:

    ONNXImporter(const char *onnxFile)
    {
        std::fstream input(onnxFile, std::ios::in | std::ios::binary);

        if (!model_proto.ParseFromIstream(&input))
            CV_Error(Error::StsUnsupportedFormat, "Failed to parse onnx model");
    }

    ONNXImporter(const char* buffer, size_t sizeBuffer)
    {
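        // Minimal streambuf that exposes an existing in-memory buffer to std::istream without copying.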
        struct _Buf : public std::streambuf
        {
            _Buf(const char* buffer, size_t sizeBuffer)
            {
                char* p = const_cast<char*>(buffer);
                setg(p, p, p + sizeBuffer);
            }
        };

        _Buf buf(buffer, sizeBuffer);
        std::istream input(&buf);

        if (!model_proto.ParseFromIstream(&input))
            CV_Error(Error::StsUnsupportedFormat, "Failed to parse onnx model from in-memory byte array.");
    }

    void populateNet(Net dstNet);
};

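// Renames a LayerParams key in place (e.g. ONNX "perm" becomes OpenCV "order").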
inline void replaceLayerParam(LayerParams& layerParams, const String& oldKey, const String& newKey)
{
    if (layerParams.has(oldKey)) {
        layerParams.set(newKey, layerParams.get(oldKey));
        layerParams.erase(oldKey);
    }
}

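// Frees the tensor's raw data once its contents have been copied into a cv::Mat.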
void releaseONNXTensor(opencv_onnx::TensorProto& tensor_proto)
{
    if (!tensor_proto.raw_data().empty()) {
        delete tensor_proto.release_raw_data();
    }
}

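// Instantiates a layer outside of any network and runs it once on the given inputs.
// The importer uses this to constant-fold nodes whose inputs are all known at import time.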
void runLayer(LayerParams& params, const std::vector<Mat>& inputs,
              std::vector<Mat>& outputs)
{
    Ptr<Layer> layer = LayerFactory::createLayerInstance(params.type, params);
    CV_Assert((bool)layer);

    std::vector<MatShape> inpShapes(inputs.size());
    int ddepth = CV_32F;
    for (size_t i = 0; i < inputs.size(); ++i)
    {
        inpShapes[i] = shape(inputs[i]);
        if (i > 0 && ddepth != inputs[i].depth())
            CV_Error(Error::StsNotImplemented, "Mixed input data types.");
        ddepth = inputs[i].depth();
    }

    std::vector<MatShape> outShapes, internalShapes;
    layer->getMemoryShapes(inpShapes, 0, outShapes, internalShapes);

    std::vector<Mat> internals(internalShapes.size());
    outputs.resize(outShapes.size());
    for (size_t i = 0; i < outShapes.size(); ++i)
        outputs[i].create(outShapes[i], ddepth);
    for (size_t i = 0; i < internalShapes.size(); ++i)
        internals[i].create(internalShapes[i], ddepth);

    layer->finalize(inputs, outputs);
    layer->forward(inputs, outputs, internals);
}

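// Collects all graph initializers (pretrained weights) into a name -> Mat map.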
std::map<std::string, Mat> ONNXImporter::getGraphTensors(
                                        const opencv_onnx::GraphProto& graph_proto)
{
    opencv_onnx::TensorProto tensor_proto;
    std::map<std::string, Mat> layers_weights;

    for (int i = 0; i < graph_proto.initializer_size(); i++)
    {
        tensor_proto = graph_proto.initializer(i);
        Mat mat = getMatFromTensor(tensor_proto);
        releaseONNXTensor(tensor_proto);
        layers_weights.insert(std::make_pair(tensor_proto.name(), mat));
    }
    return layers_weights;
}

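// Converts a repeated int64 protobuf field into a 32-bit integer DictValue array.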
static DictValue parse(const ::google::protobuf::RepeatedField< ::google::protobuf::int64>& src) {
    std::vector<int32_t> dst(src.size());
    convertInt64ToInt32(src, dst, src.size());
    return DictValue::arrayInt(&dst[0], src.size());
}

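// Translates ONNX node attributes into OpenCV LayerParams, mapping known convolution
// and pooling attributes to their OpenCV names and storing the rest under the ONNX name.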
LayerParams ONNXImporter::getLayerParams(const opencv_onnx::NodeProto& node_proto)
{
    LayerParams lp;
    for(int i = 0; i < node_proto.attribute_size(); i++)
    {
        opencv_onnx::AttributeProto attribute_proto = node_proto.attribute(i);
        std::string attribute_name = attribute_proto.name();

        if(attribute_name == "kernel_shape")
        {
            CV_Assert(attribute_proto.ints_size() == 2 || attribute_proto.ints_size() == 3);
            lp.set("kernel_size", parse(attribute_proto.ints()));
        }
        else if(attribute_name == "strides")
        {
            CV_Assert(attribute_proto.ints_size() == 2 || attribute_proto.ints_size() == 3);
            lp.set("stride", parse(attribute_proto.ints()));
        }
        else if(attribute_name == "pads")
        {
            if (node_proto.op_type() == "Pad")
            {
                // Padding layer.
                // Paddings are in order begin0, begin1, .. beginN, end0, end1, ..., endN.
                // We need to shuffle it to begin0, end0, begin1, end1, ...
                CV_Assert(attribute_proto.ints_size() % 2 == 0);
                const int dims = attribute_proto.ints_size() / 2;
                std::vector<int32_t> paddings;
                paddings.reserve(attribute_proto.ints_size());
                for (int i = 0; i < dims; ++i)
                {
                    paddings.push_back(attribute_proto.ints(i));
                    paddings.push_back(attribute_proto.ints(dims + i));
                }
                lp.set("paddings", DictValue::arrayInt(&paddings[0], paddings.size()));
            }
            else
            {
                // Convolution or pooling.
                CV_Assert(attribute_proto.ints_size() == 4 || attribute_proto.ints_size() == 6);
                lp.set("pad", parse(attribute_proto.ints()));
            }
        }
        else if(attribute_name == "auto_pad")
        {
            if (attribute_proto.s() == "SAME_UPPER" || attribute_proto.s() == "SAME_LOWER") {
                lp.set("pad_mode",  "SAME");
            }
            else if (attribute_proto.s() == "VALID") {
                lp.set("pad_mode", "VALID");
            }
        }
        else if(attribute_name == "dilations")
        {
            CV_Assert(attribute_proto.ints_size() == 2 || attribute_proto.ints_size() == 3);
            lp.set("dilation", parse(attribute_proto.ints()));
        }
        else if (attribute_proto.has_i())
        {
            ::google::protobuf::int64 src = attribute_proto.i();
            if (src < std::numeric_limits<int32_t>::min() || src > std::numeric_limits<int32_t>::max())
                CV_Error(Error::StsOutOfRange, "Input is out of OpenCV 32S range");
            else
                lp.set(attribute_name, saturate_cast<int32_t>(src));
        }
        else if (attribute_proto.has_f())
        {
            lp.set(attribute_name, attribute_proto.f());
        }
        else if (attribute_proto.has_s())
        {
            lp.set(attribute_name, attribute_proto.s());
        }
        else if (attribute_proto.floats_size() > 0)
        {
            lp.set(attribute_name, DictValue::arrayReal(
                attribute_proto.floats().data(), attribute_proto.floats_size()));
        }
        else if (attribute_proto.ints_size() > 0)
        {
            lp.set(attribute_proto.name(), parse(attribute_proto.ints()));
        }
        else if (attribute_proto.has_t())
        {
            opencv_onnx::TensorProto tensor = attribute_proto.t();
            Mat blob = getMatFromTensor(tensor);
            lp.blobs.push_back(blob);
        }
        else if (attribute_proto.has_g() || attribute_proto.strings_size() > 0 ||
                    attribute_proto.tensors_size() > 0 || attribute_proto.graphs_size() > 0)
        {
                CV_Error(Error::StsNotImplemented, "Unexpected attribute type");
        }
        else
            CV_Error(Error::StsNotImplemented, "Unsupported attribute type");
    }
    return lp;
}

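// Looks up a node input in the constant-blob map; raises an error if the input is not a known constant.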
Mat ONNXImporter::getBlob(const opencv_onnx::NodeProto& node_proto,
                    const std::map<std::string, Mat>& constBlobs, int index)
{
    CV_Assert(index < node_proto.input_size());
    std::map<std::string, Mat>::const_iterator constBlob;
    constBlob = constBlobs.find(node_proto.input(index));
    if (constBlob == constBlobs.end()) {
        CV_Error(Error::StsObjectNotFound,
             "Blob " + node_proto.input(index) + " not found in const blobs");
    }
    return constBlob->second;
}

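// Translates the ONNX graph into dstNet: simplifies known subgraphs, folds nodes whose
// inputs are all constant, converts the remaining nodes to layers and wires their inputs,
// tracking output shapes along the way.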
void ONNXImporter::populateNet(Net dstNet)
{
    CV_Assert(model_proto.has_graph());
    opencv_onnx::GraphProto graph_proto = model_proto.graph();

    simplifySubgraphs(graph_proto);

    std::map<std::string, Mat> constBlobs = getGraphTensors(graph_proto);
    // List of internal blobs shapes.
    std::map<std::string, MatShape> outShapes;
    // Add all input shapes: both constant blobs and the network's actual inputs.
    for (int i = 0; i < graph_proto.input_size(); ++i)
    {
        opencv_onnx::ValueInfoProto valueInfoProto = graph_proto.input(i);
        CV_Assert(valueInfoProto.has_type());
        opencv_onnx::TypeProto typeProto = valueInfoProto.type();
        CV_Assert(typeProto.has_tensor_type());
        opencv_onnx::TypeProto::Tensor tensor = typeProto.tensor_type();
        CV_Assert(tensor.has_shape());
        opencv_onnx::TensorShapeProto tensorShape = tensor.shape();

        MatShape inpShape(tensorShape.dim_size());
        for (int j = 0; j < inpShape.size(); ++j)
        {
            inpShape[j] = tensorShape.dim(j).dim_value();
        }
        outShapes[valueInfoProto.name()] = inpShape;
    }

    std::string framework_name;
    if (model_proto.has_producer_name()) {
        framework_name = model_proto.producer_name();
    }

    // create map with network inputs (without const blobs)
    std::map<std::string, LayerInfo> layer_id;
    std::map<std::string, LayerInfo>::iterator layerId;
    std::map<std::string, MatShape>::iterator shapeIt;
    // fill map: push layer name, layer id and output id
    std::vector<String> netInputs;
    for (int j = 0; j < graph_proto.input_size(); j++)
    {
        const std::string& name = graph_proto.input(j).name();
        if (constBlobs.find(name) == constBlobs.end()) {
            netInputs.push_back(name);
            layer_id.insert(std::make_pair(name, LayerInfo(0, netInputs.size() - 1)));
        }
    }
    dstNet.setInputsNames(netInputs);

    int layersSize = graph_proto.node_size();
    LayerParams layerParams;
    opencv_onnx::NodeProto node_proto;

    for(int li = 0; li < layersSize; li++)
    {
        node_proto = graph_proto.node(li);
        layerParams = getLayerParams(node_proto);
        CV_Assert(node_proto.output_size() >= 1);
        layerParams.name = node_proto.output(0);

        std::string layer_type = node_proto.op_type();
        layerParams.type = layer_type;

        if (layer_type == "MaxPool")
        {
            layerParams.type = "Pooling";
            layerParams.set("pool", "MAX");
            layerParams.set("ceil_mode", layerParams.has("pad_mode"));
        }
        else if (layer_type == "AveragePool")
        {
            layerParams.type = "Pooling";
            layerParams.set("pool", "AVE");
            layerParams.set("ceil_mode", layerParams.has("pad_mode"));
            layerParams.set("ave_pool_padded_area", framework_name == "pytorch");
        }
        else if (layer_type == "GlobalAveragePool" || layer_type == "GlobalMaxPool" || layer_type == "ReduceMean")
        {
            CV_Assert(node_proto.input_size() == 1);
            layerParams.type = "Pooling";
            layerParams.set("pool", layer_type == "GlobalMaxPool"? "MAX" : "AVE");
            layerParams.set("global_pooling", layer_type == "GlobalAveragePool" || layer_type == "GlobalMaxPool");

            if (layer_type == "ReduceMean")
            {
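                // ReduceMean over spatial axes maps to average pooling with kernel sizes
                // equal to the reduced dimensions.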
                if (layerParams.get<int>("keepdims") == 0 || !layerParams.has("axes"))
                    CV_Error(Error::StsNotImplemented, "Unsupported mode of ReduceMean operation.");

                MatShape inpShape = outShapes[node_proto.input(0)];
                if (inpShape.size() != 4 && inpShape.size() != 5)
                    CV_Error(Error::StsNotImplemented, "Unsupported input shape of reduce_mean operation.");

                DictValue axes = layerParams.get("axes");
                CV_Assert(axes.size() <= inpShape.size() - 2);
                std::vector<int> kernel_size(inpShape.size() - 2, 1);
                for (int i = 0; i < axes.size(); i++) {
                    int axis = axes.get<int>(i);
                    CV_Assert_N(axis >= 2 + i, axis < inpShape.size());
                    kernel_size[axis - 2] = inpShape[axis];
                }

                layerParams.set("kernel_size", DictValue::arrayInt(&kernel_size[0], kernel_size.size()));
            }
        }
        else if (layer_type == "Slice")
        {
            if (layerParams.has("steps")) {
                DictValue steps = layerParams.get("steps");
                for (int i = 0; i < steps.size(); ++i) {
                    if (steps.get<int>(i) != 1)
                        CV_Error(Error::StsNotImplemented,
                                 "Slice layer only supports steps = 1");
                }
            }

            int axis = 0;
            if (layerParams.has("axes")) {
                DictValue axes = layerParams.get("axes");
                for (int i = 1; i < axes.size(); ++i) {
                    CV_Assert(axes.get<int>(i - 1) == axes.get<int>(i) - 1);
                }
                axis = axes.get<int>(0);
            }
            layerParams.set("axis", axis);

            DictValue starts = layerParams.get("starts");
            DictValue ends = layerParams.get("ends");
            CV_Assert(starts.size() == ends.size());

            std::vector<int> begin;
            std::vector<int> end;
            if (axis > 0) {
                begin.resize(axis, 0);
                end.resize(axis, -1);
            }

            for (int i = 0; i < starts.size(); ++i)
            {
                begin.push_back(starts.get<int>(i));
                int finish = ends.get<int>(i);
                end.push_back((finish < 0) ? --finish : finish); // ONNX/numpy ends are exclusive; shift negative ends by one for OpenCV's convention
            }
            layerParams.set("begin", DictValue::arrayInt(&begin[0], begin.size()));
            layerParams.set("end", DictValue::arrayInt(&end[0], end.size()));
        }
        else if (layer_type == "Split")
        {
            if (layerParams.has("split"))
            {
                DictValue splits = layerParams.get("split");
                const int numSplits = splits.size();
                CV_Assert(numSplits > 1);

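                // Convert ONNX split sizes into the cumulative slice points expected by the Slice layer.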
                std::vector<int> slicePoints(numSplits - 1, splits.get<int>(0));
                for (int i = 1; i < splits.size() - 1; ++i)
                {
                    slicePoints[i] = slicePoints[i - 1] + splits.get<int>(i - 1);
                }
                layerParams.set("slice_point", DictValue::arrayInt(&slicePoints[0], slicePoints.size()));
            }
            else
            {
                layerParams.set("num_split", node_proto.output_size());
            }
            layerParams.type = "Slice";
        }
        else if (layer_type == "Add" || layer_type == "Sum" || layer_type == "Sub")
        {
            bool isSub = layer_type == "Sub";
            CV_CheckEQ(node_proto.input_size(), 2, "");
            bool is_const_0 = layer_id.find(node_proto.input(0)) == layer_id.end();
            bool is_const_1 = layer_id.find(node_proto.input(1)) == layer_id.end();
            if (is_const_0 && is_const_1)
            {
                Mat blob_0 = getBlob(node_proto, constBlobs, 0);
                Mat blob_1 = getBlob(node_proto, constBlobs, 1);
                CV_Assert(blob_0.size == blob_1.size);
                Mat output = isSub ? (blob_0 - blob_1) : (blob_0 + blob_1);
                constBlobs.insert(std::make_pair(layerParams.name, output));
                continue;
            }
            else if (is_const_0 || is_const_1)
            {
                Mat blob = getBlob(node_proto, constBlobs, is_const_0 ? 0 : 1);
                blob = blob.reshape(1, 1);
                if (blob.total() == 1) {
                    layerParams.type = "Power";
                    layerParams.set("shift", (isSub ? -1 : 1) * blob.at<float>(0));
                }
                else {
                    layerParams.type = "Scale";
                    layerParams.set("bias_term", true);
                    layerParams.blobs.push_back((isSub ? -1 : 1) * blob);
                }
            }
            else if (outShapes[node_proto.input(0)] == outShapes[node_proto.input(1)])
            {
                layerParams.type = "Eltwise";
                if (isSub)
                {
                    static float subCoeffs[] = {1.f, -1.f};
                    layerParams.set("coeff", DictValue::arrayReal<float*>(subCoeffs, 2));
                }
            }
            else
            {
                if (isSub)
                {
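                    // Express a - b as a + (-b): negate the second input with a Power layer (scale = -1).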
                    LayerParams powerParams;
                    powerParams.name = layerParams.name + "/neg";
                    powerParams.type = "Power";
                    powerParams.set("scale", -1);

                    //Create Power layer
                    int id = dstNet.addLayer(powerParams.name, powerParams.type, powerParams);
                    //Connect to input
                    layerId = layer_id.find(node_proto.input(1));
                    CV_Assert(layerId != layer_id.end());
                    dstNet.connect(layerId->second.layerId, layerId->second.outputId, id, 0);
                    //Add shape
                    layer_id.insert(std::make_pair(powerParams.name, LayerInfo(id, 0)));
                    outShapes[powerParams.name] = outShapes[node_proto.input(1)];

                    //Replace input to Power
                    node_proto.set_input(1, powerParams.name);
                }
                layerParams.type = "Scale";
                layerParams.set("bias_term", true);
            }
        }
        else if (layer_type == "Max")
        {
            layerParams.type = "Eltwise";
            layerParams.set("operation", "max");
        }
        else if (layer_type == "Neg")
        {
            layerParams.type = "Power";
            layerParams.set("scale", -1);
        }
        else if (layer_type == "Constant")
        {
            CV_Assert(node_proto.input_size() == 0);
            CV_Assert(layerParams.blobs.size() == 1);
            constBlobs.insert(std::make_pair(layerParams.name, layerParams.blobs[0]));
            continue;
        }
        else if (layer_type == "ImageScaler")
        {
            const float scale = layerParams.has("scale") ? layerParams.get<float>("scale") : 1.0f;
            layerParams.erase("scale");

            if (layerParams.has("bias"))
            {
                layerParams.type = "Scale";
                layerParams.blobs.push_back(
                    Mat(Size(1,  layerParams.get("bias").size()), CV_32FC1, scale));

                layerParams.set("bias_term", true);
                Mat bias(1, layerParams.get("bias").size(), CV_32FC1);
                for (int j = 0; j < bias.total(); j++) {
                    bias.at<float>(0, j) = layerParams.get("bias").getRealValue(j);
                }
                layerParams.blobs.push_back(bias);
                layerParams.erase("bias");
            }
            else {
                layerParams.set("scale", scale);
                layerParams.type = "Power";
            }
        }
        else if (layer_type == "Clip")
        {
            layerParams.type = "ReLU6";
            replaceLayerParam(layerParams, "min", "min_value");
            replaceLayerParam(layerParams, "max", "max_value");

        }
        else if (layer_type == "LeakyRelu")
        {
            layerParams.type = "ReLU";
            replaceLayerParam(layerParams, "alpha", "negative_slope");
        }
        else if (layer_type == "LRN")
        {
            replaceLayerParam(layerParams, "size", "local_size");
        }
        else if (layer_type == "InstanceNormalization")
        {
            if (node_proto.input_size() != 3)
                CV_Error(Error::StsNotImplemented,
                         "Expected input, scale, bias");

            layerParams.blobs.resize(4);
            layerParams.blobs[2] = getBlob(node_proto, constBlobs, 1);  // weightData
            layerParams.blobs[3] = getBlob(node_proto, constBlobs, 2);  // biasData
            layerParams.set("has_bias", true);
            layerParams.set("has_weight", true);

            // Get number of channels in input
            int size = layerParams.blobs[2].total();
            layerParams.blobs[0] = Mat::zeros(size, 1, CV_32F); // mean
            layerParams.blobs[1] = Mat::ones(size, 1, CV_32F); // std

            LayerParams mvnParams;
            mvnParams.name = layerParams.name + "/MVN";
            mvnParams.type = "MVN";
            mvnParams.set("eps", layerParams.get<float>("epsilon"));
            layerParams.erase("epsilon");

            //Create MVN layer
            int id = dstNet.addLayer(mvnParams.name, mvnParams.type, mvnParams);
            //Connect to input
            layerId = layer_id.find(node_proto.input(0));
            CV_Assert(layerId != layer_id.end());
            dstNet.connect(layerId->second.layerId, layerId->second.outputId, id, 0);
            //Add shape
            layer_id.insert(std::make_pair(mvnParams.name, LayerInfo(id, 0)));
            outShapes[mvnParams.name] = outShapes[node_proto.input(0)];

            //Replace Batch Norm's input to MVN
            node_proto.set_input(0, mvnParams.name);
            layerParams.type = "BatchNorm";
        }
        else if (layer_type == "BatchNormalization")
        {
            if (node_proto.input_size() != 5)
                CV_Error(Error::StsNotImplemented,
                         "Expected input, scale, bias, mean and var");

            layerParams.type = "BatchNorm";
            replaceLayerParam(layerParams, "epsilon", "eps");
            replaceLayerParam(layerParams, "spatial", "use_global_stats");

            Mat meanData = getBlob(node_proto, constBlobs, 3);
            Mat stdData =  getBlob(node_proto, constBlobs, 4);

            layerParams.blobs.push_back(meanData);
            layerParams.blobs.push_back(stdData);

            if (!node_proto.input(1).empty()) {
                layerParams.set("has_weight", true);
                layerParams.blobs.push_back(getBlob(node_proto, constBlobs, 1));  // weightData
            } else {
                layerParams.set("has_weight", false);
            }

            if (!node_proto.input(2).empty()) {
                layerParams.set("has_bias", true);
                layerParams.blobs.push_back(getBlob(node_proto, constBlobs, 2)); // biasData
            } else {
                layerParams.set("has_bias", false);
            }
        }
        else if (layer_type == "Gemm")
        {
            CV_Assert(node_proto.input_size() >= 2);
            layerParams.type = "InnerProduct";
            Mat weights = getBlob(node_proto, constBlobs, 1);
            int ind_num_out = 0;
            if (layerParams.has("transB") && !layerParams.get<int>("transB")) {
                transpose(weights, weights);
                ind_num_out = 1;
            }
            layerParams.blobs.push_back(weights);

            if (node_proto.input_size() == 3) {
                Mat bias = getBlob(node_proto, constBlobs, 2);
                layerParams.blobs.push_back(bias);
            }

            layerParams.set("num_output", layerParams.blobs[0].size[ind_num_out]);
            layerParams.set("bias_term", node_proto.input_size() == 3);
        }
        else if (layer_type == "MatMul")
        {
            CV_Assert(node_proto.input_size() == 2);
            layerParams.type = "InnerProduct";
            Mat blob = getBlob(node_proto, constBlobs, 1);
            layerParams.blobs.push_back(blob.t());
            layerParams.set("bias_term", false);
            layerParams.set("num_output", layerParams.blobs[0].size[0]);
        }
        else if (layer_type == "Mul" || layer_type == "Div")
        {
            CV_Assert(node_proto.input_size() == 2);

            bool isDiv = layer_type == "Div";
            int constId = -1;
            bool haveVariables = false;
            for (int i = 0; i < 2; ++i)
            {
                if (constBlobs.find(node_proto.input(i)) != constBlobs.end())
                    constId = i;
                else
                    haveVariables = true;
            }
            if (constId != -1 && haveVariables)
            {
                Mat blob = getBlob(node_proto, constBlobs, constId);
                blob = blob.reshape(1, 1);
                if (blob.total() == 1) {
                    float coeff = isDiv ? 1.0 / blob.at<float>(0) : blob.at<float>(0);
                    layerParams.set("scale", coeff);
                    layerParams.type = "Power";
                }
                else {
                    if (isDiv)
                        divide(1.0, blob, blob);
                    layerParams.blobs.push_back(blob);
                    layerParams.type = "Scale";
                }
            }
            else if (outShapes[node_proto.input(0)] == outShapes[node_proto.input(1)])
            {
                layerParams.type = "Eltwise";
                layerParams.set("operation", isDiv ? "div" : "prod");
            }
            else
            {
                if (isDiv)
                {
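                    // Express a / b as a * (1 / b): invert the second input with a Power layer (power = -1).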
                    LayerParams powerParams;
                    powerParams.name = layerParams.name + "/inv";
                    powerParams.type = "Power";
                    powerParams.set("power", -1);

                    //Create Power layer
                    int id = dstNet.addLayer(powerParams.name, powerParams.type, powerParams);
                    //Connect to input
                    layerId = layer_id.find(node_proto.input(1));
                    CV_Assert(layerId != layer_id.end());
                    dstNet.connect(layerId->second.layerId, layerId->second.outputId, id, 0);
                    //Add shape
                    layer_id.insert(std::make_pair(powerParams.name, LayerInfo(id, 0)));
                    outShapes[powerParams.name] = outShapes[node_proto.input(1)];

                    //Replace input to Power
                    node_proto.set_input(1, powerParams.name);
                }
                layerParams.type = "Scale";
            }

            if (!haveVariables)
            {
                Mat inp0 = getBlob(node_proto, constBlobs, 0);
                Mat inp1 = getBlob(node_proto, constBlobs, 1);
                if (inp0.size != inp1.size)
                    CV_Error(Error::StsNotImplemented, "Constant multiply with different shapes");

                Mat out;
                if (isDiv)
                    divide(inp0, inp1, out);
                else
                    multiply(inp0, inp1, out);

                out = out.reshape(1, inp0.dims, inp0.size);
                out.dims = inp0.dims;  // to workaround dims == 1
                constBlobs.insert(std::make_pair(layerParams.name, out));
                continue;
            }
        }
        else if (layer_type == "Conv")
        {
            CV_Assert(node_proto.input_size() >= 2);
            layerParams.type = "Convolution";
            for (int j = 1; j < node_proto.input_size(); j++) {
                layerParams.blobs.push_back(getBlob(node_proto, constBlobs, j));
            }
            layerParams.set("num_output", layerParams.blobs[0].size[0]);
            layerParams.set("bias_term", node_proto.input_size() == 3);
        }
        else if (layer_type == "ConvTranspose")
        {
            CV_Assert(node_proto.input_size() >= 2);
            layerParams.type = "Deconvolution";
            for (int j = 1; j < node_proto.input_size(); j++) {
                layerParams.blobs.push_back(getBlob(node_proto, constBlobs, j));
            }
            layerParams.set("num_output", layerParams.blobs[0].size[1] * layerParams.get<int>("group", 1));
            layerParams.set("bias_term", node_proto.input_size() == 3);

            if (!layerParams.has("kernel_size"))
                CV_Error(Error::StsNotImplemented,
                         "Required attribute 'kernel_size' is not present.");

            if (layerParams.has("output_shape"))
            {
                const DictValue& outShape = layerParams.get("output_shape");
                DictValue strides = layerParams.get("stride");
                DictValue kernel = layerParams.get("kernel_size");

                String padMode;
                std::vector<int> adjust_pads;
                if (layerParams.has("pad_mode"))
                {
                    padMode = toUpperCase(layerParams.get<String>("pad_mode"));
                    if (padMode != "SAME" && padMode != "VALID")
                        CV_Error(Error::StsError, "Unsupported padding mode " + padMode);

                    for (int i = 0; i < strides.size(); i++)
                    {
                        int sz = outShape.get<int>(2 + i);
                        int stride = strides.get<int>(i);
                        adjust_pads.push_back(padMode == "SAME"? (sz - 1) % stride :
                                                                 (sz - kernel.get<int>(i)) % stride);
                    }
                    layerParams.set("adj", DictValue::arrayInt(&adjust_pads[0], adjust_pads.size()));
                }
            }
            else if (layerParams.has("output_padding"))
            {
                replaceLayerParam(layerParams, "output_padding", "adj");
            }
        }
        else if (layer_type == "Transpose")
        {
            layerParams.type = "Permute";
            replaceLayerParam(layerParams, "perm", "order");

            CV_Assert(node_proto.input_size() == 1);
            if (constBlobs.find(node_proto.input(0)) != constBlobs.end())
            {
                std::vector<Mat> inputs(1, getBlob(node_proto, constBlobs, 0)), transposed;
                runLayer(layerParams, inputs, transposed);
                CV_Assert(transposed.size() == 1);
                constBlobs.insert(std::make_pair(layerParams.name, transposed[0]));
                continue;
            }
        }
        else if (layer_type == "ReduceL2")
        {
            CV_Assert_N(node_proto.input_size() == 1, layerParams.has("axes"));
            CV_Assert(graph_proto.node_size() > li + 1 && graph_proto.node(li + 1).op_type() == "Div");
            ++li;
            node_proto = graph_proto.node(li);
            layerParams.name = node_proto.output(0);
            layerParams.type = "Normalize";

            DictValue axes_dict = layerParams.get("axes");
            if (axes_dict.size() != 1)
                CV_Error(Error::StsNotImplemented, "Multidimensional reduceL2");
            int axis = axes_dict.getIntValue(0);
            layerParams.set("axis",axis);
            layerParams.set("end_axis", axis);
        }
        else if (layer_type == "Squeeze")
        {
            CV_Assert_N(node_proto.input_size() == 1, layerParams.has("axes"));
            DictValue axes_dict = layerParams.get("axes");
            if (axes_dict.size() != 1)
                CV_Error(Error::StsNotImplemented, "Multidimensional squeeze");

            int axis = axes_dict.getIntValue(0);
            layerParams.set("axis", axis - 1);
            layerParams.set("end_axis", axis);
            layerParams.type = "Flatten";
        }
        else if (layer_type == "Flatten")
        {
            CV_CheckEQ(node_proto.input_size(), 1, "");
            if (constBlobs.find(node_proto.input(0)) != constBlobs.end())
            {
                Mat input = getBlob(node_proto, constBlobs, 0);
                int axis = clamp(layerParams.get<int>("axis", 1), input.dims);

                std::vector<int> out_size(&input.size[0], &input.size[0] + axis);
                out_size.push_back(input.total(axis));
                Mat output = input.reshape(1, out_size);
                constBlobs.insert(std::make_pair(layerParams.name, output));
                continue;
            }
        }
        else if (layer_type == "Unsqueeze")
        {
            CV_Assert(node_proto.input_size() == 1);
            DictValue axes = layerParams.get("axes");
            if (constBlobs.find(node_proto.input(0)) != constBlobs.end())
            {
                // Constant input.
                Mat input = getBlob(node_proto, constBlobs, 0);

                std::vector<int> dims;
                for (int j = 0; j < input.dims; j++) {
                    dims.push_back(input.size[j]);
                }
                CV_Assert(axes.getIntValue(axes.size()-1) <= dims.size());
                for (int j = 0; j < axes.size(); j++) {
                    dims.insert(dims.begin() + axes.getIntValue(j), 1);
                }

                Mat out = input.reshape(0, dims);
                constBlobs.insert(std::make_pair(layerParams.name, out));
                continue;
            }

            // Variable input.
            if (axes.size() != 1)
                CV_Error(Error::StsNotImplemented, "Multidimensional unsqueeze");

            MatShape inpShape = outShapes[node_proto.input(0)];
            int axis = axes.getIntValue(0);
            CV_Assert(0 <= axis && axis <= inpShape.size());
            std::vector<int> outShape = inpShape;
            outShape.insert(outShape.begin() + axis, 1);
            layerParams.type = "Reshape";
            layerParams.set("dim", DictValue::arrayInt(&outShape[0], outShape.size()));
        }
        else if (layer_type == "Reshape")
        {
            CV_Assert(node_proto.input_size() == 2 || layerParams.has("shape"));

            if (node_proto.input_size() == 2) {
                Mat blob = getBlob(node_proto, constBlobs, 1);
                CV_Assert(blob.type() == CV_32SC1);

                layerParams.set("dim", DictValue::arrayInt<int*>(
                            blob.ptr<int>(), blob.total() ));

                if (layer_id.find(node_proto.input(0)) == layer_id.end()) {
                    std::vector<Mat> inputs(1, getBlob(node_proto, constBlobs, 0)), outputs;
                    runLayer(layerParams, inputs, outputs);
                    constBlobs.insert(std::make_pair(layerParams.name, outputs[0]));
                    continue;
                }
            }
            else {
                DictValue shape = layerParams.get("shape");
                std::vector<int> dim;
                for (int j = 0; j < shape.size(); j++) {
                    dim.push_back(shape.getIntValue(j));
                }

                if (layer_id.find(node_proto.input(0)) == layer_id.end()) {
                    Mat input = getBlob(node_proto, constBlobs, 0);
                    Mat out = input.reshape(0, dim);
                    constBlobs.insert(std::make_pair(layerParams.name, out));
                    continue;
                }
                replaceLayerParam(layerParams, "shape", "dim");
            }
        }
        else if (layer_type == "Pad")
        {
            layerParams.type = "Padding";
        }
        else if (layer_type == "Shape")
        {
            CV_Assert(node_proto.input_size() == 1);
            shapeIt = outShapes.find(node_proto.input(0));
            CV_Assert(shapeIt != outShapes.end());
            MatShape inpShape = shapeIt->second;

            Mat shapeMat(inpShape.size(), 1, CV_32S);
            for (int j = 0; j < inpShape.size(); ++j)
                shapeMat.at<int>(j) = inpShape[j];
            shapeMat.dims = 1;  // force a 1-D blob (Mat(rows, 1) is 2-D by default)

            constBlobs.insert(std::make_pair(layerParams.name, shapeMat));
            continue;
        }
        else if (layer_type == "Cast")
        {
            if (constBlobs.find(node_proto.input(0)) != constBlobs.end())
            {
                Mat blob = getBlob(node_proto, constBlobs, 0);
                int type;
                switch (layerParams.get<int>("to"))
                {
                    case opencv_onnx::TensorProto_DataType_FLOAT:   type = CV_32F; break;
                    case opencv_onnx::TensorProto_DataType_UINT8:   type = CV_8U; break;
                    case opencv_onnx::TensorProto_DataType_UINT16:  type = CV_16U; break;
                    case opencv_onnx::TensorProto_DataType_FLOAT16: type = CV_16S; break;
                    case opencv_onnx::TensorProto_DataType_INT8:
                    case opencv_onnx::TensorProto_DataType_INT16:
                    case opencv_onnx::TensorProto_DataType_INT32:
                    case opencv_onnx::TensorProto_DataType_INT64:   type = CV_32S; break;
                    default: type = blob.type();
                }
                blob.convertTo(blob, type);
                constBlobs.insert(std::make_pair(layerParams.name, blob));
                continue;
            }
            else
                layerParams.type = "Identity";
        }
        else if (layer_type == "Gather")
        {
            CV_Assert(node_proto.input_size() == 2);
            CV_Assert(layerParams.has("axis"));
            Mat input = getBlob(node_proto, constBlobs, 0);
            Mat indexMat = getBlob(node_proto, constBlobs, 1);
            CV_Assert_N(indexMat.type() == CV_32S, indexMat.total() == 1);
            int index = indexMat.at<int>(0);
            int axis = layerParams.get<int>("axis");

            std::vector<cv::Range> ranges(input.dims, Range::all());
            ranges[axis] = Range(index, index + 1);

            Mat out = input(ranges);
            constBlobs.insert(std::make_pair(layerParams.name, out));
            continue;
        }
        else if (layer_type == "Concat")
        {
            bool hasVariableInps = false;
            for (int i = 0; i < node_proto.input_size(); ++i)
            {
                if (layer_id.find(node_proto.input(i)) != layer_id.end())
                {
                    hasVariableInps = true;
                    break;
                }
            }

            if (!hasVariableInps)
            {
                std::vector<Mat> inputs(node_proto.input_size()), concatenated;
                for (size_t i = 0; i < inputs.size(); ++i)
                {
                    inputs[i] = getBlob(node_proto, constBlobs, i);
                }
                runLayer(layerParams, inputs, concatenated);

                CV_Assert(concatenated.size() == 1);
                constBlobs.insert(std::make_pair(layerParams.name, concatenated[0]));
                continue;
            }
        }
        else if (layer_type == "Upsample")
        {
            layerParams.type = "Resize";
            if (layerParams.has("scales"))
            {
                // Pytorch layer
                DictValue scales = layerParams.get("scales");
                CV_Assert(scales.size() == 4);
                layerParams.set("zoom_factor_y", scales.getIntValue(2));
                layerParams.set("zoom_factor_x", scales.getIntValue(3));
            }
            else
            {
                // Caffe2 layer
                replaceLayerParam(layerParams, "height_scale", "zoom_factor_y");
                replaceLayerParam(layerParams, "width_scale", "zoom_factor_x");
            }
            replaceLayerParam(layerParams, "mode", "interpolation");

            if (layerParams.get<String>("interpolation") == "linear" && framework_name == "pytorch") {
                layerParams.type = "Resize";
                Mat scales = getBlob(node_proto, constBlobs, 1);
                CV_Assert(scales.total() == 4);
                layerParams.set("interpolation", "opencv_linear");
                layerParams.set("zoom_factor_y", scales.at<float>(2));
                layerParams.set("zoom_factor_x", scales.at<float>(3));
            }
        }
        else if (layer_type == "LogSoftmax")
        {
            layerParams.type = "Softmax";
            layerParams.set("log_softmax", true);
        }
        else
        {
            for (int j = 0; j < node_proto.input_size(); j++) {
                if (layer_id.find(node_proto.input(j)) == layer_id.end())
                    layerParams.blobs.push_back(getBlob(node_proto, constBlobs, j));
            }
        }

        int id = dstNet.addLayer(layerParams.name, layerParams.type, layerParams);
        for (int i = 0; i < node_proto.output_size(); ++i)
        {
            layer_id.insert(std::make_pair(node_proto.output(i), LayerInfo(id, i)));
        }

        std::vector<MatShape> layerInpShapes, layerOutShapes, layerInternalShapes;
        for (int j = 0; j < node_proto.input_size(); j++) {
            layerId = layer_id.find(node_proto.input(j));
            if (layerId != layer_id.end()) {
                dstNet.connect(layerId->second.layerId, layerId->second.outputId, id, j);
                // Collect input shapes.
                shapeIt = outShapes.find(node_proto.input(j));
                CV_Assert(shapeIt != outShapes.end());
                layerInpShapes.push_back(shapeIt->second);
            }
        }

        // Compute shape of output blob for this layer.
        Ptr<Layer> layer = dstNet.getLayer(id);
        layer->getMemoryShapes(layerInpShapes, 0, layerOutShapes, layerInternalShapes);
        for (int i = 0; i < node_proto.output_size() && i < (int)layerOutShapes.size(); ++i)
        {
            outShapes[node_proto.output(i)] = layerOutShapes[i];
        }
    }
}

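// Example usage (a minimal sketch; the model path, input image and 224x224 size are
// placeholders, not part of this file):
//
//   Net net = readNetFromONNX("model.onnx");
//   Mat blob = blobFromImage(img, 1.0, Size(224, 224));
//   net.setInput(blob);
//   Mat prob = net.forward();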
Net readNetFromONNX(const String& onnxFile)
{
    ONNXImporter onnxImporter(onnxFile.c_str());
    Net net;
    onnxImporter.populateNet(net);
    return net;
}

Net readNetFromONNX(const char* buffer, size_t sizeBuffer)
{
    ONNXImporter onnxImporter(buffer, sizeBuffer);
    Net net;
    onnxImporter.populateNet(net);
    return net;
}

Net readNetFromONNX(const std::vector<uchar>& buffer)
{
    return readNetFromONNX(reinterpret_cast<const char*>(buffer.data()), buffer.size());
}

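// Reads a single tensor stored as a serialized ONNX TensorProto, e.g. the .pb
// input/output files shipped with ONNX test data. A sketch of the call
// (the path is a placeholder):
//
//   Mat t = readTensorFromONNX("test_data_set_0/input_0.pb");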
Mat readTensorFromONNX(const String& path)
{
    opencv_onnx::TensorProto tensor_proto = opencv_onnx::TensorProto();
    std::fstream input(path.c_str(), std::ios::in | std::ios::binary);
    if (!tensor_proto.ParseFromIstream(&input)) {
        CV_Error(Error::StsUnsupportedFormat, "Failed to parse data");
    }
    Mat mat = getMatFromTensor(tensor_proto);
    releaseONNXTensor(tensor_proto);
    return mat;
}

CV__DNN_EXPERIMENTAL_NS_END
}} // namespace

#endif