// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2018-2019, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.

#ifndef __OPENCV_DNN_OP_INF_ENGINE_HPP__
#define __OPENCV_DNN_OP_INF_ENGINE_HPP__

#include "opencv2/core/cvdef.h"
#include "opencv2/core/cvstd.hpp"
#include "opencv2/dnn.hpp"

#include "opencv2/core/async.hpp"
#include "opencv2/core/detail/async_promise.hpp"

#include "opencv2/dnn/utils/inference_engine.hpp"

#ifdef HAVE_INF_ENGINE

#define INF_ENGINE_RELEASE_2018R5 2018050000
#define INF_ENGINE_RELEASE_2019R1 2019010000

#ifndef INF_ENGINE_RELEASE
#warning("IE version has not been provided via command-line. Using 2019R1 by default")
#define INF_ENGINE_RELEASE INF_ENGINE_RELEASE_2019R1
#endif

#define INF_ENGINE_VER_MAJOR_GT(ver) (((INF_ENGINE_RELEASE) / 10000) > ((ver) / 10000))
#define INF_ENGINE_VER_MAJOR_GE(ver) (((INF_ENGINE_RELEASE) / 10000) >= ((ver) / 10000))
#define INF_ENGINE_VER_MAJOR_LT(ver) (((INF_ENGINE_RELEASE) / 10000) < ((ver) / 10000))
#define INF_ENGINE_VER_MAJOR_LE(ver) (((INF_ENGINE_RELEASE) / 10000) <= ((ver) / 10000))
#define INF_ENGINE_VER_MAJOR_EQ(ver) (((INF_ENGINE_RELEASE) / 10000) == ((ver) / 10000))
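
// Example usage (illustrative sketch): select a code path only for releases newer than 2018R5.
//   #if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R5)
//       // code that requires 2019R1 or a later release
//   #endif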

#if defined(__GNUC__) && __GNUC__ >= 5
//#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wsuggest-override"
#endif

#if defined(__GNUC__) && INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
#pragma GCC visibility push(default)
#endif

#include <inference_engine.hpp>

#include <ie_builders.hpp>

#if defined(__GNUC__) && INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
#pragma GCC visibility pop
#endif

#if defined(__GNUC__) && __GNUC__ >= 5
//#pragma GCC diagnostic pop
#endif

#endif  // HAVE_INF_ENGINE

namespace cv { namespace dnn {

#ifdef HAVE_INF_ENGINE

class InfEngineBackendNet
{
public:
    InfEngineBackendNet();

    InfEngineBackendNet(InferenceEngine::CNNNetwork& net);

    void addLayer(InferenceEngine::Builder::Layer& layer);

    void addOutput(const std::string& name);

    void connect(const std::vector<Ptr<BackendWrapper> >& inputs,
                 const std::vector<Ptr<BackendWrapper> >& outputs,
                 const std::string& layerName);

    bool isInitialized();

    void init(int targetId);

    void forward(const std::vector<Ptr<BackendWrapper> >& outBlobsWrappers,
                 bool isAsync);

    void initPlugin(InferenceEngine::ICNNNetwork& net);

    void addBlobs(const std::vector<Ptr<BackendWrapper> >& ptrs);

private:
    InferenceEngine::Builder::Network netBuilder;

    InferenceEngine::InferenceEnginePluginPtr enginePtr;
    InferenceEngine::InferencePlugin plugin;
    InferenceEngine::ExecutableNetwork netExec;
    InferenceEngine::BlobMap allBlobs;
    InferenceEngine::TargetDevice targetDevice;

    struct InfEngineReqWrapper
    {
        InfEngineReqWrapper() : isReady(true) {}

        void makePromises(const std::vector<Ptr<BackendWrapper> >& outs);

        InferenceEngine::InferRequest req;
        std::vector<cv::AsyncPromise> outProms;
        std::vector<std::string> outsNames;
        bool isReady;
    };

    std::vector<Ptr<InfEngineReqWrapper> > infRequests;

    InferenceEngine::CNNNetwork cnn;
    bool hasNetOwner;

    std::map<std::string, int> layers;
    std::vector<std::string> requestedOutputs;

    std::set<int> unconnectedLayersIds;
};
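
// Illustrative usage sketch of InfEngineBackendNet (variable names such as ieLayer,
// inputWrappers and outputWrappers are hypothetical, and the exact call order inside
// the DNN backend may differ): build a network from layers, initialize it for a target
// and run inference on wrapped blobs.
//
//   InfEngineBackendNet ieNet;
//   ieNet.addLayer(ieLayer);                        // an InferenceEngine::Builder::Layer
//   ieNet.addOutput("prob");                        // request an output by name
//   ieNet.connect(inputWrappers, outputWrappers, "prob");
//   if (!ieNet.isInitialized())
//       ieNet.init(DNN_TARGET_CPU);                 // build and load the executable network
//   ieNet.addBlobs(inputWrappers);
//   ieNet.addBlobs(outputWrappers);
//   ieNet.forward(outputWrappers, /*isAsync=*/false);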

class InfEngineBackendNode : public BackendNode
{
public:
    InfEngineBackendNode(const InferenceEngine::Builder::Layer& layer);

    void connect(std::vector<Ptr<BackendWrapper> >& inputs,
                 std::vector<Ptr<BackendWrapper> >& outputs);

    // Inference Engine network object that allows obtaining the outputs of this layer.
    InferenceEngine::Builder::Layer layer;
    Ptr<InfEngineBackendNet> net;
};

class InfEngineBackendWrapper : public BackendWrapper
{
public:
    InfEngineBackendWrapper(int targetId, const Mat& m);

    InfEngineBackendWrapper(Ptr<BackendWrapper> wrapper);

    ~InfEngineBackendWrapper();

    static Ptr<BackendWrapper> create(Ptr<BackendWrapper> wrapper);

    virtual void copyToHost() CV_OVERRIDE;

    virtual void setHostDirty() CV_OVERRIDE;

    InferenceEngine::DataPtr dataPtr;
    InferenceEngine::Blob::Ptr blob;
    AsyncArray futureMat;
};
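
// Illustrative sketch (the Mat and target are hypothetical): wrap host memory for the
// IE backend and synchronize it around an inference call.
//
//   Mat m(1, 1000, CV_32F);
//   Ptr<BackendWrapper> wrapper(new InfEngineBackendWrapper(DNN_TARGET_CPU, m));
//   wrapper->setHostDirty();    // host data was modified and must be re-uploaded
//   // ... run inference ...
//   wrapper->copyToHost();      // copy the backend result back into the Mat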

InferenceEngine::Blob::Ptr wrapToInfEngineBlob(const Mat& m, InferenceEngine::Layout layout = InferenceEngine::Layout::ANY);

InferenceEngine::Blob::Ptr wrapToInfEngineBlob(const Mat& m, const std::vector<size_t>& shape, InferenceEngine::Layout layout);

InferenceEngine::DataPtr infEngineDataNode(const Ptr<BackendWrapper>& ptr);

Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob);
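
// Illustrative round trip (the Mat is hypothetical): wrap a cv::Mat as an IE blob,
// then view an IE blob as a cv::Mat again.
//
//   Mat m(1, 3, CV_32F);
//   InferenceEngine::Blob::Ptr blob = wrapToInfEngineBlob(m);
//   Mat view = infEngineBlobToMat(blob);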

// Convert an Inference Engine blob with FP32 precision to FP16 precision.
// Allocates memory for a new blob.
InferenceEngine::Blob::Ptr convertFp16(const InferenceEngine::Blob::Ptr& blob);

void addConstantData(const std::string& name, InferenceEngine::Blob::Ptr data, InferenceEngine::Builder::Layer& l);
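
// Illustrative sketch (weightsMat and ieLayer are hypothetical): convert FP32 weights
// to FP16 and attach them to a builder layer as constant data.
//
//   InferenceEngine::Blob::Ptr fp16Weights = convertFp16(wrapToInfEngineBlob(weightsMat));
//   addConstantData("weights", fp16Weights, ieLayer);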

// This is a fake class used to run networks from the Model Optimizer. Objects of this
// class simulate responses of layers that are imported by OpenCV and supported by
// Inference Engine. The main difference is that they do not perform a forward pass.
class InfEngineBackendLayer : public Layer
{
public:
    InfEngineBackendLayer(const InferenceEngine::CNNNetwork &t_net_) : t_net(t_net_) {}

    virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
                                 const int requiredOutputs,
                                 std::vector<MatShape> &outputs,
                                 std::vector<MatShape> &internals) const CV_OVERRIDE;

    virtual void forward(InputArrayOfArrays inputs, OutputArrayOfArrays outputs,
                         OutputArrayOfArrays internals) CV_OVERRIDE;

    virtual bool supportBackend(int backendId) CV_OVERRIDE;

private:
    InferenceEngine::CNNNetwork t_net;
};

CV__DNN_EXPERIMENTAL_NS_BEGIN

bool isMyriadX();

CV__DNN_EXPERIMENTAL_NS_END

#endif  // HAVE_INF_ENGINE

bool haveInfEngine();

void forwardInfEngine(const std::vector<Ptr<BackendWrapper> >& outBlobsWrappers,
                      Ptr<BackendNode>& node, bool isAsync);
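
// Illustrative guard (outWrappers and node are hypothetical): check for Inference Engine
// support at run time before dispatching a forward pass to it.
//
//   if (haveInfEngine())
//       forwardInfEngine(outWrappers, node, /*isAsync=*/false);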

}}  // namespace dnn, namespace cv

#endif  // __OPENCV_DNN_OP_INF_ENGINE_HPP__