// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2018-2019, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.

#ifndef __OPENCV_DNN_OP_INF_ENGINE_HPP__
#define __OPENCV_DNN_OP_INF_ENGINE_HPP__

#include "opencv2/core/cvdef.h"
#include "opencv2/core/cvstd.hpp"
#include "opencv2/dnn.hpp"

#include "opencv2/core/async.hpp"
#include "opencv2/core/detail/async_promise.hpp"

#include "opencv2/dnn/utils/inference_engine.hpp"

#ifdef HAVE_INF_ENGINE

#define INF_ENGINE_RELEASE_2018R5 2018050000
#define INF_ENGINE_RELEASE_2019R1 2019010000
#define INF_ENGINE_RELEASE_2019R2 2019020000

#ifndef INF_ENGINE_RELEASE
#warning("IE version has not been provided via the command line. Using 2019R2 by default")
#define INF_ENGINE_RELEASE INF_ENGINE_RELEASE_2019R2
#endif

#define INF_ENGINE_VER_MAJOR_GT(ver) (((INF_ENGINE_RELEASE) / 10000) > ((ver) / 10000))
#define INF_ENGINE_VER_MAJOR_GE(ver) (((INF_ENGINE_RELEASE) / 10000) >= ((ver) / 10000))
#define INF_ENGINE_VER_MAJOR_LT(ver) (((INF_ENGINE_RELEASE) / 10000) < ((ver) / 10000))
#define INF_ENGINE_VER_MAJOR_LE(ver) (((INF_ENGINE_RELEASE) / 10000) <= ((ver) / 10000))
#define INF_ENGINE_VER_MAJOR_EQ(ver) (((INF_ENGINE_RELEASE) / 10000) == ((ver) / 10000))
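
// The release constants above encode <year><release><patch> (e.g. 2019020000
// is 2019 R2), and the comparison macros inspect only the major component
// (INF_ENGINE_RELEASE / 10000). Illustrative usage for gating version-specific
// code:
//
//   #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1)
//   // code that requires Inference Engine 2019R1 or newer
//   #endif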

#if defined(__GNUC__) && __GNUC__ >= 5
//#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wsuggest-override"
#endif

//#define INFERENCE_ENGINE_DEPRECATED  // turn off deprecation warnings from IE
// There is currently no way to suppress warnings from IE alone, so we are forced to suppress warnings globally
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#endif
#ifdef _MSC_VER
#pragma warning(disable: 4996)  // was declared deprecated
#endif

#if defined(__GNUC__)
#pragma GCC visibility push(default)
#endif

#include <inference_engine.hpp>

#include <ie_builders.hpp>

#if defined(__GNUC__)
#pragma GCC visibility pop
#endif

#if defined(__GNUC__) && __GNUC__ >= 5
//#pragma GCC diagnostic pop
#endif

#endif  // HAVE_INF_ENGINE

namespace cv { namespace dnn {

#ifdef HAVE_INF_ENGINE

class InfEngineBackendNet
{
public:
    InfEngineBackendNet();

    InfEngineBackendNet(InferenceEngine::CNNNetwork& net);

    void addLayer(InferenceEngine::Builder::Layer& layer);

    void addOutput(const std::string& name);

    void connect(const std::vector<Ptr<BackendWrapper> >& inputs,
                 const std::vector<Ptr<BackendWrapper> >& outputs,
                 const std::string& layerName);

    bool isInitialized();

    void init(int targetId);

    void forward(const std::vector<Ptr<BackendWrapper> >& outBlobsWrappers,
                 bool isAsync);

    void initPlugin(InferenceEngine::CNNNetwork& net);

    void addBlobs(const std::vector<cv::Ptr<BackendWrapper> >& ptrs);

private:
    InferenceEngine::Builder::Network netBuilder;

    InferenceEngine::ExecutableNetwork netExec;
    InferenceEngine::BlobMap allBlobs;
    std::string device_name;
#if INF_ENGINE_VER_MAJOR_LE(2019010000)
    InferenceEngine::InferenceEnginePluginPtr enginePtr;
    InferenceEngine::InferencePlugin plugin;
#else
    bool isInit = false;
#endif

    struct InfEngineReqWrapper
    {
        InfEngineReqWrapper() : isReady(true) {}

        void makePromises(const std::vector<Ptr<BackendWrapper> >& outs);

        InferenceEngine::InferRequest req;
        std::vector<cv::AsyncPromise> outProms;
        std::vector<std::string> outsNames;
        bool isReady;
    };

    std::vector<Ptr<InfEngineReqWrapper> > infRequests;

    InferenceEngine::CNNNetwork cnn;
    bool hasNetOwner;

    std::map<std::string, int> layers;
    std::vector<std::string> requestedOutputs;

    std::set<int> unconnectedLayersIds;
};
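
// Illustrative sketch (not part of the original header) of how the backend
// drives this class: layers are appended and wired, blobs are registered,
// then the network is initialized for a target and run. Local variable names
// below are hypothetical.
//
//   Ptr<InfEngineBackendNet> ieNet = makePtr<InfEngineBackendNet>();
//   ieNet->addLayer(layer);                     // InferenceEngine::Builder::Layer
//   ieNet->connect(inputs, outputs, layerName); // wrapped cv::Mat inputs/outputs
//   ieNet->addBlobs(ptrs);
//   if (!ieNet->isInitialized())
//       ieNet->init(DNN_TARGET_CPU);
//   ieNet->forward(outputWrappers, /*isAsync=*/false);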

class InfEngineBackendNode : public BackendNode
{
public:
    InfEngineBackendNode(const InferenceEngine::Builder::Layer& layer);

    void connect(std::vector<Ptr<BackendWrapper> >& inputs,
                 std::vector<Ptr<BackendWrapper> >& outputs);

    InferenceEngine::Builder::Layer layer;
    // Inference Engine network object that allows obtaining the outputs of this layer.
    Ptr<InfEngineBackendNet> net;
};

class InfEngineBackendWrapper : public BackendWrapper
{
public:
    InfEngineBackendWrapper(int targetId, const Mat& m);

    InfEngineBackendWrapper(Ptr<BackendWrapper> wrapper);

    ~InfEngineBackendWrapper();

    static Ptr<BackendWrapper> create(Ptr<BackendWrapper> wrapper);

    virtual void copyToHost() CV_OVERRIDE;

    virtual void setHostDirty() CV_OVERRIDE;

    InferenceEngine::DataPtr dataPtr;
    InferenceEngine::Blob::Ptr blob;
    AsyncArray futureMat;
};

InferenceEngine::Blob::Ptr wrapToInfEngineBlob(const Mat& m, InferenceEngine::Layout layout = InferenceEngine::Layout::ANY);

InferenceEngine::Blob::Ptr wrapToInfEngineBlob(const Mat& m, const std::vector<size_t>& shape, InferenceEngine::Layout layout);

InferenceEngine::DataPtr infEngineDataNode(const Ptr<BackendWrapper>& ptr);

Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob);
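
// Illustrative sketch: wrapToInfEngineBlob() creates a blob over an existing
// Mat and infEngineBlobToMat() maps a blob back to a Mat header (assuming, as
// elsewhere in this backend, that both share memory rather than copy):
//
//   Mat m(std::vector<int>{1, 3, 224, 224}, CV_32F);
//   InferenceEngine::Blob::Ptr blob = wrapToInfEngineBlob(m);
//   Mat view = infEngineBlobToMat(blob);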

// Convert Inference Engine blob with FP32 precision to FP16 precision.
// Allocates memory for a new blob.
InferenceEngine::Blob::Ptr convertFp16(const InferenceEngine::Blob::Ptr& blob);
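
// Illustrative usage (hypothetical variable names):
//
//   InferenceEngine::Blob::Ptr fp32Blob = wrapToInfEngineBlob(weights);
//   InferenceEngine::Blob::Ptr fp16Blob = convertFp16(fp32Blob);  // new FP16 copy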

void addConstantData(const std::string& name, InferenceEngine::Blob::Ptr data, InferenceEngine::Builder::Layer& l);

// This is a fake class to run networks from Model Optimizer. Objects of this
// class simulate the responses of layers that are imported by OpenCV and
// supported by Inference Engine. The main difference is that they do not
// perform a forward pass.
class InfEngineBackendLayer : public Layer
{
public:
    InfEngineBackendLayer(const InferenceEngine::CNNNetwork &t_net_) : t_net(t_net_) {}

    virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
                                 const int requiredOutputs,
                                 std::vector<MatShape> &outputs,
                                 std::vector<MatShape> &internals) const CV_OVERRIDE;

    virtual void forward(InputArrayOfArrays inputs, OutputArrayOfArrays outputs,
                         OutputArrayOfArrays internals) CV_OVERRIDE;

    virtual bool supportBackend(int backendId) CV_OVERRIDE;

private:
    InferenceEngine::CNNNetwork t_net;
};

CV__DNN_EXPERIMENTAL_NS_BEGIN

bool isMyriadX();

CV__DNN_EXPERIMENTAL_NS_END

#endif  // HAVE_INF_ENGINE

bool haveInfEngine();

void forwardInfEngine(const std::vector<Ptr<BackendWrapper> >& outBlobsWrappers,
                      Ptr<BackendNode>& node, bool isAsync);
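
// Illustrative call (hypothetical variable names); with isAsync == true the
// results are expected to arrive through the wrappers' AsyncArray futureMat
// rather than being written synchronously:
//
//   forwardInfEngine(outBlobsWrappers, node, /*isAsync=*/false);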

}}  // namespace dnn, namespace cv

#endif  // __OPENCV_DNN_OP_INF_ENGINE_HPP__