// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2018-2019, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.

#ifndef __OPENCV_DNN_OP_INF_ENGINE_HPP__
#define __OPENCV_DNN_OP_INF_ENGINE_HPP__

#include "opencv2/core/cvdef.h"
#include "opencv2/core/cvstd.hpp"
#include "opencv2/dnn.hpp"

#include "opencv2/core/async.hpp"
#include "opencv2/core/detail/async_promise.hpp"

#include "opencv2/dnn/utils/inference_engine.hpp"

#ifdef HAVE_INF_ENGINE

#define INF_ENGINE_RELEASE_2018R5 2018050000
#define INF_ENGINE_RELEASE_2019R1 2019010000
#define INF_ENGINE_RELEASE_2019R2 2019020000
#define INF_ENGINE_RELEASE_2019R3 2019030000
#define INF_ENGINE_RELEASE_2020_1 2020010000
#define INF_ENGINE_RELEASE_2020_2 2020020000
#define INF_ENGINE_RELEASE_2020_3 2020030000
#define INF_ENGINE_RELEASE_2020_4 2020040000
#define INF_ENGINE_RELEASE_2021_1 2021010000

#ifndef INF_ENGINE_RELEASE
#warning("IE version have not been provided via command-line. Using 2021.1 by default")
#define INF_ENGINE_RELEASE INF_ENGINE_RELEASE_2021_1
#endif

#define INF_ENGINE_VER_MAJOR_GT(ver) (((INF_ENGINE_RELEASE) / 10000) > ((ver) / 10000))
#define INF_ENGINE_VER_MAJOR_GE(ver) (((INF_ENGINE_RELEASE) / 10000) >= ((ver) / 10000))
#define INF_ENGINE_VER_MAJOR_LT(ver) (((INF_ENGINE_RELEASE) / 10000) < ((ver) / 10000))
#define INF_ENGINE_VER_MAJOR_LE(ver) (((INF_ENGINE_RELEASE) / 10000) <= ((ver) / 10000))
#define INF_ENGINE_VER_MAJOR_EQ(ver) (((INF_ENGINE_RELEASE) / 10000) == ((ver) / 10000))
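
// Example (illustrative sketch): the macros above compare releases with the
// trailing patch digits dropped, so they are suited to guarding code that
// needs a minimum IE version, e.g.:
//
//     #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R2)
//     // ... code that relies on the 2019R2 (or newer) API ...
//     #endif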

#if defined(__GNUC__) && __GNUC__ >= 5
//#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wsuggest-override"
#endif

#if defined(HAVE_DNN_IE_NN_BUILDER_2019) || INF_ENGINE_VER_MAJOR_EQ(INF_ENGINE_RELEASE_2020_4)
//#define INFERENCE_ENGINE_DEPRECATED  // turn off deprecation warnings from IE
// At the moment there is no way to suppress warnings from IE alone, so we are forced to suppress warnings globally.
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#endif
#ifdef _MSC_VER
#pragma warning(disable: 4996)  // was declared deprecated
#endif
#endif

#if defined(__GNUC__) && INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2020_1)
#pragma GCC visibility push(default)
#endif

#include <inference_engine.hpp>

#ifdef HAVE_DNN_IE_NN_BUILDER_2019
#include <ie_builders.hpp>
#endif

#if defined(__GNUC__) && INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2020_1)
#pragma GCC visibility pop
#endif

#if defined(__GNUC__) && __GNUC__ >= 5
//#pragma GCC diagnostic pop
#endif

#endif  // HAVE_INF_ENGINE

namespace cv { namespace dnn {

#ifdef HAVE_INF_ENGINE

Backend& getInferenceEngineBackendTypeParam();

Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob);

void infEngineBlobsToMats(const std::vector<InferenceEngine::Blob::Ptr>& blobs,
                          std::vector<Mat>& mats);

#ifdef HAVE_DNN_IE_NN_BUILDER_2019

class InfEngineBackendNet
{
public:
    InfEngineBackendNet();

    InfEngineBackendNet(InferenceEngine::CNNNetwork& net);

    void addLayer(InferenceEngine::Builder::Layer& layer);

    void addOutput(const std::string& name);

    void connect(const std::vector<Ptr<BackendWrapper> >& inputs,
                 const std::vector<Ptr<BackendWrapper> >& outputs,
                 const std::string& layerName);

    bool isInitialized();

    void init(Target targetId);

    void forward(const std::vector<Ptr<BackendWrapper> >& outBlobsWrappers,
                 bool isAsync);

    void initPlugin(InferenceEngine::CNNNetwork& net);

    void addBlobs(const std::vector<cv::Ptr<BackendWrapper> >& ptrs);

    void reset();

private:
    InferenceEngine::Builder::Network netBuilder;

    InferenceEngine::ExecutableNetwork netExec;
    InferenceEngine::BlobMap allBlobs;
    std::string device_name;
#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
    InferenceEngine::InferenceEnginePluginPtr enginePtr;
    InferenceEngine::InferencePlugin plugin;
#else
    bool isInit = false;
#endif

    struct InfEngineReqWrapper
    {
        InfEngineReqWrapper() : isReady(true) {}

        void makePromises(const std::vector<Ptr<BackendWrapper> >& outs);

        InferenceEngine::InferRequest req;
        std::vector<cv::AsyncPromise> outProms;
        std::vector<std::string> outsNames;
        bool isReady;
    };

    std::vector<Ptr<InfEngineReqWrapper> > infRequests;

    InferenceEngine::CNNNetwork cnn;
    bool hasNetOwner;

    std::map<std::string, int> layers;
    std::vector<std::string> requestedOutputs;

    std::set<std::pair<int, int> > unconnectedPorts;
};
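
// Typical build-and-run sequence (illustrative sketch only; in OpenCV these
// calls are driven by the dnn backend, and `layer`/`outWrappers` are
// hypothetical placeholders):
//
//     InfEngineBackendNet ieNet;
//     ieNet.addLayer(layer);            // repeated for every converted layer
//     ieNet.addOutput("prob");          // request an output by name
//     ieNet.init(DNN_TARGET_CPU);       // build and load the executable network
//     ieNet.forward(outWrappers, /*isAsync=*/false);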

class InfEngineBackendNode : public BackendNode
{
public:
    InfEngineBackendNode(const InferenceEngine::Builder::Layer& layer);

    InfEngineBackendNode(Ptr<Layer>& layer, std::vector<Mat*>& inputs,
                         std::vector<Mat>& outputs, std::vector<Mat>& internals);

    void connect(std::vector<Ptr<BackendWrapper> >& inputs,
                 std::vector<Ptr<BackendWrapper> >& outputs);

    // Inference Engine network object that allows obtaining the outputs of this layer.
    InferenceEngine::Builder::Layer layer;
    Ptr<InfEngineBackendNet> net;
    // CPU fallback in case of unsupported Inference Engine layer.
    Ptr<dnn::Layer> cvLayer;
};

class InfEngineBackendWrapper : public BackendWrapper
{
public:
    InfEngineBackendWrapper(int targetId, const Mat& m);

    InfEngineBackendWrapper(Ptr<BackendWrapper> wrapper);

    ~InfEngineBackendWrapper();

    static Ptr<BackendWrapper> create(Ptr<BackendWrapper> wrapper);

    virtual void copyToHost() CV_OVERRIDE;

    virtual void setHostDirty() CV_OVERRIDE;

    InferenceEngine::DataPtr dataPtr;
    InferenceEngine::Blob::Ptr blob;
    AsyncArray futureMat;
};

InferenceEngine::Blob::Ptr wrapToInfEngineBlob(const Mat& m, InferenceEngine::Layout layout = InferenceEngine::Layout::ANY);

InferenceEngine::Blob::Ptr wrapToInfEngineBlob(const Mat& m, const std::vector<size_t>& shape, InferenceEngine::Layout layout);

InferenceEngine::DataPtr infEngineDataNode(const Ptr<BackendWrapper>& ptr);

// Convert Inference Engine blob with FP32 precision to FP16 precision.
// Allocates memory for a new blob.
InferenceEngine::Blob::Ptr convertFp16(const InferenceEngine::Blob::Ptr& blob);
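
// Usage sketch (hypothetical `weights` Mat): convert FP32 data before
// attaching it to an FP16 network:
//
//     InferenceEngine::Blob::Ptr fp32 = wrapToInfEngineBlob(weights);
//     InferenceEngine::Blob::Ptr fp16 = convertFp16(fp32);  // newly allocated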

void addConstantData(const std::string& name, InferenceEngine::Blob::Ptr data, InferenceEngine::Builder::Layer& l);

// This is a fake class to run networks from Model Optimizer. Objects of this
// class simulate responses of layers that are imported by OpenCV and supported
// by Inference Engine. The main difference is that they do not perform a forward pass.
class InfEngineBackendLayer : public Layer
{
public:
    InfEngineBackendLayer(const InferenceEngine::CNNNetwork &t_net_) : t_net(t_net_) {}

    virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
                                 const int requiredOutputs,
                                 std::vector<MatShape> &outputs,
                                 std::vector<MatShape> &internals) const CV_OVERRIDE;

    virtual void forward(InputArrayOfArrays inputs, OutputArrayOfArrays outputs,
                         OutputArrayOfArrays internals) CV_OVERRIDE;

    virtual bool supportBackend(int backendId) CV_OVERRIDE;

private:
    InferenceEngine::CNNNetwork t_net;
};

class InfEngineExtension : public InferenceEngine::IExtension
{
public:
#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2020_2)
    virtual void SetLogCallback(InferenceEngine::IErrorListener&) noexcept {}
#endif
    virtual void Unload() noexcept {}
    virtual void Release() noexcept {}
    virtual void GetVersion(const InferenceEngine::Version*&) const noexcept {}

    virtual InferenceEngine::StatusCode getPrimitiveTypes(char**&, unsigned int&,
                                                          InferenceEngine::ResponseDesc*) noexcept
    {
        return InferenceEngine::StatusCode::OK;
    }

    InferenceEngine::StatusCode getFactoryFor(InferenceEngine::ILayerImplFactory*& factory,
                                              const InferenceEngine::CNNLayer* cnnLayer,
                                              InferenceEngine::ResponseDesc* resp) noexcept;
};

#endif  // HAVE_DNN_IE_NN_BUILDER_2019


CV__DNN_EXPERIMENTAL_NS_BEGIN

bool isMyriadX();

CV__DNN_EXPERIMENTAL_NS_END

InferenceEngine::Core& getCore(const std::string& id);

template<typename T = size_t>
static inline std::vector<T> getShape(const Mat& mat)
{
    std::vector<T> result(mat.dims);
    for (int i = 0; i < mat.dims; i++)
        result[i] = (T)mat.size[i];
    return result;
}
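
// Example: dimensions of a 4D NCHW-shaped Mat in the form IE expects:
//
//     std::vector<int> dims = {1, 3, 224, 224};
//     cv::Mat m(dims, CV_32F);
//     std::vector<size_t> shape = getShape(m);  // {1, 3, 224, 224}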


#endif  // HAVE_INF_ENGINE

bool haveInfEngine();

void forwardInfEngine(const std::vector<Ptr<BackendWrapper> >& outBlobsWrappers,
                      Ptr<BackendNode>& node, bool isAsync);

}}  // namespace dnn, namespace cv

#endif  // __OPENCV_DNN_OP_INF_ENGINE_HPP__