// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2018-2019, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.

#ifndef __OPENCV_DNN_OP_INF_ENGINE_HPP__
#define __OPENCV_DNN_OP_INF_ENGINE_HPP__

#include "opencv2/core/cvdef.h"
#include "opencv2/core/cvstd.hpp"
#include "opencv2/dnn.hpp"

#include "opencv2/core/async.hpp"
#include "opencv2/core/detail/async_promise.hpp"

#include "opencv2/dnn/utils/inference_engine.hpp"

#ifdef HAVE_INF_ENGINE

#define INF_ENGINE_RELEASE_2018R5 2018050000
#define INF_ENGINE_RELEASE_2019R1 2019010000

#ifndef INF_ENGINE_RELEASE
#warning("IE version has not been provided via command-line. Using 2019R1 by default")
#define INF_ENGINE_RELEASE INF_ENGINE_RELEASE_2019R1
#endif

#define INF_ENGINE_VER_MAJOR_GT(ver) (((INF_ENGINE_RELEASE) / 10000) > ((ver) / 10000))
#define INF_ENGINE_VER_MAJOR_GE(ver) (((INF_ENGINE_RELEASE) / 10000) >= ((ver) / 10000))
#define INF_ENGINE_VER_MAJOR_LT(ver) (((INF_ENGINE_RELEASE) / 10000) < ((ver) / 10000))
#define INF_ENGINE_VER_MAJOR_LE(ver) (((INF_ENGINE_RELEASE) / 10000) <= ((ver) / 10000))
#define INF_ENGINE_VER_MAJOR_EQ(ver) (((INF_ENGINE_RELEASE) / 10000) == ((ver) / 10000))
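
// Illustrative usage (a sketch): the comparison macros divide by 10000, so only the
// year/release part of INF_ENGINE_RELEASE is compared and the trailing build digits
// are ignored. Version-specific code can be guarded like this:
//
//   #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1)
//       // code that requires the 2019 R1 API or newer
//   #else
//       // fallback path for 2018 R5
//   #endif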

#if defined(__GNUC__) && __GNUC__ >= 5
//#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wsuggest-override"
#endif

//#define INFERENCE_ENGINE_DEPRECATED  // turn off deprecation warnings from IE
// There is currently no way to suppress warnings from the IE headers only, so we are forced to suppress deprecation warnings globally.
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#endif
#ifdef _MSC_VER
#pragma warning(disable: 4996)  // was declared deprecated
#endif

#if defined(__GNUC__)
#pragma GCC visibility push(default)
#endif

#include <inference_engine.hpp>

#include <ie_builders.hpp>

#if defined(__GNUC__)
#pragma GCC visibility pop
#endif

#if defined(__GNUC__) && __GNUC__ >= 5
//#pragma GCC diagnostic pop
#endif

#endif  // HAVE_INF_ENGINE

namespace cv { namespace dnn {

#ifdef HAVE_INF_ENGINE

class InfEngineBackendNet
{
public:
    InfEngineBackendNet();

    InfEngineBackendNet(InferenceEngine::CNNNetwork& net);

    void addLayer(InferenceEngine::Builder::Layer& layer);

    void addOutput(const std::string& name);

    void connect(const std::vector<Ptr<BackendWrapper> >& inputs,
                 const std::vector<Ptr<BackendWrapper> >& outputs,
                 const std::string& layerName);

    bool isInitialized();

    void init(int targetId);

    void forward(const std::vector<Ptr<BackendWrapper> >& outBlobsWrappers,
                 bool isAsync);

    void initPlugin(InferenceEngine::ICNNNetwork& net);

    void addBlobs(const std::vector<cv::Ptr<BackendWrapper> >& ptrs);

private:
    InferenceEngine::Builder::Network netBuilder;

    InferenceEngine::InferenceEnginePluginPtr enginePtr;
    InferenceEngine::InferencePlugin plugin;
    InferenceEngine::ExecutableNetwork netExec;
    InferenceEngine::BlobMap allBlobs;
    InferenceEngine::TargetDevice targetDevice;

    struct InfEngineReqWrapper
    {
        InfEngineReqWrapper() : isReady(true) {}

        void makePromises(const std::vector<Ptr<BackendWrapper> >& outs);

        InferenceEngine::InferRequest req;
        std::vector<cv::AsyncPromise> outProms;
        std::vector<std::string> outsNames;
        bool isReady;
    };

    std::vector<Ptr<InfEngineReqWrapper> > infRequests;

    InferenceEngine::CNNNetwork cnn;
    bool hasNetOwner;

    std::map<std::string, int> layers;
    std::vector<std::string> requestedOutputs;

    std::set<int> unconnectedLayersIds;
};
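
// Illustrative call sequence (a sketch; layerBuilder, layerName, outputName and the
// wrapper vectors are assumed to come from the calling backend code):
//
//   InfEngineBackendNet ieNet;
//   ieNet.addLayer(layerBuilder);                  // one InferenceEngine::Builder::Layer per OpenCV layer
//   ieNet.connect(inputWrappers, outputWrappers, layerName);
//   ieNet.addOutput(outputName);                   // blobs that must stay retrievable after compilation
//   ieNet.addBlobs(inputWrappers);
//   ieNet.addBlobs(outputWrappers);
//   if (!ieNet.isInitialized())
//       ieNet.init(DNN_TARGET_CPU);                // builds the network and loads it into a plugin
//   ieNet.forward(outputWrappers, /*isAsync=*/false);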

class InfEngineBackendNode : public BackendNode
{
public:
    InfEngineBackendNode(const InferenceEngine::Builder::Layer& layer);

    void connect(std::vector<Ptr<BackendWrapper> >& inputs,
                 std::vector<Ptr<BackendWrapper> >& outputs);

    // Inference Engine network object that can be used to obtain the outputs of this layer.
    InferenceEngine::Builder::Layer layer;
    Ptr<InfEngineBackendNet> net;
};

class InfEngineBackendWrapper : public BackendWrapper
{
public:
    InfEngineBackendWrapper(int targetId, const Mat& m);

    InfEngineBackendWrapper(Ptr<BackendWrapper> wrapper);

    ~InfEngineBackendWrapper();

    static Ptr<BackendWrapper> create(Ptr<BackendWrapper> wrapper);

    virtual void copyToHost() CV_OVERRIDE;

    virtual void setHostDirty() CV_OVERRIDE;

    InferenceEngine::DataPtr dataPtr;
    InferenceEngine::Blob::Ptr blob;
    AsyncArray futureMat;
};

InferenceEngine::Blob::Ptr wrapToInfEngineBlob(const Mat& m, InferenceEngine::Layout layout = InferenceEngine::Layout::ANY);

InferenceEngine::Blob::Ptr wrapToInfEngineBlob(const Mat& m, const std::vector<size_t>& shape, InferenceEngine::Layout layout);

InferenceEngine::DataPtr infEngineDataNode(const Ptr<BackendWrapper>& ptr);

Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob);
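
// Illustrative round trip (a sketch; the dimensions are hypothetical): wrapping is
// expected to share the Mat data with the returned blob rather than copy it, and
// infEngineBlobToMat() exposes the blob's buffer back as a Mat header.
//
//   std::vector<int> dims = {1, 3, 224, 224};
//   Mat m(dims, CV_32F);
//   InferenceEngine::Blob::Ptr blob = wrapToInfEngineBlob(m, {1, 3, 224, 224},
//                                                         InferenceEngine::Layout::NCHW);
//   Mat view = infEngineBlobToMat(blob);           // Mat header over the blob data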

// Convert Inference Engine blob with FP32 precision to FP16 precision.
// Allocates memory for a new blob.
InferenceEngine::Blob::Ptr convertFp16(const InferenceEngine::Blob::Ptr& blob);

void addConstantData(const std::string& name, InferenceEngine::Blob::Ptr data, InferenceEngine::Builder::Layer& l);
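
// Illustrative sketch (assuming `layerBuilder` is an InferenceEngine::Builder::Layer
// under construction, `weightsMat` holds FP32 weights and `targetId` is the chosen
// DNN target): constant data is wrapped into a blob, optionally converted to FP16,
// and then attached to the layer.
//
//   InferenceEngine::Blob::Ptr weights = wrapToInfEngineBlob(weightsMat);
//   if (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD)
//       weights = convertFp16(weights);            // allocates a new FP16 blob
//   addConstantData("weights", weights, layerBuilder);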

// This is a fake class to run networks from Model Optimizer. Objects of this
// class simulate the responses of layers that are imported by OpenCV and supported
// by the Inference Engine. The main difference is that they do not perform a forward pass.
class InfEngineBackendLayer : public Layer
{
public:
    InfEngineBackendLayer(const InferenceEngine::CNNNetwork &t_net_) : t_net(t_net_) {}

    virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
                                 const int requiredOutputs,
                                 std::vector<MatShape> &outputs,
                                 std::vector<MatShape> &internals) const CV_OVERRIDE;

    virtual void forward(InputArrayOfArrays inputs, OutputArrayOfArrays outputs,
                         OutputArrayOfArrays internals) CV_OVERRIDE;

    virtual bool supportBackend(int backendId) CV_OVERRIDE;

private:
    InferenceEngine::CNNNetwork t_net;
};
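
// Illustrative sketch: an instance only wraps the CNNNetwork to answer shape queries;
// `cnnNetwork` and `inputShapes` are assumed to be provided by the caller.
//
//   Ptr<Layer> fake = makePtr<InfEngineBackendLayer>(cnnNetwork);
//   std::vector<MatShape> outShapes, internals;
//   fake->getMemoryShapes(inputShapes, 0, outShapes, internals);
//   // forward() is not expected to perform real computations (see the comment above).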

CV__DNN_EXPERIMENTAL_NS_BEGIN

bool isMyriadX();

CV__DNN_EXPERIMENTAL_NS_END

#endif  // HAVE_INF_ENGINE

bool haveInfEngine();

void forwardInfEngine(const std::vector<Ptr<BackendWrapper> >& outBlobsWrappers,
                      Ptr<BackendNode>& node, bool isAsync);
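
// Illustrative call pattern (a sketch; `outWrappers` and `node` are assumed to come
// from the caller): the runtime check keeps code paths valid in builds without IE.
//
//   if (haveInfEngine())
//       forwardInfEngine(outWrappers, node, /*isAsync=*/false);
//   else
//       CV_Error(Error::StsNotImplemented, "OpenCV is built without Inference Engine support");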

}}  // namespace dnn, namespace cv

#endif  // __OPENCV_DNN_OP_INF_ENGINE_HPP__