// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2018-2019, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.

#ifndef __OPENCV_DNN_OP_INF_ENGINE_HPP__
#define __OPENCV_DNN_OP_INF_ENGINE_HPP__

#include "opencv2/core/cvdef.h"
#include "opencv2/core/cvstd.hpp"
#include "opencv2/dnn.hpp"

#include "opencv2/dnn/utils/inference_engine.hpp"

#ifdef HAVE_INF_ENGINE
#if defined(__GNUC__) && __GNUC__ >= 5
//#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wsuggest-override"
#endif
#include <inference_engine.hpp>
#if defined(__GNUC__) && __GNUC__ >= 5
//#pragma GCC diagnostic pop
#endif

#define INF_ENGINE_RELEASE_2018R3 2018030000
#define INF_ENGINE_RELEASE_2018R4 2018040000
#define INF_ENGINE_RELEASE_2018R5 2018050000
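// The release constants encode the version as YYYYRR0000 (year, release number, then four
// digits reserved for finer-grained revisions), e.g. 2018R5 -> 2018050000. The
// *_VER_MAJOR_* macros below divide by 10000 so only the year and release are compared.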

#ifndef INF_ENGINE_RELEASE
#warning("IE version has not been provided via the command line. Using 2018R5 by default")
#define INF_ENGINE_RELEASE INF_ENGINE_RELEASE_2018R5
#endif

#define INF_ENGINE_VER_MAJOR_GT(ver) (((INF_ENGINE_RELEASE) / 10000) > ((ver) / 10000))
#define INF_ENGINE_VER_MAJOR_GE(ver) (((INF_ENGINE_RELEASE) / 10000) >= ((ver) / 10000))
#define INF_ENGINE_VER_MAJOR_LT(ver) (((INF_ENGINE_RELEASE) / 10000) < ((ver) / 10000))
#define INF_ENGINE_VER_MAJOR_EQ(ver) (((INF_ENGINE_RELEASE) / 10000) == ((ver) / 10000))

#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
#include <ie_builders.hpp>
#endif

#endif  // HAVE_INF_ENGINE

namespace cv { namespace dnn {

#ifdef HAVE_INF_ENGINE

#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2018R5)
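// Prior to 2018R5 the backend network is assembled by implementing
// InferenceEngine::ICNNNetwork directly.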
class InfEngineBackendNet : public InferenceEngine::ICNNNetwork
{
public:
    InfEngineBackendNet();

    InfEngineBackendNet(InferenceEngine::CNNNetwork& net);

    virtual void Release() noexcept CV_OVERRIDE;

    void setPrecision(InferenceEngine::Precision p) noexcept;

    virtual InferenceEngine::Precision getPrecision() noexcept;

    virtual InferenceEngine::Precision getPrecision() const noexcept;

    virtual void getOutputsInfo(InferenceEngine::OutputsDataMap &out) noexcept /*CV_OVERRIDE*/;

    virtual void getOutputsInfo(InferenceEngine::OutputsDataMap &out) const noexcept /*CV_OVERRIDE*/;

    virtual void getInputsInfo(InferenceEngine::InputsDataMap &inputs) noexcept /*CV_OVERRIDE*/;

    virtual void getInputsInfo(InferenceEngine::InputsDataMap &inputs) const noexcept /*CV_OVERRIDE*/;

    virtual InferenceEngine::InputInfo::Ptr getInput(const std::string &inputName) noexcept;

    virtual InferenceEngine::InputInfo::Ptr getInput(const std::string &inputName) const noexcept;

    virtual InferenceEngine::StatusCode serialize(const std::string &xmlPath, const std::string &binPath, InferenceEngine::ResponseDesc* resp) const noexcept;

    virtual void getName(char *pName, size_t len) noexcept;

    virtual void getName(char *pName, size_t len) const noexcept;

    virtual const std::string& getName() const noexcept;

    virtual size_t layerCount() noexcept;

    virtual size_t layerCount() const noexcept;

    virtual InferenceEngine::DataPtr& getData(const char *dname) noexcept CV_OVERRIDE;

    virtual void addLayer(const InferenceEngine::CNNLayerPtr &layer) noexcept CV_OVERRIDE;

    virtual InferenceEngine::StatusCode addOutput(const std::string &layerName,
                                                  size_t outputIndex = 0,
                                                  InferenceEngine::ResponseDesc *resp = nullptr) noexcept;

    virtual InferenceEngine::StatusCode getLayerByName(const char *layerName,
                                                       InferenceEngine::CNNLayerPtr &out,
                                                       InferenceEngine::ResponseDesc *resp) noexcept;

    virtual InferenceEngine::StatusCode getLayerByName(const char *layerName,
                                                       InferenceEngine::CNNLayerPtr &out,
                                                       InferenceEngine::ResponseDesc *resp) const noexcept;

    virtual void setTargetDevice(InferenceEngine::TargetDevice device) noexcept CV_OVERRIDE;

    virtual InferenceEngine::TargetDevice getTargetDevice() noexcept;

    virtual InferenceEngine::TargetDevice getTargetDevice() const noexcept;

    virtual InferenceEngine::StatusCode setBatchSize(const size_t size) noexcept CV_OVERRIDE;

    virtual InferenceEngine::StatusCode setBatchSize(size_t size, InferenceEngine::ResponseDesc* responseDesc) noexcept;

    virtual size_t getBatchSize() const noexcept CV_OVERRIDE;

    virtual InferenceEngine::StatusCode AddExtension(const InferenceEngine::IShapeInferExtensionPtr& extension, InferenceEngine::ResponseDesc* resp) noexcept CV_OVERRIDE;
    virtual InferenceEngine::StatusCode reshape(const InputShapes& inputShapes, InferenceEngine::ResponseDesc* resp) noexcept CV_OVERRIDE;

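    // Selects the target device and precision for the given DNN target id and
    // initializes the plugin (brief summary; see the implementation for details).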
    void init(int targetId);

    void addBlobs(const std::vector<Ptr<BackendWrapper> >& wrappers);

    void forward();

    bool isInitialized();

private:
    std::vector<InferenceEngine::CNNLayerPtr> layers;
    InferenceEngine::InputsDataMap inputs;
    InferenceEngine::OutputsDataMap outputs;
    InferenceEngine::BlobMap inpBlobs;
    InferenceEngine::BlobMap outBlobs;
    InferenceEngine::BlobMap allBlobs;
    InferenceEngine::TargetDevice targetDevice;
    InferenceEngine::Precision precision;
    InferenceEngine::InferenceEnginePluginPtr enginePtr;
    InferenceEngine::InferencePlugin plugin;
    InferenceEngine::ExecutableNetwork netExec;
    InferenceEngine::InferRequest infRequest;
    // In case of models from Model Optimizer we need to manage their lifetime.
    InferenceEngine::CNNNetwork netOwner;
    // There is no way to check whether netOwner is initialized, so we use
    // a separate flag to determine if the model has been loaded from IR.
    bool hasNetOwner;

    std::string name;

    void initPlugin(InferenceEngine::ICNNNetwork& net);
};

#else  // IE < R5
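// From 2018R5 onwards the network is assembled through the InferenceEngine::Builder API
// (see the netBuilder member of the class below).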

class InfEngineBackendNet
{
public:
    InfEngineBackendNet();

    InfEngineBackendNet(InferenceEngine::CNNNetwork& net);

    void addLayer(InferenceEngine::Builder::Layer& layer);

    void addOutput(const std::string& name);

    void connect(const std::vector<Ptr<BackendWrapper> >& inputs,
                 const std::vector<Ptr<BackendWrapper> >& outputs,
                 const std::string& layerName);

    bool isInitialized();

    void init(int targetId);

    void forward();

    void initPlugin(InferenceEngine::ICNNNetwork& net);

    void addBlobs(const std::vector<Ptr<BackendWrapper> >& ptrs);

private:
    InferenceEngine::Builder::Network netBuilder;

    InferenceEngine::InferenceEnginePluginPtr enginePtr;
    InferenceEngine::InferencePlugin plugin;
    InferenceEngine::ExecutableNetwork netExec;
    InferenceEngine::InferRequest infRequest;
    InferenceEngine::BlobMap allBlobs;
    InferenceEngine::BlobMap inpBlobs;
    InferenceEngine::BlobMap outBlobs;
    InferenceEngine::TargetDevice targetDevice;

    InferenceEngine::CNNNetwork cnn;
    bool hasNetOwner;

    std::map<std::string, int> layers;
    std::vector<std::string> requestedOutputs;

    std::set<int> unconnectedLayersIds;
};
#endif  // IE < R5

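// Backend node that holds the Inference Engine layer created for a single OpenCV layer,
// together with the network it belongs to.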
class InfEngineBackendNode : public BackendNode
{
public:
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
    InfEngineBackendNode(const InferenceEngine::Builder::Layer& layer);
#else
    InfEngineBackendNode(const InferenceEngine::CNNLayerPtr& layer);
#endif

    void connect(std::vector<Ptr<BackendWrapper> >& inputs,
                 std::vector<Ptr<BackendWrapper> >& outputs);

    // Inference Engine network object used to obtain the outputs of this layer.
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
    InferenceEngine::Builder::Layer layer;
    Ptr<InfEngineBackendNet> net;
#else
    InferenceEngine::CNNLayerPtr layer;
    Ptr<InfEngineBackendNet> net;
#endif
};

class InfEngineBackendWrapper : public BackendWrapper
{
public:
    InfEngineBackendWrapper(int targetId, const Mat& m);

    InfEngineBackendWrapper(Ptr<BackendWrapper> wrapper);

    ~InfEngineBackendWrapper();

    static Ptr<BackendWrapper> create(Ptr<BackendWrapper> wrapper);

    virtual void copyToHost() CV_OVERRIDE;

    virtual void setHostDirty() CV_OVERRIDE;

    InferenceEngine::DataPtr dataPtr;
    InferenceEngine::Blob::Ptr blob;
};

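// Wrap a cv::Mat into an Inference Engine blob of the given layout (the second
// overload additionally takes an explicit shape).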
InferenceEngine::Blob::Ptr wrapToInfEngineBlob(const Mat& m, InferenceEngine::Layout layout = InferenceEngine::Layout::ANY);

InferenceEngine::Blob::Ptr wrapToInfEngineBlob(const Mat& m, const std::vector<size_t>& shape, InferenceEngine::Layout layout);

InferenceEngine::DataPtr infEngineDataNode(const Ptr<BackendWrapper>& ptr);

Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob);

// Convert Inference Engine blob with FP32 precision to FP16 precision.
// Allocates memory for a new blob.
InferenceEngine::Blob::Ptr convertFp16(const InferenceEngine::Blob::Ptr& blob);
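
// A minimal illustrative sketch of how the helpers above fit together (not a real call
// site; the Mat contents, shape and layout are arbitrary placeholders):
//
//   Mat m(224, 224, CV_32FC3);
//   InferenceEngine::Blob::Ptr fp32 = wrapToInfEngineBlob(m, InferenceEngine::Layout::ANY);
//   InferenceEngine::Blob::Ptr fp16 = convertFp16(fp32);  // allocates a new FP16 blob
//   Mat unwrapped = infEngineBlobToMat(fp32);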

#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
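// Registers a constant blob (e.g. weights or biases) in a graph builder layer under the
// given name.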
void addConstantData(const std::string& name, InferenceEngine::Blob::Ptr data, InferenceEngine::Builder::Layer& l);
#endif

// This is a fake class to run networks from Model Optimizer. Objects of this
// class simulate responses of layers that are imported by OpenCV and supported by
// Inference Engine. The main difference is that they do not perform a forward pass.
class InfEngineBackendLayer : public Layer
{
public:
    InfEngineBackendLayer(const InferenceEngine::CNNNetwork &t_net_) : t_net(t_net_) {}

    virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
                                 const int requiredOutputs,
                                 std::vector<MatShape> &outputs,
                                 std::vector<MatShape> &internals) const CV_OVERRIDE;

    virtual void forward(InputArrayOfArrays inputs, OutputArrayOfArrays outputs,
                         OutputArrayOfArrays internals) CV_OVERRIDE;

    virtual bool supportBackend(int backendId) CV_OVERRIDE;

private:
    InferenceEngine::CNNNetwork t_net;
};

CV__DNN_EXPERIMENTAL_NS_BEGIN

bool isMyriadX();

CV__DNN_EXPERIMENTAL_NS_END

#endif  // HAVE_INF_ENGINE
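
// haveInfEngine() reports whether OpenCV was built with Inference Engine support;
// forwardInfEngine() runs a forward pass of the network that owns the given backend node.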

bool haveInfEngine();

void forwardInfEngine(Ptr<BackendNode>& node);

}}  // namespace dnn, namespace cv

#endif  // __OPENCV_DNN_OP_INF_ENGINE_HPP__