// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2018, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.

#ifndef __OPENCV_DNN_OP_INF_ENGINE_HPP__
#define __OPENCV_DNN_OP_INF_ENGINE_HPP__

#include "opencv2/core/cvdef.h"
#include "opencv2/core/cvstd.hpp"
#include "opencv2/dnn.hpp"

#ifdef HAVE_INF_ENGINE
#if defined(__GNUC__) && __GNUC__ >= 5
//#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wsuggest-override"
#endif
#include <inference_engine.hpp>
#if defined(__GNUC__) && __GNUC__ >= 5
//#pragma GCC diagnostic pop
#endif

#define INF_ENGINE_RELEASE_2018R1 2018010000
#define INF_ENGINE_RELEASE_2018R2 2018020000
#define INF_ENGINE_RELEASE_2018R3 2018030000
#define INF_ENGINE_RELEASE_2018R4 2018040000
#define INF_ENGINE_RELEASE_2018R5 2018050000

#ifndef INF_ENGINE_RELEASE
#warning("IE version has not been provided via command-line. Using 2018R5 by default")
#define INF_ENGINE_RELEASE INF_ENGINE_RELEASE_2018R5
#endif

#define INF_ENGINE_VER_MAJOR_GT(ver) (((INF_ENGINE_RELEASE) / 10000) > ((ver) / 10000))
#define INF_ENGINE_VER_MAJOR_GE(ver) (((INF_ENGINE_RELEASE) / 10000) >= ((ver) / 10000))
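
// Usage sketch (illustrative, not required by this header): code that depends on
// a newer Inference Engine release can be gated with the comparison macros above.
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R3)
// Declarations placed here are compiled only against IE 2018 R3 or newer.
#endif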

#endif  // HAVE_INF_ENGINE

namespace cv { namespace dnn {

#ifdef HAVE_INF_ENGINE

class InfEngineBackendNet : public InferenceEngine::ICNNNetwork
{
public:
    InfEngineBackendNet();

    InfEngineBackendNet(InferenceEngine::CNNNetwork& net);

    virtual void Release() noexcept CV_OVERRIDE;

    void setPrecision(InferenceEngine::Precision p) noexcept;

    virtual InferenceEngine::Precision getPrecision() noexcept;

    virtual InferenceEngine::Precision getPrecision() const noexcept;

    virtual void getOutputsInfo(InferenceEngine::OutputsDataMap &out) noexcept /*CV_OVERRIDE*/;

    virtual void getOutputsInfo(InferenceEngine::OutputsDataMap &out) const noexcept /*CV_OVERRIDE*/;

    virtual void getInputsInfo(InferenceEngine::InputsDataMap &inputs) noexcept /*CV_OVERRIDE*/;

    virtual void getInputsInfo(InferenceEngine::InputsDataMap &inputs) const noexcept /*CV_OVERRIDE*/;

    virtual InferenceEngine::InputInfo::Ptr getInput(const std::string &inputName) noexcept;

    virtual InferenceEngine::InputInfo::Ptr getInput(const std::string &inputName) const noexcept;

    virtual InferenceEngine::StatusCode serialize(const std::string &xmlPath, const std::string &binPath, InferenceEngine::ResponseDesc* resp) const noexcept;

    virtual void getName(char *pName, size_t len) noexcept;

    virtual void getName(char *pName, size_t len) const noexcept;

    virtual const std::string& getName() const noexcept;

    virtual size_t layerCount() noexcept;

    virtual size_t layerCount() const noexcept;

    virtual InferenceEngine::DataPtr& getData(const char *dname) noexcept CV_OVERRIDE;

    virtual void addLayer(const InferenceEngine::CNNLayerPtr &layer) noexcept CV_OVERRIDE;

    virtual InferenceEngine::StatusCode addOutput(const std::string &layerName,
                                                  size_t outputIndex = 0,
                                                  InferenceEngine::ResponseDesc *resp = nullptr) noexcept;

    virtual InferenceEngine::StatusCode getLayerByName(const char *layerName,
                                                       InferenceEngine::CNNLayerPtr &out,
                                                       InferenceEngine::ResponseDesc *resp) noexcept;

    virtual InferenceEngine::StatusCode getLayerByName(const char *layerName,
                                                       InferenceEngine::CNNLayerPtr &out,
                                                       InferenceEngine::ResponseDesc *resp) const noexcept;

    virtual void setTargetDevice(InferenceEngine::TargetDevice device) noexcept CV_OVERRIDE;

    virtual InferenceEngine::TargetDevice getTargetDevice() noexcept;

    virtual InferenceEngine::TargetDevice getTargetDevice() const noexcept;

    virtual InferenceEngine::StatusCode setBatchSize(const size_t size) noexcept CV_OVERRIDE;

    virtual InferenceEngine::StatusCode setBatchSize(size_t size, InferenceEngine::ResponseDesc* responseDesc) noexcept;

    virtual size_t getBatchSize() const noexcept CV_OVERRIDE;

#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R2)
    virtual InferenceEngine::StatusCode AddExtension(const InferenceEngine::IShapeInferExtensionPtr& extension, InferenceEngine::ResponseDesc* resp) noexcept;
    virtual InferenceEngine::StatusCode reshape(const InputShapes& inputShapes, InferenceEngine::ResponseDesc* resp) noexcept;
#endif

    void init(int targetId);

    void addBlobs(const std::vector<Ptr<BackendWrapper> >& wrappers);

    void forward();

    bool isInitialized();

private:
    std::vector<InferenceEngine::CNNLayerPtr> layers;
    InferenceEngine::InputsDataMap inputs;
    InferenceEngine::OutputsDataMap outputs;
    InferenceEngine::BlobMap inpBlobs;
    InferenceEngine::BlobMap outBlobs;
    InferenceEngine::BlobMap allBlobs;
    InferenceEngine::TargetDevice targetDevice;
    InferenceEngine::Precision precision;
    InferenceEngine::InferenceEnginePluginPtr enginePtr;
    InferenceEngine::InferencePlugin plugin;
    InferenceEngine::ExecutableNetwork netExec;
    InferenceEngine::InferRequest infRequest;
    // In case of models from Model Optimizer we need to manage their lifetime.
    InferenceEngine::CNNNetwork netOwner;
    // There is no way to check if netOwner is initialized or not, so we use
    // a separate flag to determine if the model has been loaded from IR.
    bool hasNetOwner;

    std::string name;

    void initPlugin(InferenceEngine::ICNNNetwork& net);
};
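
// A minimal usage sketch, not part of the backend API (assumptions: a CPU target
// and wrappers that already cover every network input and output). It only
// illustrates the expected call order: register blobs, initialize the plugin
// once, then run synchronous inference.
static inline void exampleRunInfEngineNet(InfEngineBackendNet& net,
                                          const std::vector<Ptr<BackendWrapper> >& wrappers)
{
    net.addBlobs(wrappers);        // bind input/output blobs that wrap cv::Mat data
    if (!net.isInitialized())
        net.init(DNN_TARGET_CPU);  // choose device/precision and load the IE plugin
    net.forward();                 // execute the underlying InferRequest
}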

class InfEngineBackendNode : public BackendNode
{
public:
    InfEngineBackendNode(const InferenceEngine::CNNLayerPtr& layer);

    void connect(std::vector<Ptr<BackendWrapper> >& inputs,
                 std::vector<Ptr<BackendWrapper> >& outputs);

    InferenceEngine::CNNLayerPtr layer;
    // Inference Engine network object that allows obtaining the outputs of this layer.
    Ptr<InfEngineBackendNet> net;
};
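
// A minimal sketch of how a backend node is typically assembled (the helper name
// is illustrative only): the node owns the IE layer built for an OpenCV layer and
// is connected to the wrappers of its inputs and outputs.
static inline Ptr<BackendNode> exampleMakeInfEngineNode(const InferenceEngine::CNNLayerPtr& ieLayer,
                                                        std::vector<Ptr<BackendWrapper> >& inputs,
                                                        std::vector<Ptr<BackendWrapper> >& outputs)
{
    Ptr<InfEngineBackendNode> node(new InfEngineBackendNode(ieLayer));
    node->connect(inputs, outputs);  // attach the wrappers' IE data nodes to the layer
    return node;
}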

class InfEngineBackendWrapper : public BackendWrapper
{
public:
    InfEngineBackendWrapper(int targetId, const Mat& m);

    InfEngineBackendWrapper(Ptr<BackendWrapper> wrapper);

    ~InfEngineBackendWrapper();

    static Ptr<BackendWrapper> create(Ptr<BackendWrapper> wrapper);

    virtual void copyToHost() CV_OVERRIDE;

    virtual void setHostDirty() CV_OVERRIDE;

    InferenceEngine::DataPtr dataPtr;
    InferenceEngine::Blob::Ptr blob;
};
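
// A minimal sketch, not part of the API (the CPU target is an assumption for
// illustration): wrapping an existing Mat produces a BackendWrapper that exposes
// both an IE DataPtr and a Blob::Ptr for the same tensor.
static inline Ptr<BackendWrapper> exampleWrapMatForIE(const Mat& m)
{
    return Ptr<BackendWrapper>(new InfEngineBackendWrapper(DNN_TARGET_CPU, m));
}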

InferenceEngine::Blob::Ptr wrapToInfEngineBlob(const Mat& m, InferenceEngine::Layout layout = InferenceEngine::Layout::ANY);

InferenceEngine::Blob::Ptr wrapToInfEngineBlob(const Mat& m, const std::vector<size_t>& shape, InferenceEngine::Layout layout);

InferenceEngine::DataPtr infEngineDataNode(const Ptr<BackendWrapper>& ptr);

Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob);

// Convert Inference Engine blob with FP32 precision to FP16 precision.
// Allocates memory for a new blob.
InferenceEngine::TBlob<int16_t>::Ptr convertFp16(const InferenceEngine::Blob::Ptr& blob);
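
// A short sketch of the helpers above (assumptions: a CV_32F Mat of weights and a
// target that expects half precision): wrapToInfEngineBlob() shares the Mat memory,
// while convertFp16() allocates a new FP16 blob.
static inline InferenceEngine::Blob::Ptr exampleWrapAsFp16(const Mat& weights)
{
    InferenceEngine::Blob::Ptr fp32 = wrapToInfEngineBlob(weights);  // zero-copy view of the Mat
    return convertFp16(fp32);                                        // newly allocated FP16 copy
}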

// This is a fake class to run networks from Model Optimizer. Objects of this
// class simulate responses of layers that are imported by OpenCV and supported by
// Inference Engine. The main difference is that they do not perform a forward pass.
class InfEngineBackendLayer : public Layer
{
public:
    InfEngineBackendLayer(const InferenceEngine::DataPtr& output);

    virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
                                 const int requiredOutputs,
                                 std::vector<MatShape> &outputs,
                                 std::vector<MatShape> &internals) const CV_OVERRIDE;

    virtual void forward(InputArrayOfArrays inputs, OutputArrayOfArrays outputs,
                         OutputArrayOfArrays internals) CV_OVERRIDE;

    virtual bool supportBackend(int backendId) CV_OVERRIDE;

private:
    InferenceEngine::DataPtr output;
};

#endif  // HAVE_INF_ENGINE

bool haveInfEngine();

void forwardInfEngine(Ptr<BackendNode>& node);
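
// A minimal sketch (illustrative only) of how the two functions above are meant to
// be used together: check for Inference Engine support before forwarding a node.
static inline void exampleForwardIfAvailable(Ptr<BackendNode>& node)
{
    if (haveInfEngine())
        forwardInfEngine(node);  // executes the node's network when built with IE support
}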

}}  // namespace dnn, namespace cv

#endif  // __OPENCV_DNN_OP_INF_ENGINE_HPP__