// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2018, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.

#ifndef __OPENCV_DNN_OP_INF_ENGINE_HPP__
#define __OPENCV_DNN_OP_INF_ENGINE_HPP__

#include "opencv2/core/cvdef.h"
#include "opencv2/core/cvstd.hpp"
#include "opencv2/dnn.hpp"

#ifdef HAVE_INF_ENGINE
#if defined(__GNUC__) && __GNUC__ >= 5
//#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wsuggest-override"
#endif
#include <inference_engine.hpp>
#if defined(__GNUC__) && __GNUC__ >= 5
//#pragma GCC diagnostic pop
#endif

#define INF_ENGINE_RELEASE_2018R1 2018010000
#define INF_ENGINE_RELEASE_2018R2 2018020000

#ifndef INF_ENGINE_RELEASE
#warning("IE version has not been provided via command-line. Using 2018R2 by default")
#define INF_ENGINE_RELEASE INF_ENGINE_RELEASE_2018R2
#endif

#define INF_ENGINE_VER_MAJOR_GT(ver) (((INF_ENGINE_RELEASE) / 10000) > ((ver) / 10000))
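
// The release constants encode <year><release><patch> as YYYYRRPPPP (e.g.
// 2018R2 -> 2018020000), so dividing by 10000 strips the patch digits and the
// macro compares only the year/release part. Typical use, as later in this
// header (a sketch):
//
//     #if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R2)
//     // declarations available only in releases newer than 2018 R2
//     #endif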

#endif  // HAVE_INF_ENGINE

namespace cv { namespace dnn {

#ifdef HAVE_INF_ENGINE

class InfEngineBackendNet : public InferenceEngine::ICNNNetwork
{
public:
    InfEngineBackendNet();

    InfEngineBackendNet(InferenceEngine::CNNNetwork& net);

    virtual void Release() noexcept CV_OVERRIDE;

    void setPrecision(InferenceEngine::Precision p) noexcept;

    virtual InferenceEngine::Precision getPrecision() noexcept;

    virtual InferenceEngine::Precision getPrecision() const noexcept;

    virtual void getOutputsInfo(InferenceEngine::OutputsDataMap &out) noexcept /*CV_OVERRIDE*/;

    virtual void getOutputsInfo(InferenceEngine::OutputsDataMap &out) const noexcept /*CV_OVERRIDE*/;

    virtual void getInputsInfo(InferenceEngine::InputsDataMap &inputs) noexcept /*CV_OVERRIDE*/;

    virtual void getInputsInfo(InferenceEngine::InputsDataMap &inputs) const noexcept /*CV_OVERRIDE*/;

    virtual InferenceEngine::InputInfo::Ptr getInput(const std::string &inputName) noexcept;

    virtual InferenceEngine::InputInfo::Ptr getInput(const std::string &inputName) const noexcept;

    virtual void getName(char *pName, size_t len) noexcept;

    virtual void getName(char *pName, size_t len) const noexcept;

    virtual const std::string& getName() const noexcept;

    virtual size_t layerCount() noexcept;

    virtual size_t layerCount() const noexcept;

    virtual InferenceEngine::DataPtr& getData(const char *dname) noexcept CV_OVERRIDE;

    virtual void addLayer(const InferenceEngine::CNNLayerPtr &layer) noexcept CV_OVERRIDE;

    virtual InferenceEngine::StatusCode addOutput(const std::string &layerName,
                                                  size_t outputIndex = 0,
                                                  InferenceEngine::ResponseDesc *resp = nullptr) noexcept;

    virtual InferenceEngine::StatusCode getLayerByName(const char *layerName,
                                                       InferenceEngine::CNNLayerPtr &out,
                                                       InferenceEngine::ResponseDesc *resp) noexcept;

    virtual InferenceEngine::StatusCode getLayerByName(const char *layerName,
                                                       InferenceEngine::CNNLayerPtr &out,
                                                       InferenceEngine::ResponseDesc *resp) const noexcept;

    virtual void setTargetDevice(InferenceEngine::TargetDevice device) noexcept CV_OVERRIDE;

    virtual InferenceEngine::TargetDevice getTargetDevice() noexcept;

    virtual InferenceEngine::TargetDevice getTargetDevice() const noexcept;

    virtual InferenceEngine::StatusCode setBatchSize(const size_t size) noexcept CV_OVERRIDE;

    virtual InferenceEngine::StatusCode setBatchSize(size_t size, InferenceEngine::ResponseDesc* responseDesc) noexcept;

    virtual size_t getBatchSize() const noexcept CV_OVERRIDE;

#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R2)
    virtual InferenceEngine::StatusCode AddExtension(const InferenceEngine::IShapeInferExtensionPtr& extension, InferenceEngine::ResponseDesc* resp) noexcept;
    virtual InferenceEngine::StatusCode reshape(const InputShapes& inputShapes, InferenceEngine::ResponseDesc* resp) noexcept;
#endif

    void init(int targetId);

    void addBlobs(const std::vector<Ptr<BackendWrapper> >& wrappers);

    void forward();

    bool isInitialized();

private:
    std::vector<InferenceEngine::CNNLayerPtr> layers;
    InferenceEngine::InputsDataMap inputs;
    InferenceEngine::OutputsDataMap outputs;
    InferenceEngine::BlobMap inpBlobs;
    InferenceEngine::BlobMap outBlobs;
    InferenceEngine::BlobMap allBlobs;
    InferenceEngine::TargetDevice targetDevice;
    InferenceEngine::Precision precision;
    InferenceEngine::InferenceEnginePluginPtr enginePtr;
    InferenceEngine::InferencePlugin plugin;
    InferenceEngine::ExecutableNetwork netExec;
    InferenceEngine::InferRequest infRequest;

    std::string name;

    void initPlugin(InferenceEngine::ICNNNetwork& net);
};
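
// A minimal sketch of the intended call sequence (illustrative: the layers,
// inputs and outputs are populated by the dnn importer, and `wrappers` stands
// for the backend wrappers of the network's blobs):
//
//     InfEngineBackendNet net;
//     // ... importer adds layers, inputs and outputs ...
//     net.addBlobs(wrappers);        // register the network's input/output blobs
//     net.init(DNN_TARGET_CPU);      // pick a plugin and build the executable network
//     CV_Assert(net.isInitialized());
//     net.forward();                 // run inference on the registered blobs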

class InfEngineBackendNode : public BackendNode
{
public:
    InfEngineBackendNode(const InferenceEngine::CNNLayerPtr& layer);

    void connect(std::vector<Ptr<BackendWrapper> >& inputs,
                 std::vector<Ptr<BackendWrapper> >& outputs);

    InferenceEngine::CNNLayerPtr layer;
    // Inference Engine network object that is used to obtain the outputs of this layer.
    Ptr<InfEngineBackendNet> net;
};

class InfEngineBackendWrapper : public BackendWrapper
{
public:
    InfEngineBackendWrapper(int targetId, const Mat& m);

    InfEngineBackendWrapper(Ptr<BackendWrapper> wrapper);

    ~InfEngineBackendWrapper();

    static Ptr<BackendWrapper> create(Ptr<BackendWrapper> wrapper);

    virtual void copyToHost() CV_OVERRIDE;

    virtual void setHostDirty() CV_OVERRIDE;

    InferenceEngine::DataPtr dataPtr;
    InferenceEngine::Blob::Ptr blob;
};
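
// Host/device synchronization follows the generic BackendWrapper contract
// (a sketch; DNN_TARGET_CPU is just an example target id):
//
//     Mat m(1, 3, CV_32F);
//     Ptr<InfEngineBackendWrapper> w(new InfEngineBackendWrapper(DNN_TARGET_CPU, m));
//     w->setHostDirty();   // the host-side Mat has been modified
//     // ... inference reads/writes w->blob ...
//     w->copyToHost();     // copy the backend's result back into the Mat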

InferenceEngine::Blob::Ptr wrapToInfEngineBlob(const Mat& m, InferenceEngine::Layout layout = InferenceEngine::Layout::ANY);

InferenceEngine::Blob::Ptr wrapToInfEngineBlob(const Mat& m, const std::vector<size_t>& shape, InferenceEngine::Layout layout);

InferenceEngine::DataPtr infEngineDataNode(const Ptr<BackendWrapper>& ptr);
Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob);

// Convert Inference Engine blob with FP32 precision to FP16 precision.
// Allocates memory for a new blob.
InferenceEngine::TBlob<int16_t>::Ptr convertFp16(const InferenceEngine::Blob::Ptr& blob);
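
// For example (a sketch; wrapToInfEngineBlob is expected to wrap the Mat data
// rather than copy it, while convertFp16 allocates a new FP16 buffer):
//
//     Mat m(1, 3, CV_32F);
//     InferenceEngine::Blob::Ptr fp32 = wrapToInfEngineBlob(m);
//     InferenceEngine::TBlob<int16_t>::Ptr fp16 = convertFp16(fp32);
//     Mat back = infEngineBlobToMat(fp32);   // view the blob data as a cv::Mat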

// This is a fake class to run networks from the Model Optimizer. Objects of
// this class simulate responses of layers that are imported by OpenCV and
// supported by Inference Engine. The main difference is that they do not
// perform a forward pass.
class InfEngineBackendLayer : public Layer
{
public:
    InfEngineBackendLayer(const InferenceEngine::DataPtr& output);

    virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
                                 const int requiredOutputs,
                                 std::vector<MatShape> &outputs,
                                 std::vector<MatShape> &internals) const CV_OVERRIDE;

    virtual void forward(std::vector<Mat*> &input, std::vector<Mat> &output,
                         std::vector<Mat> &internals) CV_OVERRIDE;

    virtual void forward(InputArrayOfArrays inputs, OutputArrayOfArrays outputs,
                         OutputArrayOfArrays internals) CV_OVERRIDE;

    virtual bool supportBackend(int backendId) CV_OVERRIDE;

private:
    InferenceEngine::DataPtr output;
};
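
// A sketch of the intended role (illustrative): when a model comes from the
// Model Optimizer, one such fake layer can stand in for each Inference Engine
// output, so shape queries work while the real computation stays inside IE:
//
//     InferenceEngine::DataPtr out = ...;              // an IE layer's output
//     Ptr<Layer> fake(new InfEngineBackendLayer(out));
//     // fake->getMemoryShapes(...) reports shapes; fake->forward() is not
//     // meant to run actual computation.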

#endif  // HAVE_INF_ENGINE

bool haveInfEngine();

void forwardInfEngine(Ptr<BackendNode>& node);
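
// Typical guarded use (a sketch; `node` is assumed to be a node created for the
// Inference Engine backend during network initialization):
//
//     CV_Assert(haveInfEngine());
//     Ptr<BackendNode> node = ...;  // produced by the dnn backend setup
//     forwardInfEngine(node);       // delegates to InfEngineBackendNet::forward()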

}}  // namespace dnn, namespace cv

#endif  // __OPENCV_DNN_OP_INF_ENGINE_HPP__