// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2018, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.

#ifndef __OPENCV_DNN_OP_INF_ENGINE_HPP__
#define __OPENCV_DNN_OP_INF_ENGINE_HPP__

#ifdef HAVE_INF_ENGINE
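
// Suppress -Wsuggest-override warnings issued for the Inference Engine headers
// included below.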
#if defined(__GNUC__) && __GNUC__ >= 5
//#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wsuggest-override"
#endif
#include <inference_engine.hpp>
#if defined(__GNUC__) && __GNUC__ >= 5
//#pragma GCC diagnostic pop
#endif
#endif  // HAVE_INF_ENGINE

namespace cv { namespace dnn {

#ifdef HAVE_INF_ENGINE

class InfEngineBackendNet : public InferenceEngine::ICNNNetwork
{
public:
    InfEngineBackendNet();

    InfEngineBackendNet(InferenceEngine::CNNNetwork& net);

    virtual void Release() noexcept CV_OVERRIDE;

    void setPrecision(InferenceEngine::Precision p) noexcept;

    virtual InferenceEngine::Precision getPrecision() noexcept CV_OVERRIDE;

    virtual void getOutputsInfo(InferenceEngine::OutputsDataMap &out) noexcept /*CV_OVERRIDE*/;

    virtual void getOutputsInfo(InferenceEngine::OutputsDataMap &out) const noexcept /*CV_OVERRIDE*/;

    virtual void getInputsInfo(InferenceEngine::InputsDataMap &inputs) noexcept /*CV_OVERRIDE*/;

    virtual void getInputsInfo(InferenceEngine::InputsDataMap &inputs) const noexcept /*CV_OVERRIDE*/;

    virtual InferenceEngine::InputInfo::Ptr getInput(const std::string &inputName) noexcept CV_OVERRIDE;

    virtual void getName(char *pName, size_t len) noexcept;

    virtual void getName(char *pName, size_t len) const noexcept;

    virtual size_t layerCount() noexcept CV_OVERRIDE;

    virtual InferenceEngine::DataPtr& getData(const char *dname) noexcept CV_OVERRIDE;

    virtual void addLayer(const InferenceEngine::CNNLayerPtr &layer) noexcept CV_OVERRIDE;

    virtual InferenceEngine::StatusCode addOutput(const std::string &layerName,
                                                  size_t outputIndex = 0,
                                                  InferenceEngine::ResponseDesc *resp = nullptr) noexcept CV_OVERRIDE;

    virtual InferenceEngine::StatusCode getLayerByName(const char *layerName,
                                                       InferenceEngine::CNNLayerPtr &out,
                                                       InferenceEngine::ResponseDesc *resp) noexcept CV_OVERRIDE;

    virtual void setTargetDevice(InferenceEngine::TargetDevice device) noexcept CV_OVERRIDE;

    virtual InferenceEngine::TargetDevice getTargetDevice() noexcept CV_OVERRIDE;

    virtual InferenceEngine::StatusCode setBatchSize(const size_t size) noexcept CV_OVERRIDE;

    virtual size_t getBatchSize() const noexcept CV_OVERRIDE;

    void init(int targetId);

    void addBlobs(const std::vector<Ptr<BackendWrapper> >& wrappers);

    void forward();

    bool isInitialized();

private:
    std::vector<InferenceEngine::CNNLayerPtr> layers;
    InferenceEngine::InputsDataMap inputs;
    InferenceEngine::OutputsDataMap outputs;
    InferenceEngine::BlobMap inpBlobs;
    InferenceEngine::BlobMap outBlobs;
    InferenceEngine::BlobMap allBlobs;
    InferenceEngine::TargetDevice targetDevice;
    InferenceEngine::Precision precision;
    InferenceEngine::InferenceEnginePluginPtr plugin;

    void initPlugin(InferenceEngine::ICNNNetwork& net);
};
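
// A minimal usage sketch for InfEngineBackendNet (illustrative only; the
// layer pointer, wrapper vectors, and target below are assumed to come from
// the caller):
//
//   InfEngineBackendNet net;
//   net.addLayer(ieLayer);                        // register an IE layer
//   net.setPrecision(InferenceEngine::Precision::FP32);
//   net.addBlobs(inputWrappers);                  // bind wrapped input Mats
//   net.addBlobs(outputWrappers);                 // bind wrapped output Mats
//   net.init(DNN_TARGET_CPU);                     // load plugin, build I/O maps
//   if (net.isInitialized())
//       net.forward();                            // run inference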

class InfEngineBackendNode : public BackendNode
{
public:
    InfEngineBackendNode(const InferenceEngine::CNNLayerPtr& layer);

    void connect(std::vector<Ptr<BackendWrapper> >& inputs,
                 std::vector<Ptr<BackendWrapper> >& outputs);

    InferenceEngine::CNNLayerPtr layer;
    // Inference Engine network object used to obtain the outputs of this layer.
    Ptr<InfEngineBackendNet> net;
};
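
// Sketch of node assembly during graph construction (illustrative; `ieLayer`
// and the wrapper vectors are assumptions):
//
//   Ptr<InfEngineBackendNode> node(new InfEngineBackendNode(ieLayer));
//   node->connect(inputWrappers, outputWrappers);  // wire layer I/O by data name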

class InfEngineBackendWrapper : public BackendWrapper
{
public:
    InfEngineBackendWrapper(int targetId, const Mat& m);

    ~InfEngineBackendWrapper();

    virtual void copyToHost() CV_OVERRIDE;

    virtual void setHostDirty() CV_OVERRIDE;

    InferenceEngine::DataPtr dataPtr;
    InferenceEngine::TBlob<float>::Ptr blob;
};
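
// Sketch of wrapping a host Mat for the IE backend (illustrative; the shape
// is an arbitrary example):
//
//   std::vector<int> shape = {1, 3, 224, 224};    // NCHW
//   Mat m(shape, CV_32F);
//   Ptr<BackendWrapper> w(new InfEngineBackendWrapper(DNN_TARGET_CPU, m));
//   w->setHostDirty();   // mark host data as updated before inference
//   // ... inference runs on the wrapped blob ...
//   w->copyToHost();     // pull results back to the Mat side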

InferenceEngine::TBlob<float>::Ptr wrapToInfEngineBlob(const Mat& m, InferenceEngine::Layout layout = InferenceEngine::Layout::ANY);

InferenceEngine::TBlob<float>::Ptr wrapToInfEngineBlob(const Mat& m, const std::vector<size_t>& shape, InferenceEngine::Layout layout);

InferenceEngine::DataPtr infEngineDataNode(const Ptr<BackendWrapper>& ptr);

Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob);
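
// Round-trip sketch (illustrative): wrapToInfEngineBlob builds an IE blob over
// a CV_32F Mat, while infEngineBlobToMat exposes a blob's buffer as a Mat
// header, so no tensor data is copied in either direction:
//
//   std::vector<int> dims = {1, 3, 224, 224};     // NCHW
//   Mat src(dims, CV_32F);
//   InferenceEngine::TBlob<float>::Ptr blob =
//       wrapToInfEngineBlob(src, InferenceEngine::Layout::NCHW);
//   Mat view = infEngineBlobToMat(blob);          // same underlying data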

// Converts an Inference Engine blob with FP32 precision to FP16 precision.
// Allocates memory for a new blob.
InferenceEngine::TBlob<int16_t>::Ptr convertFp16(const InferenceEngine::Blob::Ptr& blob);
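
// Sketch (illustrative; `m` is an assumed CV_32F Mat): producing an FP16 blob
// for targets that require half precision:
//
//   InferenceEngine::Blob::Ptr blob32 = wrapToInfEngineBlob(m);
//   InferenceEngine::TBlob<int16_t>::Ptr blob16 = convertFp16(blob32);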

// This is a fake class to run networks from Model Optimizer. Objects of this
// class simulate responses of layers that are imported by OpenCV and supported
// by Inference Engine. The main difference is that they do not perform a
// forward pass.
class InfEngineBackendLayer : public Layer
{
public:
    InfEngineBackendLayer(const InferenceEngine::DataPtr& output);

    virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
                                 const int requiredOutputs,
                                 std::vector<MatShape> &outputs,
                                 std::vector<MatShape> &internals) const CV_OVERRIDE;

    virtual void forward(std::vector<Mat*> &input, std::vector<Mat> &output,
                         std::vector<Mat> &internals) CV_OVERRIDE;

    virtual void forward(InputArrayOfArrays inputs, OutputArrayOfArrays outputs,
                         OutputArrayOfArrays internals) CV_OVERRIDE;

    virtual bool supportBackend(int backendId) CV_OVERRIDE;

private:
    InferenceEngine::DataPtr output;
};
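
// Sketch of the intended behavior (illustrative; `ieOutput` and `inShapes`
// are assumptions): the layer only reports shapes recorded in the IE graph,
// and its forward() is a placeholder that computes nothing.
//
//   InfEngineBackendLayer fake(ieOutput);
//   std::vector<MatShape> outShapes, internals;
//   fake.getMemoryShapes(inShapes, 1, outShapes, internals);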

#endif  // HAVE_INF_ENGINE

bool haveInfEngine();

void forwardInfEngine(Ptr<BackendNode>& node);
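
// Typical guarded call pattern (sketch): check for Inference Engine support
// before dispatching a node to it.
//
//   if (haveInfEngine())
//       forwardInfEngine(node);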

}}  // namespace dnn, namespace cv

#endif  // __OPENCV_DNN_OP_INF_ENGINE_HPP__