#ifndef __OPENCV_SAMPLES_DNN_CUSTOM_LAYERS__
#define __OPENCV_SAMPLES_DNN_CUSTOM_LAYERS__

#include <opencv2/dnn.hpp>
#include <opencv2/dnn/shape_utils.hpp>    // getPlane

//! [InterpLayer]
class InterpLayer : public cv::dnn::Layer
{
public:
    InterpLayer(const cv::dnn::LayerParams &params) : Layer(params)
    {
        outWidth = params.get<int>("width", 0);
        outHeight = params.get<int>("height", 0);
    }

    static cv::Ptr<cv::dnn::Layer> create(cv::dnn::LayerParams& params)
    {
        return cv::Ptr<cv::dnn::Layer>(new InterpLayer(params));
    }

    virtual bool getMemoryShapes(const std::vector<std::vector<int> > &inputs,
                                 const int requiredOutputs,
                                 std::vector<std::vector<int> > &outputs,
                                 std::vector<std::vector<int> > &internals) const CV_OVERRIDE
    {
        CV_UNUSED(requiredOutputs); CV_UNUSED(internals);
        std::vector<int> outShape(4);
        outShape[0] = inputs[0][0];  // batch size
        outShape[1] = inputs[0][1];  // number of channels
        outShape[2] = outHeight;
        outShape[3] = outWidth;
        outputs.assign(1, outShape);
        return false;
    }

    // Implementation of this custom layer is based on https://github.com/cdmh/deeplab-public/blob/master/src/caffe/layers/interp_layer.cpp
    virtual void forward(std::vector<cv::Mat*> &inputs, std::vector<cv::Mat> &outputs, std::vector<cv::Mat> &internals) CV_OVERRIDE
    {
        CV_UNUSED(internals);
        cv::Mat& inp = *inputs[0];
        cv::Mat& out = outputs[0];
        const float* inpData = (float*)inp.data;
        float* outData = (float*)out.data;

        const int batchSize = inp.size[0];
        const int numChannels = inp.size[1];
        const int inpHeight = inp.size[2];
        const int inpWidth = inp.size[3];

        // Output-to-input coordinate scale factors. As in Caffe's Interp layer,
        // the corner pixels of the input and output grids are aligned.
        const float rheight = (outHeight > 1) ? static_cast<float>(inpHeight - 1) / (outHeight - 1) : 0.f;
        const float rwidth = (outWidth > 1) ? static_cast<float>(inpWidth - 1) / (outWidth - 1) : 0.f;
        for (int h2 = 0; h2 < outHeight; ++h2)
        {
            const float h1r = rheight * h2;                // source row coordinate
            const int h1 = static_cast<int>(h1r);          // top neighbor row
            const int h1p = (h1 < inpHeight - 1) ? 1 : 0;  // step to the bottom neighbor (0 at the border)
            const float h1lambda = h1r - h1;               // weight of the bottom row
            const float h0lambda = 1.f - h1lambda;         // weight of the top row
            for (int w2 = 0; w2 < outWidth; ++w2)
            {
                const float w1r = rwidth * w2;                // source column coordinate
                const int w1 = static_cast<int>(w1r);         // left neighbor column
                const int w1p = (w1 < inpWidth - 1) ? 1 : 0;  // step to the right neighbor (0 at the border)
                const float w1lambda = w1r - w1;              // weight of the right column
                const float w0lambda = 1.f - w1lambda;        // weight of the left column
                const float* pos1 = inpData + h1 * inpWidth + w1;  // top-left source pixel, first plane
                float* pos2 = outData + h2 * outWidth + w2;        // destination pixel, first plane
                // Bilinearly interpolate the same spatial position in every
                // plane (batch x channels); the pointers advance plane by plane.
                for (int c = 0; c < batchSize * numChannels; ++c)
                {
                    pos2[0] =
                      h0lambda * (w0lambda * pos1[0] + w1lambda * pos1[w1p]) +
                      h1lambda * (w0lambda * pos1[h1p * inpWidth] + w1lambda * pos1[h1p * inpWidth + w1p]);
                    pos1 += inpWidth * inpHeight;
                    pos2 += outWidth * outHeight;
                }
            }
        }
    }

    // Stub for the InputArray-based overload; this sample implements the
    // computation in the cv::Mat-based forward() above.
    virtual void forward(cv::InputArrayOfArrays, cv::OutputArrayOfArrays, cv::OutputArrayOfArrays) CV_OVERRIDE {}

private:
    int outWidth, outHeight;
};
//! [InterpLayer]
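
// A minimal usage sketch, not one of the tutorial snippets: it builds the
// LayerParams a Caffe "Interp" layer would carry and queries the resulting
// output shape. The function name and the concrete numbers are illustrative.
static inline void interpLayerShapeSketch()
{
    cv::dnn::LayerParams params;
    params.set("width", 699);
    params.set("height", 499);
    cv::Ptr<cv::dnn::Layer> layer = InterpLayer::create(params);

    std::vector<std::vector<int> > inputs(1), outputs, internals;
    const int inpShape[] = {1, 3, 30, 30};  // NCHW
    inputs[0].assign(inpShape, inpShape + 4);
    layer->getMemoryShapes(inputs, 1, outputs, internals);
    // outputs[0] is now {1, 3, 499, 699}: batch and channels are kept, the
    // spatial size comes from the layer parameters.
}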

//! [ResizeBilinearLayer]
class ResizeBilinearLayer CV_FINAL : public cv::dnn::Layer
{
public:
    ResizeBilinearLayer(const cv::dnn::LayerParams &params) : Layer(params)
    {
        CV_Assert(!params.get<bool>("align_corners", false));  // only align_corners=false is implemented
        CV_Assert(!blobs.empty());

        for (size_t i = 0; i < blobs.size(); ++i)
            CV_Assert(blobs[i].type() == CV_32SC1);

        // There are two possible input blob layouts: a single blob that holds
        // the output shape, or two blobs that hold integer scaling factors
        // (see the usage sketch after this class).
        if (blobs.size() == 1)
        {
            CV_Assert(blobs[0].total() == 2);
            outHeight = blobs[0].at<int>(0, 0);
            outWidth = blobs[0].at<int>(0, 1);
            factorHeight = factorWidth = 0;
        }
        else
        {
            CV_Assert(blobs.size() == 2, blobs[0].total() == 1, blobs[1].total() == 1);
            factorHeight = blobs[0].at<int>(0, 0);
            factorWidth = blobs[1].at<int>(0, 0);
            outHeight = outWidth = 0;
        }
    }

    static cv::Ptr<cv::dnn::Layer> create(cv::dnn::LayerParams& params)
    {
        return cv::Ptr<cv::dnn::Layer>(new ResizeBilinearLayer(params));
    }

    virtual bool getMemoryShapes(const std::vector<std::vector<int> > &inputs,
                                 const int,
                                 std::vector<std::vector<int> > &outputs,
                                 std::vector<std::vector<int> > &) const CV_OVERRIDE
    {
        std::vector<int> outShape(4);
        outShape[0] = inputs[0][0];  // batch size
        outShape[1] = inputs[0][1];  // number of channels
        outShape[2] = outHeight != 0 ? outHeight : (inputs[0][2] * factorHeight);
        outShape[3] = outWidth != 0 ? outWidth : (inputs[0][3] * factorWidth);
        outputs.assign(1, outShape);
        return false;
    }

    virtual void finalize(const std::vector<cv::Mat*>&, std::vector<cv::Mat> &outputs) CV_OVERRIDE
    {
        if (!outWidth && !outHeight)
        {
            outHeight = outputs[0].size[2];
            outWidth = outputs[0].size[3];
        }
    }

    // This implementation is based on a reference implementation from
    // https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h
    virtual void forward(std::vector<cv::Mat*> &inputs, std::vector<cv::Mat> &outputs, std::vector<cv::Mat> &) CV_OVERRIDE
    {
        cv::Mat& inp = *inputs[0];
        cv::Mat& out = outputs[0];
        const float* inpData = (float*)inp.data;
        float* outData = (float*)out.data;

        const int batchSize = inp.size[0];
        const int numChannels = inp.size[1];
        const int inpHeight = inp.size[2];
        const int inpWidth = inp.size[3];

        // With align_corners=false (asserted in the constructor), TensorFlow
        // maps an output pixel to input coordinates by plain scaling.
        float heightScale = static_cast<float>(inpHeight) / outHeight;
        float widthScale = static_cast<float>(inpWidth) / outWidth;
        for (int b = 0; b < batchSize; ++b)
        {
            for (int y = 0; y < outHeight; ++y)
            {
                float input_y = y * heightScale;
                int y0 = static_cast<int>(std::floor(input_y));
                int y1 = std::min(y0 + 1, inpHeight - 1);
                for (int x = 0; x < outWidth; ++x)
                {
                    float input_x = x * widthScale;
                    int x0 = static_cast<int>(std::floor(input_x));
                    int x1 = std::min(x0 + 1, inpWidth - 1);
                    for (int c = 0; c < numChannels; ++c)
                    {
                        float interpolation =
                            inpData[offset(inp.size, c, x0, y0, b)] * (1 - (input_y - y0)) * (1 - (input_x - x0)) +
                            inpData[offset(inp.size, c, x0, y1, b)] * (input_y - y0) * (1 - (input_x - x0)) +
                            inpData[offset(inp.size, c, x1, y0, b)] * (1 - (input_y - y0)) * (input_x - x0) +
                            inpData[offset(inp.size, c, x1, y1, b)] * (input_y - y0) * (input_x - x0);
                        outData[offset(out.size, c, x, y, b)] = interpolation;
                    }
                }
            }
        }
    }

    // Stub for the InputArray-based overload, as in InterpLayer above.
    virtual void forward(cv::InputArrayOfArrays, cv::OutputArrayOfArrays, cv::OutputArrayOfArrays) CV_OVERRIDE {}

private:
    static inline int offset(const cv::MatSize& size, int c, int x, int y, int b)
    {
        return x + size[3] * (y + size[2] * (c + size[1] * b));
    }

    int outWidth, outHeight, factorWidth, factorHeight;
};
//! [ResizeBilinearLayer]
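
// A minimal sketch, not one of the tutorial snippets, of the two blob layouts
// the constructor above accepts; the values are illustrative. A single
// CV_32SC1 blob carries the target {height, width}; two one-element blobs
// carry integer scale factors instead.
static inline void resizeBilinearBlobsSketch()
{
    // Case 1: explicit output size.
    cv::dnn::LayerParams fixedSize;
    fixedSize.blobs.push_back((cv::Mat_<int>(1, 2) << 499, 699));  // height, width
    cv::Ptr<cv::dnn::Layer> byShape = ResizeBilinearLayer::create(fixedSize);

    // Case 2: integer scale factors relative to the input size.
    cv::dnn::LayerParams factors;
    factors.blobs.push_back(cv::Mat(1, 1, CV_32SC1, cv::Scalar(2)));  // height factor
    factors.blobs.push_back(cv::Mat(1, 1, CV_32SC1, cv::Scalar(2)));  // width factor
    cv::Ptr<cv::dnn::Layer> byFactors = ResizeBilinearLayer::create(factors);
    CV_UNUSED(byShape); CV_UNUSED(byFactors);
}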

//
// The following code is used only to generate tutorials documentation.
//

//! [A custom layer interface]
class MyLayer : public cv::dnn::Layer
{
public:
    //! [MyLayer::MyLayer]
    MyLayer(const cv::dnn::LayerParams &params);
    //! [MyLayer::MyLayer]

    //! [MyLayer::create]
    static cv::Ptr<cv::dnn::Layer> create(cv::dnn::LayerParams& params);
    //! [MyLayer::create]

    //! [MyLayer::getMemoryShapes]
    virtual bool getMemoryShapes(const std::vector<std::vector<int> > &inputs,
                                 const int requiredOutputs,
                                 std::vector<std::vector<int> > &outputs,
                                 std::vector<std::vector<int> > &internals) const CV_OVERRIDE;
    //! [MyLayer::getMemoryShapes]

    //! [MyLayer::forward]
    virtual void forward(std::vector<cv::Mat*> &inputs, std::vector<cv::Mat> &outputs, std::vector<cv::Mat> &internals) CV_OVERRIDE;
    //! [MyLayer::forward]

    //! [MyLayer::finalize]
    virtual void finalize(const std::vector<cv::Mat*> &inputs, std::vector<cv::Mat> &outputs) CV_OVERRIDE;
    //! [MyLayer::finalize]

    virtual void forward(cv::InputArrayOfArrays inputs, cv::OutputArrayOfArrays outputs, cv::OutputArrayOfArrays internals) CV_OVERRIDE;
};
//! [A custom layer interface]
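
// Rough call order, per the OpenCV documentation: importers call create() for
// every layer of a registered type, getMemoryShapes() runs during network
// setup to infer output shapes, finalize() runs once all blobs are allocated,
// and forward() runs on every inference.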

//! [Register a custom layer]
#include <opencv2/dnn/layer.details.hpp>  // CV_DNN_REGISTER_LAYER_CLASS

static inline void loadNet()
{
    CV_DNN_REGISTER_LAYER_CLASS(Interp, InterpLayer);
    // ...
    //! [Register a custom layer]

    //! [Register InterpLayer]
    CV_DNN_REGISTER_LAYER_CLASS(Interp, InterpLayer);
    cv::dnn::Net caffeNet = cv::dnn::readNet("/path/to/config.prototxt", "/path/to/weights.caffemodel");
    //! [Register InterpLayer]

    //! [Register ResizeBilinearLayer]
    CV_DNN_REGISTER_LAYER_CLASS(ResizeBilinear, ResizeBilinearLayer);
    cv::dnn::Net tfNet = cv::dnn::readNet("/path/to/graph.pb");
    //! [Register ResizeBilinearLayer]
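
    // Registration is process-wide and affects every model loaded afterwards.
    // If needed, a custom layer can be removed again (a hedged sketch;
    // unregisterLayer is part of cv::dnn::LayerFactory):
    //   cv::dnn::LayerFactory::unregisterLayer("Interp");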

    if (false) loadNet();  // To prevent unused function warning.
}

#endif  // __OPENCV_SAMPLES_DNN_CUSTOM_LAYERS__