// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2017-2019, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.

// This tests doesn't require any external data. They just compare outputs of
// layers using different computation backends. Input and parameters are random.

#include "test_precomp.hpp"

namespace opencv_test { namespace {

using namespace cv;
using namespace cv::dnn;
using namespace testing;

19
static void test(Mat& input, Net& net, Backend backendId, Target targetId, bool skipCheck = false, bool randInput = true, double l1 = 0.0, double lInf = 0.0)
20
{
D
Dmitry Kurtaev 已提交
21
    DNNTestLayer::checkBackend(backendId, targetId);
22 23
    if (randInput)
        randu(input, -1.0f, 1.0f);
24 25

    net.setInput(input);
26
    net.setPreferableBackend(DNN_BACKEND_OPENCV);
D
Dmitry Kurtaev 已提交
27
    Mat outputDefault = net.forward().clone();
28

D
Dmitry Kurtaev 已提交
29 30 31 32
    net.setPreferableBackend(backendId);
    net.setPreferableTarget(targetId);
    Mat outputHalide = net.forward().clone();

33 34 35
    if (skipCheck)
        return;

36 37 38 39 40 41
    double default_l1, default_lInf;
    DNNTestLayer::getDefaultThresholds(backendId, targetId, &default_l1, &default_lInf);
    if (l1 == 0.0)
        l1 = default_l1;
    if (lInf == 0.0)
        lInf = default_lInf;
42 43 44 45 46
#if 0
    std::cout << "l1=" << l1 << "  lInf=" << lInf << std::endl;
    std::cout << outputDefault.reshape(1, outputDefault.total()).t() << std::endl;
    std::cout << outputHalide.reshape(1, outputDefault.total()).t() << std::endl;
#endif
D
Dmitry Kurtaev 已提交
47 48 49
    normAssert(outputDefault, outputHalide, "", l1, lInf);
}

50
static void test(LayerParams& params, Mat& input, Backend backendId, Target targetId, bool skipCheck = false, double l1 = 0.0, double lInf = 0.0)
D
Dmitry Kurtaev 已提交
51 52 53
{
    Net net;
    net.addLayerToPrev(params.name, params.type, params);
54
    test(input, net, backendId, targetId, skipCheck, true, l1, lInf);
D
Dmitry Kurtaev 已提交
55 56
}

57
static inline testing::internal::ParamGenerator<tuple<Backend, Target> > dnnBackendsAndTargetsWithHalide()
D
Dmitry Kurtaev 已提交
58
{
59
    return dnnBackendsAndTargets(true, true, false); // OpenCV/CPU is used as reference
60 61
}

D
Dmitry Kurtaev 已提交
62 63
class Test_Halide_layers : public DNNTestLayer {};

D
Dmitry Kurtaev 已提交
64 65 66
////////////////////////////////////////////////////////////////////////////////
// Padding
////////////////////////////////////////////////////////////////////////////////
D
Dmitry Kurtaev 已提交
67
TEST_P(Test_Halide_layers, Padding)
D
Dmitry Kurtaev 已提交
68 69 70
{
    static const int kNumRuns = 10;
    std::vector<int> paddings(8);
A
Alexander Alekhin 已提交
71
    cv::RNG& rng = cv::theRNG();
D
Dmitry Kurtaev 已提交
72 73 74
    for (int t = 0; t < kNumRuns; ++t)
    {
        for (int i = 0; i < paddings.size(); ++i)
A
Alexander Alekhin 已提交
75
            paddings[i] = rng(5);
D
Dmitry Kurtaev 已提交
76 77 78 79 80 81

        LayerParams lp;
        lp.set("paddings", DictValue::arrayInt<int*>(&paddings[0], paddings.size()));
        lp.type = "Padding";
        lp.name = "testLayer";

D
Dmitry Kurtaev 已提交
82 83 84
        int sz[] = {1 + (int)rng(10), 1 + (int)rng(10), 1 + (int)rng(10), 1 + (int)rng(10)};
        Mat input(4, &sz[0], CV_32F);
        test(lp, input, backend, target);
D
Dmitry Kurtaev 已提交
85 86 87
    }
}

88 89 90
////////////////////////////////////////////////////////////////////////////////
// Convolution
////////////////////////////////////////////////////////////////////////////////
91
typedef TestWithParam<tuple<Vec3i, Size, Size, Size, Size, Size, bool, tuple<Backend, Target> > > Convolution;
92 93 94 95 96 97 98 99 100 101 102
TEST_P(Convolution, Accuracy)
{
    int inChannels = get<0>(GetParam())[0];
    int outChannels = get<0>(GetParam())[1];
    int group = get<0>(GetParam())[2];
    Size inSize = get<1>(GetParam());
    Size kernel = get<2>(GetParam());
    Size stride = get<3>(GetParam());
    Size pad = get<4>(GetParam());
    Size dilation = get<5>(GetParam());
    bool hasBias = get<6>(GetParam());
103 104
    Backend backendId = get<0>(get<7>(GetParam()));
    Target targetId = get<1>(get<7>(GetParam()));
D
Dmitry Kurtaev 已提交
105

106
    bool skipCheck = false;
107

D
Dmitry Kurtaev 已提交
108 109
    int sz[] = {outChannels, inChannels / group, kernel.height, kernel.width};
    Mat weights(4, &sz[0], CV_32F);
110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128
    randu(weights, -1.0f, 1.0f);

    LayerParams lp;
    lp.set("kernel_w", kernel.width);
    lp.set("kernel_h", kernel.height);
    lp.set("pad_w", pad.width);
    lp.set("pad_h", pad.height);
    lp.set("stride_w", stride.width);
    lp.set("stride_h", stride.height);
    lp.set("dilation_w", dilation.width);
    lp.set("dilation_h", dilation.height);
    lp.set("num_output", outChannels);
    lp.set("group", group);
    lp.set("bias_term", hasBias);
    lp.type = "Convolution";
    lp.name = "testLayer";
    lp.blobs.push_back(weights);
    if (hasBias)
    {
D
Dmitry Kurtaev 已提交
129
        Mat bias(1, outChannels, CV_32F);
130 131 132
        randu(bias, -1.0f, 1.0f);
        lp.blobs.push_back(bias);
    }
D
Dmitry Kurtaev 已提交
133 134
    int inpSz[] = {1, inChannels, inSize.height, inSize.width};
    Mat input(4, &inpSz[0], CV_32F);
135 136 137
    test(lp, input, backendId, targetId, skipCheck);
    if (skipCheck)
        throw SkipTestException("Skip checks in unstable test");
138 139 140 141 142 143 144 145 146 147 148
}

INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, Convolution, Combine(
/*in channels, out channels, group*/
             Values(Vec3i(6, 4, 1), Vec3i(6, 9, 1),
                    Vec3i(6, 4, 2), Vec3i(6, 9, 3)),
/*in size*/  Values(Size(5, 6)),
/*kernel*/   Values(Size(3, 1), Size(1, 3)),
/*stride*/   Values(Size(1, 1), Size(2, 2)),
/*pad*/      Values(Size(1, 0), Size(0, 1)),
/*dilation*/ Values(Size(1, 1), Size(2, 2)),
D
Dmitry Kurtaev 已提交
149 150
/*has bias*/ Bool(),
             dnnBackendsAndTargetsWithHalide()
151 152 153 154 155
));

////////////////////////////////////////////////////////////////////////////////
// Deconvolution
////////////////////////////////////////////////////////////////////////////////
156
typedef TestWithParam<tuple<Vec3i, Size, Size, Size, Size, Vec4i, bool, tuple<Backend, Target> > > Deconvolution;
157 158 159 160 161 162 163 164 165 166 167 168
TEST_P(Deconvolution, Accuracy)
{
    int inChannels = get<0>(GetParam())[0];
    int outChannels = get<0>(GetParam())[1];
    int group = get<0>(GetParam())[2];
    Size inSize = get<1>(GetParam());
    Size kernel = get<2>(GetParam());
    Size pad = get<3>(GetParam());
    Size dilation = get<4>(GetParam());
    Size stride = Size(get<5>(GetParam())[0], get<5>(GetParam())[1]);
    Size adjPad = Size(get<5>(GetParam())[2], get<5>(GetParam())[3]);
    bool hasBias = get<6>(GetParam());
169 170
    Backend backendId = get<0>(get<7>(GetParam()));
    Target targetId = get<1>(get<7>(GetParam()));
171

172
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
173
    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && targetId == DNN_TARGET_MYRIAD
174 175 176 177
            && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
            && inChannels == 6 && outChannels == 4 && group == 1
            && kernel == Size(1, 3) && pad == Size(1, 0)
            && stride == Size(1, 1) && dilation == Size(1, 1))
178
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
179 180
#endif

D
Dmitry Kurtaev 已提交
181 182
    int sz[] = {inChannels, outChannels / group, kernel.height, kernel.width};
    Mat weights(4, &sz[0], CV_32F);
183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203
    randu(weights, -1.0f, 1.0f);

    LayerParams lp;
    lp.set("kernel_w", kernel.width);
    lp.set("kernel_h", kernel.height);
    lp.set("pad_w", pad.width);
    lp.set("pad_h", pad.height);
    lp.set("stride_w", stride.width);
    lp.set("stride_h", stride.height);
    lp.set("dilation_w", dilation.width);
    lp.set("dilation_h", dilation.height);
    lp.set("adj_w", adjPad.width);
    lp.set("adj_h", adjPad.height);
    lp.set("num_output", outChannels);
    lp.set("group", group);
    lp.set("bias_term", hasBias);
    lp.type = "Deconvolution";
    lp.name = "testLayer";
    lp.blobs.push_back(weights);
    if (hasBias)
    {
D
Dmitry Kurtaev 已提交
204
        Mat bias(1, outChannels, CV_32F);
205 206 207
        randu(bias, -1.0f, 1.0f);
        lp.blobs.push_back(bias);
    }
D
Dmitry Kurtaev 已提交
208 209 210
    int inpSz[] = {1, inChannels, inSize.height, inSize.width};
    Mat input(4, &inpSz[0], CV_32F);
    test(lp, input, backendId, targetId);
211 212 213 214
}

INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, Deconvolution, Combine(
/*in channels, out channels, group*/
215
             Values(Vec3i(6, 4, 1), Vec3i(6, 9, 3)),
216 217 218
/*in size*/  Values(Size(5, 6)),
/*kernel*/   Values(Size(3, 1), Size(1, 3)),
/*pad*/      Values(Size(1, 0), Size(0, 1)),
219
/*dilation*/ Values(Size(1, 1)),
220
/*stride, adj. pad*/ Values(Vec4i(1,1, 0,0), Vec4i(2,2, 1,0), Vec4i(1,2, 0,1)),
D
Dmitry Kurtaev 已提交
221 222
/*has bias*/ Bool(),
             dnnBackendsAndTargetsWithHalide()
223 224 225 226 227
));

////////////////////////////////////////////////////////////////////////////////
// LRN
////////////////////////////////////////////////////////////////////////////////
228
typedef TestWithParam<tuple<Vec3i, int, Vec3f, bool, std::string, tuple<Backend, Target> > > LRN;
229 230 231 232 233 234 235 236 237 238
TEST_P(LRN, Accuracy)
{
    int inChannels = get<0>(GetParam())[0];
    Size inSize = Size(get<0>(GetParam())[1], get<0>(GetParam())[2]);
    int localSize = get<1>(GetParam());
    float alpha = get<2>(GetParam())[0];
    float beta = get<2>(GetParam())[1];
    float bias = get<2>(GetParam())[2];
    bool normBySize = get<3>(GetParam());
    std::string nrmType = get<4>(GetParam());
239 240
    Backend backendId = get<0>(get<5>(GetParam()));
    Target targetId = get<1>(get<5>(GetParam()));
241

242 243
    if ((inSize.width == 5 || inSize.height == 5) && targetId == DNN_TARGET_MYRIAD &&
        nrmType == "ACROSS_CHANNELS")
244
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
245

246 247 248 249 250 251 252 253 254 255
    LayerParams lp;
    lp.set("norm_region", nrmType);
    lp.set("local_size", localSize);
    lp.set("alpha", alpha);
    lp.set("beta", beta);
    lp.set("bias", bias);
    lp.set("norm_by_size", normBySize);
    lp.type = "LRN";
    lp.name = "testLayer";

D
Dmitry Kurtaev 已提交
256 257
    int sz[] = {1, inChannels, inSize.height, inSize.width};
    Mat input(4, &sz[0], CV_32F);
258 259 260 261 262 263 264 265 266 267 268

    double l1 = 0.0, lInf = 0.0;
    // The OpenCL kernels use the native_ math functions which have
    // implementation defined accuracy, so we use relaxed thresholds. See
    // https://github.com/opencv/opencv/issues/9821 for more details.
    if (targetId == DNN_TARGET_OPENCL)
    {
        l1 = 0.01;
        lInf = 0.01;
    }
    test(lp, input, backendId, targetId, false, l1, lInf);
269 270 271 272 273 274
}

INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, LRN, Combine(
/*input ch,w,h*/ Values(Vec3i(6, 5, 8), Vec3i(7, 11, 6)),
/*local size*/   Values(3, 5),
                 Values(Vec3f(0.9f, 1.0f, 1.1f), Vec3f(0.9f, 1.1f, 1.0f),
275 276
/*alpha, beta, bias*/   Vec3f(1.0f, 0.9f, 1.1f), Vec3f(1.0f, 1.1f, 0.9f),
                        Vec3f(1.1f, 0.9f, 1.0f), Vec3f(1.1f, 1.0f, 0.9f)),
277
/*norm_by_size*/ Bool(),
D
Dmitry Kurtaev 已提交
278 279
/*norm_type*/    Values("ACROSS_CHANNELS", "WITHIN_CHANNEL"),
                 dnnBackendsAndTargetsWithHalide()
280 281 282 283 284
));

////////////////////////////////////////////////////////////////////////////////
// Average pooling
////////////////////////////////////////////////////////////////////////////////
285
typedef TestWithParam<tuple<int, Size, Size, Size, tuple<Backend, Target> > > AvePooling;
286 287 288 289 290 291
TEST_P(AvePooling, Accuracy)
{
    int inChannels = get<0>(GetParam());
    Size outSize = get<1>(GetParam());;  // Input size will be computed from parameters.
    Size kernel = get<2>(GetParam());
    Size stride = get<3>(GetParam());
292 293
    Backend backendId = get<0>(get<4>(GetParam()));
    Target targetId = get<1>(get<4>(GetParam()));
294

295
#if defined(INF_ENGINE_RELEASE)
296
    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && targetId == DNN_TARGET_MYRIAD
297 298
            && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
            && kernel == Size(1, 1) && (stride == Size(1, 1) || stride == Size(2, 2)))
299
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
300 301
#endif

302 303 304 305 306 307 308 309 310 311 312 313
    const int inWidth = (outSize.width - 1) * stride.width + kernel.width;
    const int inHeight = (outSize.height - 1) * stride.height + kernel.height;

    LayerParams lp;
    lp.set("pool", "ave");
    lp.set("kernel_w", kernel.width);
    lp.set("kernel_h", kernel.height);
    lp.set("stride_w", stride.width);
    lp.set("stride_h", stride.height);
    lp.type = "Pooling";
    lp.name = "testLayer";

D
Dmitry Kurtaev 已提交
314 315 316
    int sz[] = {1, inChannels, inHeight, inWidth};
    Mat input(4, &sz[0], CV_32F);
    test(lp, input, backendId, targetId);
317 318 319 320 321 322
}

INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, AvePooling, Combine(
/*in channels*/ Values(3, 4),
/*out size*/    Values(Size(1, 1), Size(2, 2), Size(3, 2), Size(4, 7)),
/*kernel*/      Values(Size(1, 1), Size(2, 2), Size(3, 3), Size(3, 2)),
D
Dmitry Kurtaev 已提交
323 324
/*stride*/      Values(Size(1, 1), Size(2, 2), Size(3, 2)),
                dnnBackendsAndTargetsWithHalide()
325 326 327 328 329
));

////////////////////////////////////////////////////////////////////////////////
// Maximum pooling
////////////////////////////////////////////////////////////////////////////////
330
typedef TestWithParam<tuple<int, Size, Size, Size, Size, tuple<Backend, Target> > > MaxPooling;
331 332 333 334 335 336 337
TEST_P(MaxPooling, Accuracy)
{
    int inChannels = get<0>(GetParam());
    Size inSize = get<1>(GetParam());
    Size kernel = get<2>(GetParam());
    Size stride = get<3>(GetParam());
    Size pad = get<4>(GetParam());
338 339
    Backend backendId = get<0>(get<5>(GetParam()));
    Target targetId = get<1>(get<5>(GetParam()));
340

341
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2018050000)
342
    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && targetId == DNN_TARGET_MYRIAD
343 344 345 346
            && inSize == Size(7, 6) && kernel == Size(3, 2)
            && (stride == Size(1, 1) || stride == Size(2, 2))
            && (pad == Size(0, 1) || pad == Size(1, 1))
    )
347
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
348 349 350
#endif

#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2018050000)
351
    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && targetId == DNN_TARGET_MYRIAD
352 353 354
            && (kernel == Size(2, 2) || kernel == Size(3, 2))
            && stride == Size(1, 1) && (pad == Size(0, 0) || pad == Size(0, 1))
    )
355
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
356 357
#endif

358
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
359
    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && targetId == DNN_TARGET_MYRIAD
360 361 362 363
            && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
            && (stride == Size(1, 1) || stride == Size(2, 2))
            && (pad == Size(0, 1) || pad == Size(1, 1))
    )
364 365 366
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif

367 368 369 370 371
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2020020000)
    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && targetId == DNN_TARGET_MYRIAD)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif

372 373 374 375 376 377 378 379 380 381 382
    LayerParams lp;
    lp.set("pool", "max");
    lp.set("kernel_w", kernel.width);
    lp.set("kernel_h", kernel.height);
    lp.set("stride_w", stride.width);
    lp.set("stride_h", stride.height);
    lp.set("pad_w", pad.width);
    lp.set("pad_h", pad.height);
    lp.type = "Pooling";
    lp.name = "testLayer";

D
Dmitry Kurtaev 已提交
383 384 385
    int sz[] = {1, inChannels, inSize.height, inSize.width};
    Mat input(4, &sz[0], CV_32F);
    test(lp, input, backendId, targetId);
386 387 388 389 390 391 392
}

INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, MaxPooling, Combine(
/*in channels*/ Values(3, 4),
/*in size*/     Values(Size(5, 5), Size(7, 6)),
/*kernel*/      Values(Size(2, 2), Size(3, 3), Size(3, 2)),
/*stride*/      Values(Size(1, 1), Size(2, 2), Size(3, 2)),
D
Dmitry Kurtaev 已提交
393 394
/*pad*/         Values(Size(0, 0), Size(1, 1), Size(0, 1)),
                dnnBackendsAndTargetsWithHalide()
395 396 397 398 399
));

////////////////////////////////////////////////////////////////////////////////
// Fully-connected
////////////////////////////////////////////////////////////////////////////////
400
typedef TestWithParam<tuple<int, Size, int, bool, tuple<Backend, Target> > > FullyConnected;
401 402 403 404 405 406
TEST_P(FullyConnected, Accuracy)
{
    int inChannels = get<0>(GetParam());
    Size inSize = get<1>(GetParam());
    int outChannels = get<2>(GetParam());
    bool hasBias = get<3>(GetParam());
407 408
    Backend backendId = get<0>(get<4>(GetParam()));
    Target targetId = get<1>(get<4>(GetParam()));
409 410
    if ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ||
         backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && (targetId == DNN_TARGET_OPENCL_FP16 ||
411 412 413 414
       (targetId == DNN_TARGET_MYRIAD && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X))) {
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
    }
415 416 417 418 419 420 421 422 423 424 425 426 427 428 429

    Mat weights(outChannels, inChannels * inSize.height * inSize.width, CV_32F);
    randu(weights, -1.0f, 1.0f);

    Mat bias(1, outChannels, CV_32F);
    randu(bias, -1.0f, 1.0f);

    LayerParams lp;
    lp.set("num_output", outChannels);
    lp.set("bias_term", hasBias);
    lp.blobs.push_back(weights);
    lp.blobs.push_back(bias);
    lp.type = "InnerProduct";
    lp.name = "testLayer";

D
Dmitry Kurtaev 已提交
430 431 432
    int sz[] = {1, inChannels, inSize.height, inSize.width};
    Mat input(4, &sz[0], CV_32F);
    test(lp, input, backendId, targetId);
433 434 435 436 437 438
}

INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, FullyConnected, Combine(
/*in channels*/  Values(3, 4),
/*in size*/      Values(Size(5, 4), Size(4, 5), Size(1, 1)),
/*out channels*/ Values(3, 4),
D
Dmitry Kurtaev 已提交
439 440
/*has bias*/     Bool(),
                 dnnBackendsAndTargetsWithHalide()
441 442 443 444 445
));

////////////////////////////////////////////////////////////////////////////////
// SoftMax
////////////////////////////////////////////////////////////////////////////////
446
typedef TestWithParam<tuple<int,  tuple<Backend, Target> > > SoftMax;
447 448 449
TEST_P(SoftMax, Accuracy)
{
    int inChannels = get<0>(GetParam());
450 451
    Backend backendId = get<0>(get<1>(GetParam()));
    Target targetId = get<1>(get<1>(GetParam()));
452
    LayerParams lp;
D
Dmitry Kurtaev 已提交
453
    lp.type = "Softmax";
454 455
    lp.name = "testLayer";

D
Dmitry Kurtaev 已提交
456 457 458
    int sz[] = {1, inChannels, 1, 1};
    Mat input(4, &sz[0], CV_32F);
    test(lp, input, backendId, targetId);
459 460
}

D
Dmitry Kurtaev 已提交
461 462 463 464
INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, SoftMax, Combine(
    Values(3, 4, 5, 1024),
    dnnBackendsAndTargetsWithHalide()
));
465 466 467 468

//////////////////////////////////////////////////////////////////////////////
// Max pooling - unpooling
//////////////////////////////////////////////////////////////////////////////
D
Dmitry Kurtaev 已提交
469
TEST_P(Test_Halide_layers, MaxPoolUnpool)
470
{
471 472 473 474
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
D
Dmitry Kurtaev 已提交
475

476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504
    LayerParams pool;
    pool.set("pool", "max");
    pool.set("kernel_w", 2);
    pool.set("kernel_h", 2);
    pool.set("stride_w", 2);
    pool.set("stride_h", 2);
    pool.set("pad_w", 0);
    pool.set("pad_h", 0);
    pool.type = "Pooling";
    pool.name = "testPool";

    LayerParams unpool;
    unpool.set("pool_k_w", 2);
    unpool.set("pool_k_h", 2);
    unpool.set("pool_stride_w", 2);
    unpool.set("pool_stride_h", 2);
    unpool.set("pool_pad_w", 0);
    unpool.set("pool_pad_h", 0);
    unpool.type = "MaxUnpool";
    unpool.name = "testUnpool";

    Net net;
    int poolId = net.addLayer(pool.name, pool.type, pool);
    net.connect(0, 0, poolId, 0);

    int unpoolId = net.addLayer(unpool.name, unpool.type, unpool);
    net.connect(poolId, 0, unpoolId, 0);
    net.connect(poolId, 1, unpoolId, 1);

D
Dmitry Kurtaev 已提交
505 506 507
    int sz[] = {1, 1, 4, 4};
    Mat input(4, &sz[0], CV_32F);
    test(input, net, backend, target);
508 509 510 511 512 513 514
}

////////////////////////////////////////////////////////////////////////////////
// AvePooling + in-place layers
////////////////////////////////////////////////////////////////////////////////
static const int kNumChannels = 3;

515
void testInPlaceActivation(LayerParams& lp, Backend backendId, Target targetId)
516 517 518 519 520 521 522 523 524 525
{
    EXPECT_FALSE(lp.name.empty());

    LayerParams pool;
    pool.set("pool", "ave");
    pool.set("kernel_w", 2);
    pool.set("kernel_h", 2);
    pool.set("stride_w", 2);
    pool.set("stride_h", 2);
    pool.type = "Pooling";
526
    pool.name = "ave_pool";
527 528 529 530 531 532

    Net net;
    int poolId = net.addLayer(pool.name, pool.type, pool);
    net.connect(0, 0, poolId, 0);
    net.addLayerToPrev(lp.name, lp.type, lp);

D
Dmitry Kurtaev 已提交
533 534 535
    int sz[] = {1, kNumChannels, 10, 10};
    Mat input(4, &sz[0], CV_32F);
    test(input, net, backendId, targetId);
536 537
}

538
typedef TestWithParam<tuple<bool, bool, float, tuple<Backend, Target> > > BatchNorm;
539 540 541 542 543
TEST_P(BatchNorm, Accuracy)
{
    bool hasWeights = get<0>(GetParam());
    bool hasBias = get<1>(GetParam());
    float epsilon = get<2>(GetParam());
544 545
    Backend backendId = get<0>(get<3>(GetParam()));
    Target targetId = get<1>(get<3>(GetParam()));
546 547 548 549 550 551 552 553 554 555

    LayerParams lp;
    lp.set("has_weight", hasWeights);
    lp.set("has_bias", hasBias);
    lp.set("eps", epsilon);
    lp.type = "BatchNorm";
    lp.name = "testLayer";

    lp.blobs.reserve(4);
    for (int i = 0; i < 3; ++i)
D
Dmitry Kurtaev 已提交
556
        lp.blobs.push_back(Mat(1, kNumChannels, CV_32F));
557
    if (hasBias || hasWeights)
D
Dmitry Kurtaev 已提交
558
        lp.blobs.push_back(Mat(1, kNumChannels, CV_32F));
559

D
Dmitry Kurtaev 已提交
560 561
    for (int i = 0; i < lp.blobs.size(); ++i)
        randu(lp.blobs[i], 0.0f, 1.0f);
562

D
Dmitry Kurtaev 已提交
563
    testInPlaceActivation(lp, backendId, targetId);
564 565 566 567 568
}

INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, BatchNorm, Combine(
/*has weights*/ Bool(),
/*has bias*/    Bool(),
D
Dmitry Kurtaev 已提交
569 570
/*epsilon*/     Values(1e-3f, 1e-5f),
                dnnBackendsAndTargetsWithHalide()
571 572
));

573
typedef TestWithParam<tuple<float, tuple<Backend, Target> > > ReLU;
574 575 576
TEST_P(ReLU, Accuracy)
{
    float negativeSlope = get<0>(GetParam());
577 578
    Backend backendId = get<0>(get<1>(GetParam()));
    Target targetId = get<1>(get<1>(GetParam()));
579

A
Alexander Alekhin 已提交
580
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019020000)
581 582
    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && targetId == DNN_TARGET_MYRIAD && negativeSlope < 0)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
583 584
#endif

585 586 587 588
    LayerParams lp;
    lp.set("negative_slope", negativeSlope);
    lp.type = "ReLU";
    lp.name = "testLayer";
D
Dmitry Kurtaev 已提交
589
    testInPlaceActivation(lp, backendId, targetId);
590 591
}

D
Dmitry Kurtaev 已提交
592 593 594
INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, ReLU, Combine(
/*negative slope*/ Values(2.0f, 0.3f, -0.1f, 0.0f),
                   dnnBackendsAndTargetsWithHalide()
595 596
));

597
typedef TestWithParam<tuple<std::string, tuple<Backend, Target> > > NoParamActivation;
598 599
TEST_P(NoParamActivation, Accuracy)
{
600 601
    Backend backendId = get<0>(get<1>(GetParam()));
    Target targetId = get<1>(get<1>(GetParam()));
D
Dmitry Kurtaev 已提交
602

603 604 605
    LayerParams lp;
    lp.type = get<0>(GetParam());
    lp.name = "testLayer";
D
Dmitry Kurtaev 已提交
606
    testInPlaceActivation(lp, backendId, targetId);
607
}
D
Dmitry Kurtaev 已提交
608
INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, NoParamActivation, Combine(
T
thebhatman 已提交
609
/*type*/ Values("TanH", "Sigmoid", "AbsVal", "BNLL", "Swish", "Mish"),
D
Dmitry Kurtaev 已提交
610
         dnnBackendsAndTargetsWithHalide()
611 612
));

613
typedef TestWithParam<tuple<Vec3f, tuple<Backend, Target> > > Power;
614 615 616 617 618
TEST_P(Power, Accuracy)
{
    float power = get<0>(GetParam())[0];
    float scale = get<0>(GetParam())[1];
    float shift = get<0>(GetParam())[2];
619 620
    Backend backendId = get<0>(get<1>(GetParam()));
    Target targetId = get<1>(get<1>(GetParam()));
621 622 623 624 625 626 627

    LayerParams lp;
    lp.set("power", power);
    lp.set("scale", scale);
    lp.set("shift", shift);
    lp.type = "Power";
    lp.name = "testLayer";
D
Dmitry Kurtaev 已提交
628
    testInPlaceActivation(lp, backendId, targetId);
629 630
}

D
Dmitry Kurtaev 已提交
631
INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, Power, Combine(
632 633
/*power, scale, shift*/ Values(Vec3f(0.9f, 1.0f, 1.1f), Vec3f(0.9f, 1.1f, 1.0f),
                               Vec3f(1.0f, 0.9f, 1.1f), Vec3f(1.0f, 1.1f, 0.9f),
D
Dmitry Kurtaev 已提交
634 635 636
                               Vec3f(1.1f, 0.9f, 1.0f), Vec3f(1.1f, 1.0f, 0.9f)),
                        dnnBackendsAndTargetsWithHalide()
));
637

S
SamFC10 已提交
638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662
typedef TestWithParam<tuple<Vec3f, tuple<Backend, Target> > > Exp;
TEST_P(Exp, Accuracy)
{
    float base = get<0>(GetParam())[0];
    float scale = get<0>(GetParam())[1];
    float shift = get<0>(GetParam())[2];
    Backend backendId = get<0>(get<1>(GetParam()));
    Target targetId = get<1>(get<1>(GetParam()));

    LayerParams lp;
    lp.set("base", base);
    lp.set("scale", scale);
    lp.set("shift", shift);
    lp.type = "Exp";
    lp.name = "testLayer";
    testInPlaceActivation(lp, backendId, targetId);
}

INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, Exp, Combine(
/*base, scale, shift*/ Values(Vec3f(0.9f, -1.0f, 1.1f), Vec3f(0.9f, 1.1f, -1.0f),
                              Vec3f(-1.0f, 0.9f, 1.1f), Vec3f(-1.0f, 1.1f, 0.9f),
                              Vec3f(1.1f, 0.9f, -1.0f), Vec3f(1.1f, -1.0f, 0.9f)),
                       dnnBackendsAndTargetsWithHalide()
));

D
Dmitry Kurtaev 已提交
663
TEST_P(Test_Halide_layers, ChannelsPReLU)
664 665 666 667
{
    LayerParams lp;
    lp.type = "ChannelsPReLU";
    lp.name = "testLayer";
D
Dmitry Kurtaev 已提交
668
    lp.blobs.push_back(Mat(1, kNumChannels, CV_32F));
669 670
    randu(lp.blobs[0], -1.0f, 1.0f);

D
Dmitry Kurtaev 已提交
671
    testInPlaceActivation(lp, backend, target);
672 673
}

674
typedef TestWithParam<tuple<bool, tuple<Backend, Target> > > Scale;
675 676 677
TEST_P(Scale, Accuracy)
{
    bool hasBias = get<0>(GetParam());
678 679
    Backend backendId = get<0>(get<1>(GetParam()));
    Target targetId = get<1>(get<1>(GetParam()));
680 681 682 683 684

    LayerParams lp;
    lp.set("bias_term", hasBias);
    lp.type = "Scale";
    lp.name = "testLayer";
D
Dmitry Kurtaev 已提交
685
    lp.blobs.push_back(Mat(1, kNumChannels, CV_32F));
686 687 688
    randu(lp.blobs[0], -1.0f, 1.0f);
    if (hasBias)
    {
D
Dmitry Kurtaev 已提交
689
        lp.blobs.push_back(Mat(1, kNumChannels, CV_32F));
690 691
        randu(lp.blobs[1], -1.0f, 1.0f);
    }
D
Dmitry Kurtaev 已提交
692
    testInPlaceActivation(lp, backendId, targetId);
693 694
}

D
Dmitry Kurtaev 已提交
695 696 697 698
INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, Scale, Combine(
    Bool(),
    dnnBackendsAndTargetsWithHalide()
));
699 700 701 702 703 704 705 706 707

////////////////////////////////////////////////////////////////////////////////
// Concat layer
////////////////////////////////////////////////////////////////////////////////
//
// input --- conv --- concat --- output
//      `--- conv ----^ ^ ^
//      `---- ... ------' '
//      `-----------------'
708
typedef TestWithParam<tuple<Vec3i, Vec3i, tuple<Backend, Target> > > Concat;
709 710 711 712
// Builds a net of several 1x1 convolution branches feeding a Concat layer
// (plus the raw input as the first Concat input) and compares the chosen
// backend's output against the default OpenCV backend.
TEST_P(Concat, Accuracy)
{
    Vec3i inSize = get<0>(GetParam());
    Vec3i numChannels = get<1>(GetParam());
    Backend backendId = get<0>(get<2>(GetParam()));
    Target targetId = get<1>(get<2>(GetParam()));

#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2018050000)
    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && targetId == DNN_TARGET_MYRIAD
            && inSize == Vec3i(1, 4, 5) && numChannels == Vec3i(1, 6, 2)
    )
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);  // crash
#endif

#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && targetId == DNN_TARGET_CPU
            && inSize == Vec3i(1, 4, 5) && numChannels == Vec3i(1, 6, 2)
    )
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);  // TODO: IE_CPU
#endif

    Net net;

    std::vector<int> convLayerIds;
    convLayerIds.reserve(numChannels.channels);
    for (int i = 0, n = numChannels.channels; i < n; ++i)
    {
        // A zero channel count terminates the list of convolution branches.
        if (!numChannels[i])
            break;

        // 1x1 convolution: numChannels[i] outputs from inSize[0] input channels.
        int sz[] = {numChannels[i], inSize[0], 1, 1};
        Mat weights(4, &sz[0], CV_32F);
        randu(weights, -1.0f, 1.0f);

        LayerParams convParam;
        convParam.set("kernel_w", 1);
        convParam.set("kernel_h", 1);
        convParam.set("num_output", numChannels[i]);
        convParam.set("bias_term", false);
        convParam.type = "Convolution";
        std::ostringstream ss;
        ss << "convLayer" << i;
        convParam.name = ss.str();
        convParam.blobs.push_back(weights);

        int layerId = net.addLayer(convParam.name, convParam.type, convParam);
        convLayerIds.push_back(layerId);
        net.connect(0, 0, layerId, 0);
    }

    LayerParams concatParam;
    concatParam.type = "Concat";
    concatParam.name = "testLayer";
    int concatId = net.addLayer(concatParam.name, concatParam.type, concatParam);
    // Input 0 of Concat is the network input itself; the conv branches follow.
    net.connect(0, 0, concatId, 0);
    for (size_t i = 0; i < convLayerIds.size(); ++i)
    {
        net.connect(convLayerIds[i], 0, concatId, (int)i + 1);
    }

    int sz[] = {1, inSize[0], inSize[1], inSize[2]};
    Mat input(4, &sz[0], CV_32F);
    test(input, net, backendId, targetId);
}

// Register the Concat accuracy test over input sizes, branch channel counts
// and backend/target pairs.
INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, Concat, Combine(
/*input size*/ Values(Vec3i(1, 4, 5), Vec3i(2, 8, 6)),
/*channels*/   Values(Vec3i(2, 0, 0), Vec3i(3, 4, 0), Vec3i(1, 6, 2)),
               dnnBackendsAndTargetsWithHalide()
));

////////////////////////////////////////////////////////////////////////////////
// Element-wise layers
////////////////////////////////////////////////////////////////////////////////
//
// input --- conv --- eltwise --- output
//      `--- conv ----^ ^ ^
//      `---- ... ------' '
//      `-----------------'
788
typedef TestWithParam<tuple<Vec3i, std::string, int, bool, tuple<Backend, Target> > > Eltwise;
789 790 791 792 793
// Builds a net of 1x1 convolution branches feeding an Eltwise layer together
// with the raw input, and compares the chosen backend against the default
// OpenCV backend.  For "div" the convolution output is the first Eltwise
// input and the (all-ones) network input supplies the divisors.
TEST_P(Eltwise, Accuracy)
{
    Vec3i inSize = get<0>(GetParam());
    std::string op = get<1>(GetParam());
    int numConv = get<2>(GetParam());
    bool weighted = get<3>(GetParam());
    Backend backendId = get<0>(get<4>(GetParam()));
    Target targetId = get<1>(get<4>(GetParam()));

#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2018050000)
    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && targetId == DNN_TARGET_MYRIAD &&
        inSize == Vec3i(1, 4, 5))
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif

#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && numConv > 1)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif

#if defined(INF_ENGINE_RELEASE)
    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && targetId == DNN_TARGET_OPENCL &&
        op == "sum" && numConv == 1 && !weighted)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
#endif

#if defined(INF_ENGINE_RELEASE)
    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && numConv > 1)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif

    // Offset of the first convolution branch among Eltwise inputs.  It is
    // used in integer arithmetic below, so keep it an int (was a bool).
    int convInputShift = 1;
    int numEltwiseInputs = numConv;
    if (op == "div")
    {
        numConv = 1;
        convInputShift = 0; // first input is convolution
    }

    Net net;

    std::vector<int> convLayerIds(numConv);
    for (int i = 0; i < numConv; ++i)
    {
        // 1x1 convolution preserving the channel count so all Eltwise inputs
        // share the same shape.
        int sz[] = {inSize[0], inSize[0], 1, 1};
        Mat weights(4, &sz[0], CV_32F);
        randu(weights, -1.0f, 1.0f);

        LayerParams convParam;
        convParam.set("kernel_w", 1);
        convParam.set("kernel_h", 1);
        convParam.set("num_output", inSize[0]);
        convParam.set("bias_term", false);
        convParam.type = "Convolution";
        std::ostringstream ss;
        ss << "convLayer" << i;
        convParam.name = ss.str();
        convParam.blobs.push_back(weights);

        convLayerIds[i] = net.addLayer(convParam.name, convParam.type, convParam);
        net.connect(0, 0, convLayerIds[i], 0);
    }

    LayerParams eltwiseParam;
    eltwiseParam.set("operation", op);
    if (op == "sum" && weighted)
    {
        RNG& rng = cv::theRNG();
        // One coefficient per Eltwise input: the raw input plus each branch.
        std::vector<float> coeff(1 + numConv);
        for (size_t i = 0; i < coeff.size(); ++i)
        {
            coeff[i] = rng.uniform(-2.0f, 2.0f);
        }
        eltwiseParam.set("coeff", DictValue::arrayReal<float*>(&coeff[0], coeff.size()));
    }
    eltwiseParam.type = "Eltwise";
    eltwiseParam.name = "testLayer";
    int eltwiseId = net.addLayer(eltwiseParam.name, eltwiseParam.type, eltwiseParam);
    if (convInputShift == 1)
        net.connect(0, 0, eltwiseId, 0);
    for (int i = 0; i < numConv; ++i)
    {
        net.connect(convLayerIds[i], 0, eltwiseId, i + convInputShift);
    }
    if (convInputShift == 0)
        net.connect(0, 0, eltwiseId, numConv);
    // For "div", feed the raw input into any remaining Eltwise slots.
    for (int i = numConv; i < numEltwiseInputs; ++i)
    {
        net.connect(0, 0, eltwiseId, i + 1);
    }

    int sz[] = {1, inSize[0], inSize[1], inSize[2]};
    Mat input(4, &sz[0], CV_32F);
    if (op == "div")
        randu(input, 1.0f, 1.0f);  // ensure no divisor value has absolute value of less than 0.5
    test(input, net, backendId, targetId, /*skipCheck*/false, (op == "div") ? false : true);
}

// Register the Eltwise accuracy test over input sizes, operations, branch
// counts, the weighted-sum flag and backend/target pairs.
INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, Eltwise, Combine(
/*input size*/ Values(Vec3i(1, 4, 5), Vec3i(2, 8, 6)),
/*operation*/  Values("prod", "sum", "div", "max"),
/*num convs*/  Values(1, 2, 3),
/*weighted(for sum only)*/ Bool(),
               dnnBackendsAndTargetsWithHalide()
));

////////////////////////////////////////////////////////////////////////////
// Mixed backends
////////////////////////////////////////////////////////////////////////////
#ifdef HAVE_HALIDE
// Verifies that a net whose layers are only partially supported by the Halide
// backend (Halide LRN -> unsupported MVN -> Halide LRN) still matches the
// default backend's output, on both the default target and OpenCL.
TEST(MixedBackends_Halide_Default_Halide, Accuracy)
{
    // A layer that supports the Halide backend.
    LayerParams lrn;
    lrn.type = "LRN";
    lrn.name = "testLRN";

    // A layer that doesn't support the Halide backend yet.
    LayerParams mvn;
    mvn.type = "MVN";
    mvn.name = "testMVN";

    // Halide layer again.
    LayerParams lrn2;
    lrn2.type = "LRN";
    lrn2.name = "testLRN2";

    Net net;
    int lrnId = net.addLayer(lrn.name, lrn.type, lrn);
    net.connect(0, 0, lrnId, 0);
    net.addLayerToPrev(mvn.name, mvn.type, mvn);
    net.addLayerToPrev(lrn2.name, lrn2.type, lrn2);

    int sz[] = {4, 3, 5, 6};
    Mat input(4, &sz[0], CV_32F);
    randu(input, -1.0f, 1.0f);
    net.setInput(input);
    net.setPreferableBackend(DNN_BACKEND_OPENCV);
    Mat outputDefault = net.forward().clone();

    net.setPreferableBackend(DNN_BACKEND_HALIDE);
    net.setInput(input);
    Mat outputHalide = net.forward().clone();
    normAssert(outputDefault, outputHalide);

    net.setPreferableTarget(DNN_TARGET_OPENCL);
    net.setInput(input);
    outputHalide = net.forward().clone();
    normAssert(outputDefault, outputHalide);
}
#endif  // HAVE_HALIDE

D
Dmitry Kurtaev 已提交
941 942
INSTANTIATE_TEST_CASE_P(/*nothing*/, Test_Halide_layers, dnnBackendsAndTargetsWithHalide());

}} // namespace