diff --git a/modules/dnn/src/dnn.cpp b/modules/dnn/src/dnn.cpp
index c50dae796780f792f65ca064b4c8b5dcbe7f34e2..9ee688f497e5ec5c01b3d45e79c64b6f4e34ac23 100644
--- a/modules/dnn/src/dnn.cpp
+++ b/modules/dnn/src/dnn.cpp
@@ -2460,10 +2460,12 @@ struct Net::Impl : public detail::NetImplBase
                         if( nextData )
                             nextActivLayer = nextData->layerInstance.dynamicCast<ActivationLayer>();
 
+                        Ptr<PowerLayer> activ_power;
                         if( !nextActivLayer.empty() &&
                                 (!nextData->type.compare("ReLU") ||
                                  !nextData->type.compare("ChannelsPReLU") ||
-                                 !nextData->type.compare("Power")) &&
+                                 (!nextData->type.compare("Power") && (activ_power = nextActivLayer.dynamicCast<PowerLayer>()) && activ_power->scale == 1.0f)
+                                ) &&
                                 currLayer->setActivation(nextActivLayer) )
                         {
                             CV_Assert_N(biasLayerData->outputBlobsWrappers.size() == 1, ld.inputBlobsWrappers.size() == 1);
diff --git a/modules/dnn/src/layers/convolution_layer.cpp b/modules/dnn/src/layers/convolution_layer.cpp
index ddc318def2f82a89bb853c5ad2abfbba8ae4bb5e..206ce72fa0b3c0ecd84b32b6df55902e7a4e9ee4 100644
--- a/modules/dnn/src/layers/convolution_layer.cpp
+++ b/modules/dnn/src/layers/convolution_layer.cpp
@@ -46,6 +46,8 @@
 #include "../op_inf_engine.hpp"
 #include "../ie_ngraph.hpp"
 
+#include <opencv2/core/utils/logger.hpp>
+
 #include "opencv2/core/hal/hal.hpp"
 #include "opencv2/core/hal/intrin.hpp"
 #include <iostream>
@@ -371,6 +373,14 @@ public:
         Ptr<PowerLayer> activ_power = activ.dynamicCast<PowerLayer>();
         if (!activ_power.empty())
         {
+            if (activ_power->scale != 1.0f)  // not supported well by implementation, #17964
+            {
+                // FIXIT no way to check number of blobs (like, eltwise input)
+                CV_LOG_INFO(NULL, "DNN/OpenCL: can't configure Power activation (scale != 1.0f)");
+                activ.release();
+                newActiv = false;
+                return false;
+            }
             if (activ_power->scale != 1.f || activ_power->shift != 0.f)
             {
                 const int outCh = blobs[0].size[0];
diff --git a/modules/dnn/test/test_common.impl.hpp b/modules/dnn/test/test_common.impl.hpp
index 559b74f126dabb5dfa5b19f60296808ee6d4aee9..e55e6cb7b3b20fa0c176742321b746bb78bf49c0 100644
--- a/modules/dnn/test/test_common.impl.hpp
+++ b/modules/dnn/test/test_common.impl.hpp
@@ -63,10 +63,10 @@ void normAssert(
         double l1 /*= 0.00001*/, double lInf /*= 0.0001*/)
 {
     double normL1 = cvtest::norm(ref, test, cv::NORM_L1) / ref.getMat().total();
-    EXPECT_LE(normL1, l1) << comment;
+    EXPECT_LE(normL1, l1) << comment << " |ref| = " << cvtest::norm(ref, cv::NORM_INF);
 
     double normInf = cvtest::norm(ref, test, cv::NORM_INF);
-    EXPECT_LE(normInf, lInf) << comment;
+    EXPECT_LE(normInf, lInf) << comment << " |ref| = " << cvtest::norm(ref, cv::NORM_INF);
 }
 
 std::vector<cv::Rect2d> matToBoxes(const cv::Mat& m)
diff --git a/modules/dnn/test/test_layers.cpp b/modules/dnn/test/test_layers.cpp
index e61b754b86d9a2accdaad01f30654b18290972ce..3872f562efbf663962540df53239a640b2d58e99 100644
--- a/modules/dnn/test/test_layers.cpp
+++ b/modules/dnn/test/test_layers.cpp
@@ -2219,10 +2219,6 @@ TEST_P(ConvolutionActivationFusion, Accuracy)
     Backend backendId = get<0>(get<2>(GetParam()));
     Target targetId = get<1>(get<2>(GetParam()));
 
-    // bug: https://github.com/opencv/opencv/issues/17964
-    if (actType == "Power" && backendId == DNN_BACKEND_OPENCV && (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16))
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL);
-
     Net net;
     int convId = net.addLayer(convParams.name, convParams.type, convParams);
     int activId = net.addLayerToPrev(activationParams.name, activationParams.type, activationParams);
@@ -2235,7 +2231,7 @@ TEST_P(ConvolutionActivationFusion, Accuracy)
             expectedFusedLayers.push_back(activId); // all activations are fused
         else if (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16)
         {
-            if (actType == "ReLU" || actType == "ChannelsPReLU" || actType == "ReLU6" || actType == "TanH" || actType == "Power")
+            if (actType == "ReLU" || actType == "ChannelsPReLU" || actType == "ReLU6" || actType == "TanH" /*|| actType == "Power"*/)
                 expectedFusedLayers.push_back(activId);
         }
     }
@@ -2349,10 +2345,6 @@ TEST_P(ConvolutionEltwiseActivationFusion, Accuracy)
     if ((eltwiseOp != "sum" || weightedEltwise) && backendId == DNN_BACKEND_OPENCV && (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16))
         applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL);
 
-    // bug: https://github.com/opencv/opencv/issues/17964
-    if (actType == "Power" && backendId == DNN_BACKEND_OPENCV && (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16))
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL);
-
     Net net;
     int convId = net.addLayer(convParams.name, convParams.type, convParams);
     int eltwiseId = net.addLayer(eltwiseParams.name, eltwiseParams.type, eltwiseParams);
     int activId = net.addLayer(activationParams.name, activationParams.type, activationParams);
@@ -2369,7 +2361,7 @@ TEST_P(ConvolutionEltwiseActivationFusion, Accuracy)
             expectedFusedLayers.push_back(activId); // activation is fused with eltwise layer
         else if (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16)
         {
-            if (actType == "ReLU" || actType == "ChannelsPReLU" || actType == "Power")
+            if (actType == "ReLU" || actType == "ChannelsPReLU" /*|| actType == "Power"*/)
             {
                 expectedFusedLayers.push_back(eltwiseId);
                 expectedFusedLayers.push_back(activId);
@@ -2431,10 +2423,6 @@ TEST_P(ConvolutionActivationEltwiseFusion, Accuracy)
     Backend backendId = get<0>(get<4>(GetParam()));
     Target targetId = get<1>(get<4>(GetParam()));
 
-    // bug: https://github.com/opencv/opencv/issues/17964
-    if (actType == "Power" && backendId == DNN_BACKEND_OPENCV && (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16))
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL);
-
     Net net;
     int convId = net.addLayer(convParams.name, convParams.type, convParams);
     int activId = net.addLayer(activationParams.name, activationParams.type, activationParams);
@@ -2451,7 +2439,7 @@ TEST_P(ConvolutionActivationEltwiseFusion, Accuracy)
            expectedFusedLayers.push_back(activId); // activation fused with convolution
         else if (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16)
         {
-            if (actType == "ReLU" || actType == "ChannelsPReLU" || actType == "ReLU6" || actType == "TanH" || actType == "Power")
+            if (actType == "ReLU" || actType == "ChannelsPReLU" || actType == "ReLU6" || actType == "TanH" /*|| actType == "Power"*/)
                 expectedFusedLayers.push_back(activId); // activation fused with convolution
         }
     }
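
Note: the dnn.cpp change above only allows a following "Power" activation to be fused into the convolution when its scale parameter is exactly 1.0f; otherwise the activation is kept as a separate layer, and the OpenCL path in convolution_layer.cpp now bails out of the fusion for the same reason. The following is a minimal standalone sketch of that eligibility rule, using illustrative stand-in types and names (not OpenCV's actual classes):

#include <iostream>
#include <string>

// Illustrative stand-in for the parameters of a "Power" activation layer.
struct PowerParams
{
    float power = 1.0f;
    float scale = 1.0f;
    float shift = 0.0f;
};

// Mirrors the fusion condition: ReLU/ChannelsPReLU always qualify,
// Power qualifies only when scale == 1.0f (see issue #17964).
static bool canFuseActivation(const std::string& type, const PowerParams* power)
{
    if (type == "ReLU" || type == "ChannelsPReLU")
        return true;
    if (type == "Power" && power != nullptr)
        return power->scale == 1.0f;
    return false;
}

int main()
{
    PowerParams identityScale;                // scale == 1.0f -> fusable
    PowerParams scaled; scaled.scale = 0.5f;  // scale != 1.0f -> kept separate

    std::cout << canFuseActivation("Power", &identityScale) << "\n";  // 1
    std::cout << canFuseActivation("Power", &scaled) << "\n";         // 0
    std::cout << canFuseActivation("ReLU", nullptr) << "\n";          // 1
    return 0;
}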