Commit 26ea4760 authored by Alexander Alekhin

Merge pull request #19774 from aarongreig:aaron/dnn/oclTestAccuracyThresholds
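This change threads optional `l1` and `lInf` accuracy thresholds through the DNN layer tests (defaulting to 0.0, meaning "use the backend defaults"), so that individual tests can relax the comparison tolerances for OpenCL targets, whose kernels rely on the native_ math functions with implementation-defined accuracy (see https://github.com/opencv/opencv/issues/9821).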

@@ -16,7 +16,7 @@ using namespace cv;
 using namespace cv::dnn;
 using namespace testing;
-static void test(Mat& input, Net& net, Backend backendId, Target targetId, bool skipCheck = false, bool randInput = true)
+static void test(Mat& input, Net& net, Backend backendId, Target targetId, bool skipCheck = false, bool randInput = true, double l1 = 0.0, double lInf = 0.0)
 {
     DNNTestLayer::checkBackend(backendId, targetId);
     if (randInput)
@@ -33,8 +33,12 @@ static void test(Mat& input, Net& net, Backend backendId, Target targetId, bool
     if (skipCheck)
         return;
-    double l1, lInf;
-    DNNTestLayer::getDefaultThresholds(backendId, targetId, &l1, &lInf);
+    double default_l1, default_lInf;
+    DNNTestLayer::getDefaultThresholds(backendId, targetId, &default_l1, &default_lInf);
+    if (l1 == 0.0)
+        l1 = default_l1;
+    if (lInf == 0.0)
+        lInf = default_lInf;
 #if 0
     std::cout << "l1=" << l1 << " lInf=" << lInf << std::endl;
     std::cout << outputDefault.reshape(1, outputDefault.total()).t() << std::endl;
@@ -43,11 +47,11 @@ static void test(Mat& input, Net& net, Backend backendId, Target targetId, bool
     normAssert(outputDefault, outputHalide, "", l1, lInf);
 }
-static void test(LayerParams& params, Mat& input, Backend backendId, Target targetId, bool skipCheck = false)
+static void test(LayerParams& params, Mat& input, Backend backendId, Target targetId, bool skipCheck = false, double l1 = 0.0, double lInf = 0.0)
 {
     Net net;
     net.addLayerToPrev(params.name, params.type, params);
-    test(input, net, backendId, targetId, skipCheck);
+    test(input, net, backendId, targetId, skipCheck, true, l1, lInf);
 }
 static inline testing::internal::ParamGenerator<tuple<Backend, Target> > dnnBackendsAndTargetsWithHalide()
@@ -251,7 +255,17 @@ TEST_P(LRN, Accuracy)
     int sz[] = {1, inChannels, inSize.height, inSize.width};
     Mat input(4, &sz[0], CV_32F);
-    test(lp, input, backendId, targetId);
+    double l1 = 0.0, lInf = 0.0;
+    // The OpenCL kernels use the native_ math functions which have
+    // implementation defined accuracy, so we use relaxed thresholds. See
+    // https://github.com/opencv/opencv/issues/9821 for more details.
+    if (targetId == DNN_TARGET_OPENCL)
+    {
+        l1 = 0.01;
+        lInf = 0.01;
+    }
+    test(lp, input, backendId, targetId, false, l1, lInf);
 }
 INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, LRN, Combine(
......
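The core of the change is the sentinel pattern above: a threshold of 0.0 means "fall back to the backend defaults from getDefaultThresholds", while any explicit value overrides them. Below is a minimal, self-contained sketch of that pattern; the helper is a simplified stand-in for the real DNNTestLayer::getDefaultThresholds, which also takes the backend and target into account.

```cpp
#include <iostream>

// Simplified stand-in for DNNTestLayer::getDefaultThresholds: returns the
// per-backend default tolerances through output parameters.
static void getDefaultThresholds(double* l1, double* lInf)
{
    *l1 = 1e-5;
    *lInf = 1e-4;
}

// Mirrors the updated test() helper: callers pass 0.0 to keep the backend
// defaults, or a positive value to relax the comparison for that test only.
static void checkOutputs(double l1 = 0.0, double lInf = 0.0)
{
    double default_l1, default_lInf;
    getDefaultThresholds(&default_l1, &default_lInf);
    if (l1 == 0.0)
        l1 = default_l1;   // 0.0 acts as a "use the default" sentinel
    if (lInf == 0.0)
        lInf = default_lInf;
    std::cout << "l1=" << l1 << " lInf=" << lInf << std::endl;
}

int main()
{
    checkOutputs();           // backend default thresholds
    checkOutputs(0.01, 0.01); // relaxed thresholds, e.g. for DNN_TARGET_OPENCL
}
```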
@@ -169,8 +169,17 @@ TEST_P(Test_Caffe_layers, Softmax)
 TEST_P(Test_Caffe_layers, LRN)
 {
-    testLayerUsingCaffeModels("layer_lrn_spatial");
-    testLayerUsingCaffeModels("layer_lrn_channels");
+    double l1 = 0.0, lInf = 0.0;
+    // The OpenCL kernels use the native_ math functions which have
+    // implementation defined accuracy, so we use relaxed thresholds. See
+    // https://github.com/opencv/opencv/issues/9821 for more details.
+    if (target == DNN_TARGET_OPENCL)
+    {
+        l1 = 0.01;
+        lInf = 0.01;
+    }
+    testLayerUsingCaffeModels("layer_lrn_spatial", false, true, l1, lInf);
+    testLayerUsingCaffeModels("layer_lrn_channels", false, true, l1, lInf);
 }
 TEST_P(Test_Caffe_layers, Convolution)
......
@@ -218,9 +218,21 @@ TEST_P(Test_Torch_layers, net_conv_gemm_lrn)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
-    runTorchNet("net_conv_gemm_lrn", "", false, true, true,
-                target == DNN_TARGET_OPENCL_FP16 ? 0.046 : 0.0,
-                target == DNN_TARGET_OPENCL_FP16 ? 0.023 : 0.0);
+    double l1 = 0.0, lInf = 0.0;
+    if (target == DNN_TARGET_OPENCL_FP16)
+    {
+        l1 = 0.046;
+        lInf = 0.023;
+    }
+    // The OpenCL kernels use the native_ math functions which have
+    // implementation defined accuracy, so we use relaxed thresholds. See
+    // https://github.com/opencv/opencv/issues/9821 for more details.
+    else if (target == DNN_TARGET_OPENCL)
+    {
+        l1 = 0.02;
+        lInf = 0.02;
+    }
+    runTorchNet("net_conv_gemm_lrn", "", false, true, true, l1, lInf);
 }
 TEST_P(Test_Torch_layers, net_inception_block)
......
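For background on why the thresholds are relaxed at all: the OpenCL C specification leaves the accuracy of the native_* built-ins (native_exp, native_powr, ...) implementation-defined, so the same LRN computation can differ between the reference CPU path and a GPU kernel by more than the default tolerances. The snippet below is an illustrative, hypothetical kernel (not OpenCV's actual LRN kernel) showing the kind of code involved.

```cpp
// Illustrative OpenCL kernel source embedded in a C++ string. The lrn_like
// kernel and its parameters are hypothetical; the point is the native_powr
// built-in, whose error bounds the OpenCL spec leaves implementation-defined
// (unlike the plain powr built-in, which has specified ULP requirements).
static const char* kLrnLikeKernel = R"CLC(
__kernel void lrn_like(__global const float* src, __global float* dst,
                       float alpha, float beta)
{
    int i = get_global_id(0);
    // native_powr trades accuracy for speed, so results may deviate from a
    // CPU reference by more than the default test tolerances.
    dst[i] = src[i] * native_powr(1.0f + alpha * src[i] * src[i], -beta);
}
)CLC";
```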