Commit 6ec23048 authored by Dmitry Kurtaev

Enable Myriad tests with batch size > 1

Parent 0f0a82b6
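
Most hunks below apply the same pattern: a previously unconditional skip of the Myriad (DNN_TARGET_MYRIAD) target is wrapped in an INF_ENGINE_RELEASE preprocessor guard, so the test is skipped only when OpenCV is built against an OpenVINO release older than 2018R3. A minimal sketch of that pattern, assuming a parameterized fixture that exposes `backend` and `target` (the fixture, test, and helper names here are placeholders, not code from this commit):

// Sketch only: version-gated Myriad skip as used throughout this commit.
// INF_ENGINE_RELEASE is an integer such as 2018030000 (OpenVINO 2018R3)
// defined by the Inference Engine headers when OpenCV is built with IE.
TEST_P(HypotheticalDnnTest, some_layer)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE < 2018030000
    // Older plugins cannot run this case on Myriad, so skip it only there.
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
        throw SkipTestException("Test is enabled starts from OpenVINO 2018R3");
#endif
    runSomeNet("some_model");  // hypothetical helper standing in for runTensorFlowNet/runTorchNet
}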
@@ -93,8 +93,10 @@ TEST_P(Convolution, Accuracy)
     Backend backendId = get<0>(get<7>(GetParam()));
     Target targetId = get<1>(get<7>(GetParam()));
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE < 2018030000
     if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_MYRIAD)
-        throw SkipTestException("");
+        throw SkipTestException("Test is enabled starts from OpenVINO 2018R3");
+#endif
     bool skipCheck = false;
     if (cvtest::skipUnstableTests && backendId == DNN_BACKEND_OPENCV &&
@@ -274,7 +276,8 @@ TEST_P(AvePooling, Accuracy)
     Size stride = get<3>(GetParam());
     Backend backendId = get<0>(get<4>(GetParam()));
     Target targetId = get<1>(get<4>(GetParam()));
-    if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_MYRIAD)
+    if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_MYRIAD &&
+        stride == Size(3, 2) && kernel == Size(3, 3) && outSize != Size(1, 1))
         throw SkipTestException("");
     const int inWidth = (outSize.width - 1) * stride.width + kernel.width;
@@ -215,8 +215,10 @@ TEST(Layer_Test_Reshape, Accuracy)
 TEST_P(Test_Caffe_layers, BatchNorm)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE < 2018030000
     if (backend == DNN_BACKEND_INFERENCE_ENGINE)
-        throw SkipTestException("");
+        throw SkipTestException("Test is enabled starts from OpenVINO 2018R3");
+#endif
     testLayerUsingCaffeModels("layer_batch_norm", true);
     testLayerUsingCaffeModels("layer_batch_norm_local_stats", true, false);
 }
@@ -729,8 +731,10 @@ INSTANTIATE_TEST_CASE_P(Layer_Test, Crop, Combine(
 // into the normalization area.
 TEST_P(Test_Caffe_layers, Average_pooling_kernel_area)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE < 2018030000
     if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
-        throw SkipTestException("");
+        throw SkipTestException("Test is enabled starts from OpenVINO 2018R3");
+#endif
     LayerParams lp;
     lp.name = "testAvePool";
     lp.type = "Pooling";
@@ -111,6 +111,7 @@ public:
             {
                 throw SkipTestException("Myriad is not available/disabled in OpenCV");
             }
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE < 2018030000
             if (inp && ref && inp->size[0] != 1)
             {
                 // Myriad plugin supports only batch size 1. Slice a single sample.
@@ -127,6 +128,12 @@ public:
                 else
                     throw SkipTestException("Myriad plugin supports only batch size 1");
             }
+#else
+            if (inp && ref && inp->dims == 4 && ref->dims == 4 &&
+                inp->size[0] != 1 && inp->size[0] != ref->size[0])
+                throw SkipTestException("Inconsistent batch size of input and output blobs for Myriad plugin");
+#endif
         }
     }
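
For OpenVINO releases before 2018R3 the helper keeps the old workaround quoted above: the Myriad plugin accepts only batch size 1, so a single sample is sliced out of the test blobs; from 2018R3 on it merely rejects tests whose input and reference blobs disagree on batch size. A rough sketch of slicing the first sample out of a 4D NCHW blob with cv::Mat ranges, as a standalone helper (the function name is illustrative, not from the test code):

#include <opencv2/core.hpp>

// Sketch: take sample 0 from an NCHW blob of shape [N, C, H, W].
// blob(ranges) returns a view; clone() yields an independent Mat with N == 1.
static cv::Mat sliceFirstSample(const cv::Mat& blob)
{
    CV_Assert(blob.dims == 4);
    const cv::Range ranges[] = { cv::Range(0, 1), cv::Range::all(),
                                 cv::Range::all(), cv::Range::all() };
    return blob(ranges).clone();
}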
@@ -144,8 +144,10 @@ TEST_P(Test_TensorFlow_layers, eltwise_add_mul)
 TEST_P(Test_TensorFlow_layers, pad_and_concat)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE < 2018030000
     if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
-        throw SkipTestException("");
+        throw SkipTestException("Test is enabled starts from OpenVINO 2018R3");
+#endif
     runTensorFlowNet("pad_and_concat");
 }
@@ -180,8 +182,10 @@ TEST_P(Test_TensorFlow_layers, pooling)
 // TODO: fix tests and replace to pooling
 TEST_P(Test_TensorFlow_layers, ave_pool_same)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE < 2018030000
     if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
-        throw SkipTestException("");
+        throw SkipTestException("Test is enabled starts from OpenVINO 2018R3");
+#endif
     runTensorFlowNet("ave_pool_same");
 }
@@ -218,9 +222,16 @@ TEST_P(Test_TensorFlow_layers, reshape)
 TEST_P(Test_TensorFlow_layers, flatten)
 {
     if (backend == DNN_BACKEND_INFERENCE_ENGINE &&
-        (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
+        (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD))
         throw SkipTestException("");
     runTensorFlowNet("flatten", true);
 }
 
+TEST_P(Test_TensorFlow_layers, unfused_flatten)
+{
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE &&
+        (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
+        throw SkipTestException("");
+    runTensorFlowNet("unfused_flatten");
+    runTensorFlowNet("unfused_flatten_unknown_batch");
+}
@@ -500,8 +511,10 @@ TEST_P(Test_TensorFlow_layers, fp16_pad_and_concat)
 {
     const float l1 = 0.00071;
     const float lInf = 0.012;
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE < 2018030000
     if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
-        throw SkipTestException("");
+        throw SkipTestException("Test is enabled starts from OpenVINO 2018R3");
+#endif
     runTensorFlowNet("fp16_pad_and_concat", false, l1, lInf);
 }
@@ -111,10 +111,10 @@ public:
 TEST_P(Test_Torch_layers, run_convolution)
 {
-    if ((backend == DNN_BACKEND_INFERENCE_ENGINE && target != DNN_TARGET_CPU) ||
-        (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16))
-        throw SkipTestException("");
-    runTorchNet("net_conv", "", false, true);
+    // Output reference values are in range [23.4018, 72.0181]
+    double l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.08 : default_l1;
+    double lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.42 : default_lInf;
+    runTorchNet("net_conv", "", false, true, l1, lInf);
 }
 
 TEST_P(Test_Torch_layers, run_pool_max)
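
run_convolution no longer skips non-CPU targets; it now runs everywhere with relaxed tolerances for OPENCL_FP16 and MYRIAD. As a rough sanity check of those numbers (my arithmetic, not part of the commit), the relaxed bounds stay well under 1% of the largest reference output quoted in the comment:

#include <cstdio>

// Sketch: relative size of the relaxed tolerances vs. the reference range
// [23.4018, 72.0181] mentioned in the diff above.
int main()
{
    const double refMax = 72.0181;
    const double l1 = 0.08, lInf = 0.42;  // per-element mean and max-abs bounds
    std::printf("l1   ~= %.2f%% of refMax\n", 100.0 * l1 / refMax);   // ~0.11%
    std::printf("lInf ~= %.2f%% of refMax\n", 100.0 * lInf / refMax); // ~0.58%
    return 0;
}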
@@ -129,19 +129,23 @@ TEST_P(Test_Torch_layers, run_pool_ave)
     runTorchNet("net_pool_ave");
 }
 
-TEST_P(Test_Torch_layers, run_reshape)
+TEST_P(Test_Torch_layers, run_reshape_change_batch_size)
 {
     runTorchNet("net_reshape");
+}
+
+TEST_P(Test_Torch_layers, run_reshape)
+{
     runTorchNet("net_reshape_batch");
     runTorchNet("net_reshape_channels", "", false, true);
 }
 
 TEST_P(Test_Torch_layers, run_reshape_single_sample)
 {
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL_FP16)
-        throw SkipTestException("");
+    // Reference output values in range [14.4586, 18.4492].
     runTorchNet("net_reshape_single_sample", "", false, false,
-                (target == DNN_TARGET_MYRIAD || target == DNN_TARGET_OPENCL_FP16) ? 0.0052 : 0.0);
+                (target == DNN_TARGET_MYRIAD || target == DNN_TARGET_OPENCL_FP16) ? 0.0073 : default_l1,
+                (target == DNN_TARGET_MYRIAD || target == DNN_TARGET_OPENCL_FP16) ? 0.025 : default_lInf);
 }
 
 TEST_P(Test_Torch_layers, run_linear)
@@ -154,6 +158,10 @@ TEST_P(Test_Torch_layers, run_linear)
 TEST_P(Test_Torch_layers, run_concat)
 {
     runTorchNet("net_concat", "l5_torchMerge");
+}
+
+TEST_P(Test_Torch_layers, run_depth_concat)
+{
     runTorchNet("net_depth_concat", "", false, true, 0.0,
                 target == DNN_TARGET_OPENCL_FP16 ? 0.021 : 0.0);
 }
@@ -207,6 +215,10 @@ TEST_P(Test_Torch_layers, net_conv_gemm_lrn)
 TEST_P(Test_Torch_layers, net_inception_block)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE == 2018030000
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
+        throw SkipTestException("");
+#endif
     runTorchNet("net_inception_block", "", false, true);
 }