Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
Greenplum
Opencv
提交
602e7c83
O
Opencv
项目概览
Greenplum
/
Opencv
10 个月 前同步成功
通知
7
Star
0
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
DevOps
流水线
流水线任务
计划
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
O
Opencv
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
DevOps
DevOps
流水线
流水线任务
计划
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
流水线任务
提交
Issue看板
体验新版 GitCode,发现更多精彩内容 >>
提交
602e7c83
编写于
7月 10, 2021
作者:
A
Alexander Alekhin
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
dnn(test): add extra IR models, more checks in IE testing code
上级
bc210b29
变更
1
隐藏空白更改
内联
并排
Showing
1 changed file
with
88 additions
and
10 deletions
+88
-10
modules/dnn/test/test_ie_models.cpp
modules/dnn/test/test_ie_models.cpp
+88
-10
未找到文件。
modules/dnn/test/test_ie_models.cpp
浏览文件 @
602e7c83
...
...
@@ -112,6 +112,25 @@ static const std::map<std::string, OpenVINOModelTestCaseInfo>& getOpenVINOTestMo
"intel/age-gender-recognition-retail-0013/FP16/age-gender-recognition-retail-0013"
,
"intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013"
}},
#endif
#if INF_ENGINE_RELEASE >= 2021020000
// OMZ: 2020.2
{
"face-detection-0105"
,
{
"intel/face-detection-0105/FP32/face-detection-0105"
,
"intel/face-detection-0105/FP16/face-detection-0105"
}},
{
"face-detection-0106"
,
{
"intel/face-detection-0106/FP32/face-detection-0106"
,
"intel/face-detection-0106/FP16/face-detection-0106"
}},
#endif
#if INF_ENGINE_RELEASE >= 2021040000
// OMZ: 2021.4
{
"person-vehicle-bike-detection-2004"
,
{
"intel/person-vehicle-bike-detection-2004/FP32/person-vehicle-bike-detection-2004"
,
"intel/person-vehicle-bike-detection-2004/FP16/person-vehicle-bike-detection-2004"
//"intel/person-vehicle-bike-detection-2004/FP16-INT8/person-vehicle-bike-detection-2004"
}},
#endif
};
...
...
@@ -145,10 +164,22 @@ inline static std::string getOpenVINOModel(const std::string &modelName, bool is
// Fill a test input/output Mat with random data matching the given IE tensor
// descriptor, and wrap that Mat's memory in an InferenceEngine Blob.
//
// The Mat element type and random range are chosen from desc.getPrecision():
//   - FP32 -> CV_32F, uniform in [-1, 1)
//   - I32  -> CV_32S, uniform in [-100, 100)
// Any other precision aborts the current test via FAIL().
//
// NOTE: dataPtr aliases m.data (zero-copy), so `m` must outlive `dataPtr`.
// The previous revision unconditionally generated FP32 data before the
// precision check, which wasted work and produced a float blob even for
// I32 tensors; that redundant pre-initialization is removed here.
static inline void genData(const InferenceEngine::TensorDesc& desc, Mat& m, Blob::Ptr& dataPtr)
{
    const std::vector<size_t>& dims = desc.getDims();
    if (desc.getPrecision() == InferenceEngine::Precision::FP32)
    {
        m.create(std::vector<int>(dims.begin(), dims.end()), CV_32F);
        randu(m, -1, 1);
        dataPtr = make_shared_blob<float>(desc, (float*)m.data);
    }
    else if (desc.getPrecision() == InferenceEngine::Precision::I32)
    {
        m.create(std::vector<int>(dims.begin(), dims.end()), CV_32S);
        randu(m, -100, 100);
        dataPtr = make_shared_blob<int>(desc, (int*)m.data);
    }
    else
    {
        FAIL() << "Unsupported precision: " << desc.getPrecision();
    }
}
void
runIE
(
Target
target
,
const
std
::
string
&
xmlPath
,
const
std
::
string
&
binPath
,
...
...
@@ -254,7 +285,16 @@ void runIE(Target target, const std::string& xmlPath, const std::string& binPath
BlobMap
inputBlobs
;
for
(
auto
&
it
:
net
.
getInputsInfo
())
{
genData
(
it
.
second
->
getTensorDesc
(),
inputsMap
[
it
.
first
],
inputBlobs
[
it
.
first
]);
const
InferenceEngine
::
TensorDesc
&
desc
=
it
.
second
->
getTensorDesc
();
genData
(
desc
,
inputsMap
[
it
.
first
],
inputBlobs
[
it
.
first
]);
if
(
cvtest
::
debugLevel
>
0
)
{
const
std
::
vector
<
size_t
>&
dims
=
desc
.
getDims
();
std
::
cout
<<
"Input: '"
<<
it
.
first
<<
"' precison="
<<
desc
.
getPrecision
()
<<
" dims="
<<
dims
.
size
()
<<
" ["
;
for
(
auto
d
:
dims
)
std
::
cout
<<
" "
<<
d
;
std
::
cout
<<
"] ocv_mat="
<<
inputsMap
[
it
.
first
].
size
<<
" of "
<<
typeToString
(
inputsMap
[
it
.
first
].
type
())
<<
std
::
endl
;
}
}
infRequest
.
SetInput
(
inputBlobs
);
...
...
@@ -263,7 +303,16 @@ void runIE(Target target, const std::string& xmlPath, const std::string& binPath
BlobMap
outputBlobs
;
for
(
auto
&
it
:
net
.
getOutputsInfo
())
{
genData
(
it
.
second
->
getTensorDesc
(),
outputsMap
[
it
.
first
],
outputBlobs
[
it
.
first
]);
const
InferenceEngine
::
TensorDesc
&
desc
=
it
.
second
->
getTensorDesc
();
genData
(
desc
,
outputsMap
[
it
.
first
],
outputBlobs
[
it
.
first
]);
if
(
cvtest
::
debugLevel
>
0
)
{
const
std
::
vector
<
size_t
>&
dims
=
desc
.
getDims
();
std
::
cout
<<
"Output: '"
<<
it
.
first
<<
"' precison="
<<
desc
.
getPrecision
()
<<
" dims="
<<
dims
.
size
()
<<
" ["
;
for
(
auto
d
:
dims
)
std
::
cout
<<
" "
<<
d
;
std
::
cout
<<
"] ocv_mat="
<<
outputsMap
[
it
.
first
].
size
<<
" of "
<<
typeToString
(
outputsMap
[
it
.
first
].
type
())
<<
std
::
endl
;
}
}
infRequest
.
SetOutput
(
outputBlobs
);
...
...
@@ -284,6 +333,12 @@ void runCV(Backend backendId, Target targetId, const std::string& xmlPath, const
net
.
setPreferableTarget
(
targetId
);
std
::
vector
<
String
>
outNames
=
net
.
getUnconnectedOutLayersNames
();
if
(
cvtest
::
debugLevel
>
0
)
{
std
::
cout
<<
"OpenCV output names: "
<<
outNames
.
size
()
<<
std
::
endl
;
for
(
auto
name
:
outNames
)
std
::
cout
<<
"- "
<<
name
<<
std
::
endl
;
}
std
::
vector
<
Mat
>
outs
;
net
.
forward
(
outs
,
outNames
);
...
...
@@ -307,13 +362,26 @@ TEST_P(DNNTestOpenVINO, models)
ASSERT_FALSE
(
backendId
!=
DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019
&&
backendId
!=
DNN_BACKEND_INFERENCE_ENGINE_NGRAPH
)
<<
"Inference Engine backend is required"
;
#if INF_ENGINE_VER_MAJOR_EQ(2021040000)
if
(
targetId
==
DNN_TARGET_MYRIAD
&&
(
modelName
==
"person-detection-retail-0013"
||
// ncDeviceOpen:1013 Failed to find booted device after boot
modelName
==
"age-gender-recognition-retail-0013"
// ncDeviceOpen:1013 Failed to find booted device after boot
#if INF_ENGINE_VER_MAJOR_GE(2021030000)
if
(
targetId
==
DNN_TARGET_MYRIAD
&&
(
false
||
modelName
==
"person-detection-retail-0013"
// ncDeviceOpen:1013 Failed to find booted device after boot
||
modelName
==
"age-gender-recognition-retail-0013"
// ncDeviceOpen:1013 Failed to find booted device after boot
||
modelName
==
"face-detection-0105"
// get_element_type() must be called on a node with exactly one output
||
modelName
==
"face-detection-0106"
// get_element_type() must be called on a node with exactly one output
||
modelName
==
"person-vehicle-bike-detection-2004"
// 2021.4+: ncDeviceOpen:1013 Failed to find booted device after boot
)
)
applyTestTag
(
CV_TEST_TAG_DNN_SKIP_IE_MYRIAD
,
CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH
,
CV_TEST_TAG_DNN_SKIP_IE_VERSION
);
if
(
targetId
==
DNN_TARGET_OPENCL
&&
(
false
||
modelName
==
"face-detection-0106"
// Operation: 2278 of type ExperimentalDetectronPriorGridGenerator(op::v6) is not supported
)
)
applyTestTag
(
CV_TEST_TAG_DNN_SKIP_IE_OPENCL
,
CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH
,
CV_TEST_TAG_DNN_SKIP_IE_VERSION
);
if
(
targetId
==
DNN_TARGET_OPENCL_FP16
&&
(
false
||
modelName
==
"face-detection-0106"
// Operation: 2278 of type ExperimentalDetectronPriorGridGenerator(op::v6) is not supported
)
)
applyTestTag
(
CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16
,
CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH
,
CV_TEST_TAG_DNN_SKIP_IE_VERSION
);
#endif
#if INF_ENGINE_VER_MAJOR_GE(2020020000)
...
...
@@ -350,6 +418,8 @@ TEST_P(DNNTestOpenVINO, models)
if
(
targetId
==
DNN_TARGET_MYRIAD
)
resetMyriadDevice
();
EXPECT_NO_THROW
(
runIE
(
targetId
,
xmlPath
,
binPath
,
inputsMap
,
ieOutputsMap
))
<<
"runIE"
;
if
(
targetId
==
DNN_TARGET_MYRIAD
)
resetMyriadDevice
();
EXPECT_NO_THROW
(
runCV
(
backendId
,
targetId
,
xmlPath
,
binPath
,
inputsMap
,
cvOutputsMap
))
<<
"runCV"
;
double
eps
=
0
;
...
...
@@ -357,6 +427,14 @@ TEST_P(DNNTestOpenVINO, models)
if
(
targetId
==
DNN_TARGET_CPU
&&
checkHardwareSupport
(
CV_CPU_AVX_512F
))
eps
=
1e-5
;
#endif
#if INF_ENGINE_VER_MAJOR_GE(2021030000)
if
(
targetId
==
DNN_TARGET_CPU
&&
modelName
==
"face-detection-0105"
)
eps
=
2e-4
;
#endif
#if INF_ENGINE_VER_MAJOR_GE(2021040000)
if
(
targetId
==
DNN_TARGET_CPU
&&
modelName
==
"person-vehicle-bike-detection-2004"
)
eps
=
1e-6
;
#endif
EXPECT_EQ
(
ieOutputsMap
.
size
(),
cvOutputsMap
.
size
());
for
(
auto
&
srcIt
:
ieOutputsMap
)
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录