Commit 0711dab0

Fix Intel's Inference Engine backend from future. Second try.

Author: Dmitry Kurtaev
Date:   Feb 11, 2019
Parent: 5f578425
Repository: opencv (fork of OpenCV/opencv)

Showing 6 changed files with 45 additions and 11 deletions (+45 −11)
modules/dnn/src/dnn.cpp                          +23  -1
modules/dnn/src/layers/normalize_bbox_layer.cpp  +13  -7
modules/dnn/src/layers/resize_layer.cpp           +1  -1
modules/dnn/src/op_inf_engine.cpp                 +1  -1
modules/dnn/src/op_inf_engine.hpp                 +2  -1
modules/dnn/test/test_halide_layers.cpp           +5  -0
modules/dnn/src/dnn.cpp

@@ -1637,6 +1637,27 @@ struct Net::Impl
              preferableTarget == DNN_TARGET_MYRIAD ||
              preferableTarget == DNN_TARGET_FPGA) && !fused)
         {
+#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R5)
+            bool hasWeights = false;
+            for (const std::string& name : {"weights", "biases"})
+            {
+                auto it = ieNode->layer.getParameters().find(name);
+                if (it != ieNode->layer.getParameters().end())
+                {
+                    InferenceEngine::Blob::CPtr bp = it->second.as<InferenceEngine::Blob::CPtr>();
+                    it->second = (InferenceEngine::Blob::CPtr)convertFp16(std::const_pointer_cast<InferenceEngine::Blob>(bp));
+                    hasWeights = true;
+                }
+            }
+            if (!hasWeights)
+            {
+                InferenceEngine::Blob::Ptr blob = InferenceEngine::make_shared_blob<int16_t>(
+                                                      InferenceEngine::Precision::FP16,
+                                                      InferenceEngine::Layout::C, {1});
+                blob->allocate();
+                ieNode->layer.getParameters()["weights"] = (InferenceEngine::Blob::CPtr)blob;
+            }
+#else
             auto& blobs = ieNode->layer.getConstantData();
             if (blobs.empty())
             {
@@ -1653,6 +1674,7 @@ struct Net::Impl
                 for (auto& it : blobs)
                     it.second = convertFp16(std::const_pointer_cast<InferenceEngine::Blob>(it.second));
             }
+#endif
         }
         if (!fused)
@@ -1724,7 +1746,7 @@ struct Net::Impl
         if (!ieNode->net->isInitialized())
         {
-#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R3)
+#if INF_ENGINE_VER_MAJOR_EQ(INF_ENGINE_RELEASE_2018R4)
             // For networks which are built in runtime we need to specify a
             // version of their hyperparameters.
             std::string versionTrigger = "<net name=\"TestInput\" version=\"3\" batch=\"1\">"
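For context, the FP16 branch patched above runs only when a network is executed on the Inference Engine backend with a Myriad or FPGA target. A minimal sketch of a pipeline that reaches it, with hypothetical model file names and input size:

```cpp
#include <opencv2/dnn.hpp>

int main()
{
    using namespace cv;
    using namespace cv::dnn;

    // "model.xml"/"model.bin" are hypothetical Model Optimizer files.
    Net net = readNet("model.xml", "model.bin");

    // Selecting the IE backend with a Myriad target is what drives the
    // constant-blob FP16 conversion patched in dnn.cpp above.
    net.setPreferableBackend(DNN_BACKEND_INFERENCE_ENGINE);
    net.setPreferableTarget(DNN_TARGET_MYRIAD);

    Mat img(Size(224, 224), CV_8UC3, Scalar::all(0));  // dummy input image
    net.setInput(blobFromImage(img));
    Mat out = net.forward();
    return 0;
}
```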
modules/dnn/src/layers/normalize_bbox_layer.cpp

@@ -276,23 +276,29 @@ public:
         InferenceEngine::Builder::Layer l = ieLayer;
         const int numChannels = input->dims[2];  // NOTE: input->dims are reversed (whcn)
+        InferenceEngine::Blob::Ptr weights;
         if (blobs.empty())
         {
-            auto weights = InferenceEngine::make_shared_blob<float>(InferenceEngine::Precision::FP32,
-                                                                    InferenceEngine::Layout::C,
-                                                                    {(size_t)numChannels});
-            weights->allocate();
+            auto onesBlob = InferenceEngine::make_shared_blob<float>(InferenceEngine::Precision::FP32,
+                                                                     InferenceEngine::Layout::C,
+                                                                     {(size_t)numChannels});
+            onesBlob->allocate();
             std::vector<float> ones(numChannels, 1);
-            weights->set(ones);
-            l.addConstantData("weights", weights);
+            onesBlob->set(ones);
+            weights = onesBlob;
             l.getParameters()["channel_shared"] = false;
         }
         else
         {
             CV_Assert(numChannels == blobs[0].total());
-            l.addConstantData("weights", wrapToInfEngineBlob(blobs[0], {(size_t)numChannels}, InferenceEngine::Layout::C));
+            weights = wrapToInfEngineBlob(blobs[0], {(size_t)numChannels}, InferenceEngine::Layout::C);
             l.getParameters()["channel_shared"] = blobs[0].total() == 1;
         }
+#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R5)
+        l.getParameters()["weights"] = (InferenceEngine::Blob::CPtr)weights;
+#else
+        l.addConstantData("weights", weights);
+#endif
         l.getParameters()["across_spatial"] = acrossSpatial;
         return Ptr<BackendNode>(new InfEngineBackendNode(l));
     }
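The refactor above builds the weights blob once and attaches it through whichever API the detected IE release supports, while "channel_shared" records whether a single scale is reused for every channel. A minimal plain-C++ sketch of that semantics (a hypothetical helper, not the layer's actual code):

```cpp
#include <cmath>
#include <cstdio>
#include <vector>

// L2-normalize one across-channel vector, then scale it. A single-element
// weight vector plays the role of channel_shared = true; otherwise each
// channel gets its own weight (channel_shared = false).
static void normalizeAcrossChannels(std::vector<float>& v, const std::vector<float>& weights)
{
    float norm = 0.f;
    for (float x : v) norm += x * x;
    norm = std::sqrt(norm) + 1e-10f;  // epsilon guards against division by zero
    for (size_t c = 0; c < v.size(); ++c)
        v[c] = v[c] / norm * (weights.size() == 1 ? weights[0] : weights[c]);
}

int main()
{
    std::vector<float> v = {3.f, 4.f};       // norm = 5
    normalizeAcrossChannels(v, {10.f});      // one shared weight
    std::printf("%.1f %.1f\n", v[0], v[1]);  // prints: 6.0 8.0
    return 0;
}
```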
modules/dnn/src/layers/resize_layer.cpp

@@ -173,7 +173,7 @@ public:
         ieLayer.getParameters()["antialias"] = false;
         if (scaleWidth != scaleHeight)
             CV_Error(Error::StsNotImplemented, "resample with sw != sh");
-        ieLayer.getParameters()["factor"] = 1.0 / scaleWidth;
+        ieLayer.getParameters()["factor"] = 1.0f / scaleWidth;
     }
     else if (interpolation == "bilinear")
     {
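This one-character fix matters because Builder layer parameters are stored type-erased, so a value written as a double is not retrievable as a float. A stand-in sketch using std::any (C++17) in place of the actual IE parameter type:

```cpp
#include <any>
#include <cstdio>
#include <map>
#include <string>

int main()
{
    // std::any stands in for the type-erased IE parameter value here.
    std::map<std::string, std::any> params;

    params["factor"] = 1.0;   // the old code: stores a double
    std::printf("holds float? %d\n", std::any_cast<float>(&params["factor"]) != nullptr);  // 0

    params["factor"] = 1.0f;  // the fixed code: stores a float
    std::printf("holds float? %d\n", std::any_cast<float>(&params["factor"]) != nullptr);  // 1
    return 0;
}
```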
modules/dnn/src/op_inf_engine.cpp

@@ -766,7 +766,7 @@ void InfEngineBackendLayer::forward(InputArrayOfArrays inputs, OutputArrayOfArra
     CV_Error(Error::StsInternal, "Choose Inference Engine as a preferable backend.");
 }

-InferenceEngine::TBlob<int16_t>::Ptr convertFp16(const InferenceEngine::Blob::Ptr& blob)
+InferenceEngine::Blob::Ptr convertFp16(const InferenceEngine::Blob::Ptr& blob)
 {
     auto halfs = InferenceEngine::make_shared_blob<int16_t>(InferenceEngine::Precision::FP16, blob->layout(), blob->dims());
     halfs->allocate();
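The signature change lets call sites assign the result straight to Blob::CPtr parameters instead of the concrete TBlob<int16_t>::Ptr. As a refresher on what an FP32-to-FP16 conversion does at the bit level, here is a self-contained sketch; it is deliberately simplified (mantissa truncated instead of rounded, subnormals flushed to zero, NaN collapses to infinity) and is not the OpenCV/IE implementation:

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>

// Repack an IEEE-754 single into a half: 1 sign bit, 5 exponent bits
// (bias 15 instead of 127), 10 mantissa bits.
static uint16_t fp32_to_fp16(float f)
{
    uint32_t x;
    std::memcpy(&x, &f, sizeof(x));
    uint16_t sign = (uint16_t)((x >> 16) & 0x8000);
    int32_t  exp  = (int32_t)((x >> 23) & 0xFF) - 127 + 15;  // rebias exponent
    uint32_t mant = x & 0x7FFFFF;

    if (exp <= 0)  return sign;                // underflow: flush to signed zero
    if (exp >= 31) return sign | 0x7C00;       // overflow (and NaN) -> infinity
    return sign | (uint16_t)(exp << 10) | (uint16_t)(mant >> 13);  // truncate mantissa
}

int main()
{
    std::printf("1.0f  -> 0x%04x\n", fp32_to_fp16(1.0f));   // 0x3c00
    std::printf("-2.5f -> 0x%04x\n", fp32_to_fp16(-2.5f));  // 0xc100
    return 0;
}
```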
modules/dnn/src/op_inf_engine.hpp

@@ -36,6 +36,7 @@
 #define INF_ENGINE_VER_MAJOR_GT(ver) (((INF_ENGINE_RELEASE) / 10000) > ((ver) / 10000))
 #define INF_ENGINE_VER_MAJOR_GE(ver) (((INF_ENGINE_RELEASE) / 10000) >= ((ver) / 10000))
 #define INF_ENGINE_VER_MAJOR_LT(ver) (((INF_ENGINE_RELEASE) / 10000) < ((ver) / 10000))
+#define INF_ENGINE_VER_MAJOR_EQ(ver) (((INF_ENGINE_RELEASE) / 10000) == ((ver) / 10000))

 #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
 #include <ie_builders.hpp>

@@ -252,7 +253,7 @@ Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob);

 // Convert Inference Engine blob with FP32 precision to FP16 precision.
 // Allocates memory for a new blob.
-InferenceEngine::TBlob<int16_t>::Ptr convertFp16(const InferenceEngine::Blob::Ptr& blob);
+InferenceEngine::Blob::Ptr convertFp16(const InferenceEngine::Blob::Ptr& blob);

 // This is a fake class to run networks from Model Optimizer. Objects of that
 // class simulate responses of layers that are imported by OpenCV and supported by
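The new INF_ENGINE_VER_MAJOR_EQ macro (used by the dnn.cpp hunk above) follows its neighbors: INF_ENGINE_RELEASE packs a release as a ten-digit number whose leading six digits identify the major release (the test guard below shows 2018 R5 as 2018050000), so integer division by 10000 yields a comparable value such as 201805. A standalone worked example:

```cpp
#include <cstdio>

// Release constants as assumed from the guards in this commit.
#define INF_ENGINE_RELEASE_2018R4 2018040000
#define INF_ENGINE_RELEASE_2018R5 2018050000
#define INF_ENGINE_RELEASE        2018050000  // pretend we build against 2018 R5

#define INF_ENGINE_VER_MAJOR_GT(ver) (((INF_ENGINE_RELEASE) / 10000) > ((ver) / 10000))
#define INF_ENGINE_VER_MAJOR_EQ(ver) (((INF_ENGINE_RELEASE) / 10000) == ((ver) / 10000))

int main()
{
    // 2018050000 / 10000 == 201805, 2018040000 / 10000 == 201804
    std::printf("GT(2018R4): %d\n", INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R4));  // 1
    std::printf("GT(2018R5): %d\n", INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R5));  // 0
    std::printf("EQ(2018R5): %d\n", INF_ENGINE_VER_MAJOR_EQ(INF_ENGINE_RELEASE_2018R5));  // 1
    return 0;
}
```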
modules/dnn/test/test_halide_layers.cpp

@@ -694,6 +694,11 @@ TEST_P(Eltwise, Accuracy)
     Backend backendId = get<0>(get<4>(GetParam()));
     Target targetId = get<1>(get<4>(GetParam()));

+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE > 2018050000
+    if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_OPENCL)
+        throw SkipTestException("");
+#endif
+
     Net net;
     std::vector<int> convLayerIds(numConv);