Commit e8c7d617
Authored on Mar 23, 2020 by Alexander Alekhin

Merge pull request #16817 from dkurt:dnn_onnx_lstm

Parents: b1f390b1, 467c3ef0

Showing 3 changed files with 268 additions and 120 deletions:

    modules/dnn/src/layers/recurrent_layers.cpp    +90  -79
    modules/dnn/src/onnx/onnx_importer.cpp         +166 -41
    modules/dnn/test/test_onnx_importer.cpp        +12  -0

modules/dnn/src/layers/recurrent_layers.cpp
@@ -93,6 +93,7 @@ class LSTMLayerImpl CV_FINAL : public LSTMLayer
     float forgetBias, cellClip;
     bool useCellClip, usePeephole;
     bool reverse;   // If true, go in negative direction along the time axis
+    bool bidirectional;  // If true, produces both forward and reversed directions along time axis

 public:
@@ -101,6 +102,7 @@ public:
     {
         setParamsFrom(params);
+        bidirectional = params.get<bool>("bidirectional", false);

         if (!blobs.empty())
         {
             CV_Assert(blobs.size() >= 3);
@@ -110,10 +112,11 @@ public:
             const Mat& Wh = blobs[0];
             const Mat& Wx = blobs[1];
             const Mat& bias = blobs[2];
-            CV_Assert(Wh.dims == 2 && Wx.dims == 2);
-            CV_Assert(Wh.rows == Wx.rows);
-            CV_Assert(Wh.rows == 4*Wh.cols);
-            CV_Assert(Wh.rows == (int)bias.total());
+            CV_CheckEQ(Wh.dims, 2, "");
+            CV_CheckEQ(Wx.dims, 2, "");
+            CV_CheckEQ(Wh.rows, Wx.rows, "");
+            CV_CheckEQ(Wh.rows, (1 + static_cast<int>(bidirectional))*4*Wh.cols, "");
+            CV_CheckEQ(Wh.rows, (int)bias.total(), "");
             CV_Assert(Wh.type() == Wx.type() && Wx.type() == bias.type());

             // Peephole weights.
@@ -135,6 +138,7 @@ public:
         useCellClip = params.get<bool>("use_cell_clip", false);
         usePeephole = params.get<bool>("use_peephole", false);
         reverse = params.get<bool>("reverse", false);
+        CV_Assert(!reverse || !bidirectional);

         allocated = false;
         outTailShape.clear();
@@ -206,6 +210,7 @@ public:
         outResShape.push_back(_numSamples);
         outResShape.insert(outResShape.end(), outTailShape_.begin(), outTailShape_.end());
+        outResShape.back() *= (1 + static_cast<int>(bidirectional));

         size_t noutputs = produceCellOutput ? 2 : 1;
         outputs.assign(noutputs, outResShape);
@@ -252,6 +257,7 @@ public:
         outTsShape.clear();
         outTsShape.push_back(numSamples);
         outTsShape.insert(outTsShape.end(), outTailShape.begin(), outTailShape.end());
+        outTsShape.back() *= (1 + static_cast<int>(bidirectional));

         allocated = true;
     }
@@ -272,91 +278,96 @@ public:
         outputs_arr.getMatVector(output);
         internals_arr.getMatVector(internals);

-        const Mat &Wh = blobs[0];
-        const Mat &Wx = blobs[1];
-        const Mat &bias = blobs[2];
-
-        int numOut = Wh.size[1];
-
-        Mat hInternal = internals[0], cInternal = internals[1],
-                dummyOnes = internals[2], gates = internals[3];
-        hInternal.setTo(0.);
-        cInternal.setTo(0.);
-        dummyOnes.setTo(1.);
-
-        int numSamplesTotal = numTimeStamps*numSamples;
-        Mat xTs = input[0].reshape(1, numSamplesTotal);
-
-        Mat hOutTs = output[0].reshape(1, numSamplesTotal);
-        Mat cOutTs = produceCellOutput ? output[1].reshape(1, numSamplesTotal) : Mat();
-
-        int tsStart, tsEnd, tsInc;
-        if (reverse) {
-            tsStart = numTimeStamps - 1;
-            tsEnd = -1;
-            tsInc = -1;
-        }
-        else {
-            tsStart = 0;
-            tsEnd = numTimeStamps;
-            tsInc = 1;
-        }
-        for (int ts = tsStart; ts != tsEnd; ts += tsInc)
+        const int numDirs = 1 + static_cast<int>(bidirectional);
+        for (int i = 0; i < numDirs; ++i)
         {
-            Range curRowRange(ts*numSamples, (ts + 1)*numSamples);
-            Mat xCurr = xTs.rowRange(curRowRange);
+            const Mat &Wh = blobs[0].rowRange(i * blobs[0].rows / numDirs, (i + 1) * blobs[0].rows / numDirs);
+            const Mat &Wx = blobs[1].rowRange(i * blobs[1].rows / numDirs, (i + 1) * blobs[1].rows / numDirs);
+            const Mat &bias = blobs[2].colRange(i * blobs[2].cols / numDirs, (i + 1) * blobs[2].cols / numDirs);
+
+            int numOut = Wh.size[1];
+            Mat hInternal = internals[0], cInternal = internals[1],
+                    dummyOnes = internals[2], gates = internals[3];
+            hInternal.setTo(0.);
+            cInternal.setTo(0.);
+            dummyOnes.setTo(1.);
+
+            int numSamplesTotal = numTimeStamps*numSamples;
+            Mat xTs = input[0].reshape(1, numSamplesTotal);
+
+            Mat hOutTs = output[0].reshape(1, numSamplesTotal);
+            hOutTs = hOutTs.colRange(i * hOutTs.cols / numDirs, (i + 1) * hOutTs.cols / numDirs);
+            Mat cOutTs = produceCellOutput ? output[1].reshape(1, numSamplesTotal) : Mat();
+
+            int tsStart, tsEnd, tsInc;
+            if (reverse || i == 1) {
+                tsStart = numTimeStamps - 1;
+                tsEnd = -1;
+                tsInc = -1;
+            }
+            else {
+                tsStart = 0;
+                tsEnd = numTimeStamps;
+                tsInc = 1;
+            }
+            for (int ts = tsStart; ts != tsEnd; ts += tsInc)
+            {
+                Range curRowRange(ts*numSamples, (ts + 1)*numSamples);
+                Mat xCurr = xTs.rowRange(curRowRange);

-            gemm(xCurr, Wx, 1, gates, 0, gates, GEMM_2_T);      // Wx * x_t
-            gemm(hInternal, Wh, 1, gates, 1, gates, GEMM_2_T);  //+Wh * h_{t-1}
-            gemm(dummyOnes, bias, 1, gates, 1, gates);          //+b
+                gemm(xCurr, Wx, 1, gates, 0, gates, GEMM_2_T);      // Wx * x_t
+                gemm(hInternal, Wh, 1, gates, 1, gates, GEMM_2_T);  //+Wh * h_{t-1}
+                gemm(dummyOnes, bias, 1, gates, 1, gates);          //+b

-            Mat gateI = gates.colRange(0*numOut, 1*numOut);
-            Mat gateF = gates.colRange(1*numOut, 2*numOut);
-            Mat gateO = gates.colRange(2*numOut, 3*numOut);
-            Mat gateG = gates.colRange(3*numOut, 4*numOut);
+                Mat gateI = gates.colRange(0*numOut, 1*numOut);
+                Mat gateF = gates.colRange(1*numOut, 2*numOut);
+                Mat gateO = gates.colRange(2*numOut, 3*numOut);
+                Mat gateG = gates.colRange(3*numOut, 4*numOut);

-            if (forgetBias)
-                add(gateF, forgetBias, gateF);
+                if (forgetBias)
+                    add(gateF, forgetBias, gateF);

-            if (usePeephole)
-            {
-                Mat gatesIF = gates.colRange(0, 2*numOut);
-                gemm(cInternal, blobs[3], 1, gateI, 1, gateI);
-                gemm(cInternal, blobs[4], 1, gateF, 1, gateF);
-                sigmoid(gatesIF, gatesIF);
-            }
-            else
-            {
-                Mat gatesIFO = gates.colRange(0, 3*numOut);
-                sigmoid(gatesIFO, gatesIFO);
-            }
+                if (usePeephole)
+                {
+                    Mat gatesIF = gates.colRange(0, 2*numOut);
+                    gemm(cInternal, blobs[3], 1, gateI, 1, gateI);
+                    gemm(cInternal, blobs[4], 1, gateF, 1, gateF);
+                    sigmoid(gatesIF, gatesIF);
+                }
+                else
+                {
+                    Mat gatesIFO = gates.colRange(0, 3*numOut);
+                    sigmoid(gatesIFO, gatesIFO);
+                }

-            tanh(gateG, gateG);
+                tanh(gateG, gateG);

-            //compute c_t
-            multiply(gateF, cInternal, gateF);  // f_t (*) c_{t-1}
-            multiply(gateI, gateG, gateI);      // i_t (*) g_t
-            add(gateF, gateI, cInternal);       // c_t = f_t (*) c_{t-1} + i_t (*) g_t
+                //compute c_t
+                multiply(gateF, cInternal, gateF);  // f_t (*) c_{t-1}
+                multiply(gateI, gateG, gateI);      // i_t (*) g_t
+                add(gateF, gateI, cInternal);       // c_t = f_t (*) c_{t-1} + i_t (*) g_t

-            if (useCellClip)
-            {
-                min(cInternal, cellClip, cInternal);
-                max(cInternal, -cellClip, cInternal);
-            }
-            if (usePeephole)
-            {
-                gemm(cInternal, blobs[5], 1, gateO, 1, gateO);
-                sigmoid(gateO, gateO);
-            }
+                if (useCellClip)
+                {
+                    min(cInternal, cellClip, cInternal);
+                    max(cInternal, -cellClip, cInternal);
+                }
+                if (usePeephole)
+                {
+                    gemm(cInternal, blobs[5], 1, gateO, 1, gateO);
+                    sigmoid(gateO, gateO);
+                }

-            //compute h_t
-            tanh(cInternal, hInternal);
-            multiply(gateO, hInternal, hInternal);
+                //compute h_t
+                tanh(cInternal, hInternal);
+                multiply(gateO, hInternal, hInternal);

-            //save results in output blobs
-            hInternal.copyTo(hOutTs.rowRange(curRowRange));
-            if (produceCellOutput)
-                cInternal.copyTo(cOutTs.rowRange(curRowRange));
+                //save results in output blobs
+                hInternal.copyTo(hOutTs.rowRange(curRowRange));
+                if (produceCellOutput)
+                    cInternal.copyTo(cOutTs.rowRange(curRowRange));
+            }
         }
     }
 };
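In the rewritten forward pass above, the three blobs hold the parameters of both directions stacked together (Wh and Wx along rows, the bias along columns), and each pass of the numDirs loop carves out the slice for one direction; the reverse direction then simply walks the time axis backwards. Below is a minimal standalone sketch of that slicing arithmetic, not taken from the patch; the shapes (2 directions, 3 hidden units, 5 input features) are made up for illustration:

    #include <opencv2/core.hpp>
    #include <iostream>

    int main()
    {
        const int numDirs = 2;       // forward + reverse (bidirectional)
        const int numHidden = 3;     // hypothetical hidden size per direction
        const int numFeatures = 5;   // hypothetical input feature count

        // Wx stacked along rows, one 4*numHidden block of gate weights per
        // direction, mirroring the layout the importer produces for blobs[1].
        cv::Mat Wx(numDirs * 4 * numHidden, numFeatures, CV_32F);
        cv::randu(Wx, -1.f, 1.f);

        for (int i = 0; i < numDirs; ++i)
        {
            // Same index arithmetic as the numDirs loop in LSTMLayerImpl::forward.
            cv::Mat WxDir = Wx.rowRange(i * Wx.rows / numDirs, (i + 1) * Wx.rows / numDirs);
            std::cout << "direction " << i << ": " << WxDir.rows << "x" << WxDir.cols << "\n";
        }
        return 0;
    }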
modules/dnn/src/onnx/onnx_importer.cpp
@@ -49,6 +49,11 @@ class ONNXImporter
     LayerParams getLayerParams(const opencv_onnx::NodeProto& node_proto);
     bool isCeilMode(const LayerParams& layerParams);

+    void addLayer(Net& dstNet, LayerParams& layerParams,
+                  const opencv_onnx::NodeProto& node_proto,
+                  std::map<std::string, LayerInfo>& layer_id,
+                  std::map<std::string, MatShape>& outShapes);
+
 public:

     ONNXImporter(const char *onnxFile)
@@ -259,6 +264,42 @@ Mat ONNXImporter::getBlob(const opencv_onnx::NodeProto& node_proto,
     return constBlob->second;
 }

+void ONNXImporter::addLayer(Net& dstNet, LayerParams& layerParams,
+                            const opencv_onnx::NodeProto& node_proto,
+                            std::map<std::string, LayerInfo>& layer_id,
+                            std::map<std::string, MatShape>& outShapes)
+{
+    std::map<std::string, LayerInfo>::iterator layerId;
+    std::map<std::string, MatShape>::iterator shapeIt;
+
+    int id = dstNet.addLayer(layerParams.name, layerParams.type, layerParams);
+    for (int i = 0; i < node_proto.output_size(); ++i)
+    {
+        layer_id.insert(std::make_pair(node_proto.output(i), LayerInfo(id, i)));
+    }
+
+    std::vector<MatShape> layerInpShapes, layerOutShapes, layerInternalShapes;
+    int inpNum = 0;
+    for (int j = 0; j < node_proto.input_size(); j++)
+    {
+        layerId = layer_id.find(node_proto.input(j));
+        if (layerId != layer_id.end())
+        {
+            dstNet.connect(layerId->second.layerId, layerId->second.outputId, id, inpNum);
+            ++inpNum;
+            // Collect input shapes.
+            shapeIt = outShapes.find(node_proto.input(j));
+            CV_Assert(shapeIt != outShapes.end());
+            layerInpShapes.push_back(shapeIt->second);
+        }
+    }
+
+    // Compute shape of output blob for this layer.
+    Ptr<Layer> layer = dstNet.getLayer(id);
+    layer->getMemoryShapes(layerInpShapes, 0, layerOutShapes, layerInternalShapes);
+    for (int i = 0; i < node_proto.output_size() && i < (int)layerOutShapes.size(); ++i)
+    {
+        outShapes[node_proto.output(i)] = layerOutShapes[i];
+    }
+}
+
 void ONNXImporter::populateNet(Net dstNet)
 {
     CV_Assert(model_proto.has_graph());
@@ -455,6 +496,7 @@ void ONNXImporter::populateNet(Net dstNet)
                 runLayer(layerParams, inputs, sliced);
                 CV_Assert(sliced.size() == 1);
                 constBlobs.insert(std::make_pair(layerParams.name, sliced[0]));
+                outShapes[layerParams.name] = shape(sliced[0]);
                 continue;
             }
         }
@@ -579,6 +621,70 @@ void ONNXImporter::populateNet(Net dstNet)
             constBlobs.insert(std::make_pair(layerParams.name, layerParams.blobs[0]));
             continue;
         }
+        else if (layer_type == "LSTM")
+        {
+            LayerParams lstmParams = layerParams;
+            lstmParams.name += "/lstm";
+
+            // https://pytorch.org/docs/stable/nn.html#lstm
+            CV_Assert(node_proto.input_size() == 7);
+            Mat Wx = getBlob(node_proto, constBlobs, 1);
+            Mat Wh = getBlob(node_proto, constBlobs, 2);
+            Mat b = getBlob(node_proto, constBlobs, 3);
+            CV_CheckEQ(countNonZero(getBlob(node_proto, constBlobs, 5)), 0, "Unsupported non zero initial_h");
+            CV_CheckEQ(countNonZero(getBlob(node_proto, constBlobs, 6)), 0, "Unsupported non zero initial_c");
+            b = b.reshape(1, b.size[0]);
+
+            const int numHidden = lstmParams.get<int>("hidden_size");
+            const int numDirs = Wx.size[0];  // Is 1 for forward only and 2 for bidirectional LSTM.
+            const int numFeatures = Wx.size[2];
+            Mat bx = b.colRange(0, b.cols / 2);
+            Mat bh = b.colRange(b.cols / 2, b.cols);
+            b = bx + bh;
+
+            // IFGO->IGFO
+            for (int k = 0; k < numDirs; ++k)
+            {
+                float* WxData = Wx.ptr<float>(k);
+                float* WhData = Wh.ptr<float>(k);
+                float* biasData = b.ptr<float>(k);
+                for (int j = 0; j < numHidden; ++j)
+                {
+                    for (int i = 0; i < numFeatures; ++i)
+                    {
+                        std::swap(WxData[(numHidden + j) * numFeatures + i],
+                                  WxData[(numHidden * 2 + j) * numFeatures + i]);
+                    }
+                    for (int i = 0; i < numHidden; ++i)
+                    {
+                        std::swap(WhData[(numHidden + j) * numHidden + i],
+                                  WhData[(numHidden * 2 + j) * numHidden + i]);
+                    }
+                    std::swap(biasData[numHidden + j], biasData[numHidden * 2 + j]);
+                }
+            }
+            Wx = Wx.reshape(1, Wx.size[0] * Wx.size[1]);
+            Wh = Wh.reshape(1, Wh.size[0] * Wh.size[1]);
+
+            lstmParams.blobs.resize(3);
+            lstmParams.blobs[0] = Wh;
+            lstmParams.blobs[1] = Wx;
+            lstmParams.blobs[2] = b;
+            lstmParams.set("bidirectional", lstmParams.get<String>("direction", "") == "bidirectional");
+
+            node_proto.set_output(0, lstmParams.name);  // set different name so output shapes will be registered on that name
+            addLayer(dstNet, lstmParams, node_proto, layer_id, outShapes);
+
+            MatShape lstmShape = outShapes[node_proto.output(0)];
+
+            // Add fake 1 as it is done in ONNX
+            lstmShape.insert(lstmShape.begin() + 1, 1);
+
+            layerParams.type = "Reshape";
+            layerParams.set("dim", DictValue::arrayInt(&lstmShape[0], lstmShape.size()));
+            node_proto.set_input(0, lstmParams.name);  // redirect input to LSTM
+            node_proto.set_output(0, layerParams.name);  // keep origin LSTM's name
+        }
         else if (layer_type == "ImageScaler")
         {
             const float scale = layerParams.has("scale") ? layerParams.get<float>("scale") : 1.0f;
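A note on the `IFGO->IGFO` loop above: ONNX packs the stacked gate weights in i, o, f, c order, while OpenCV's LSTM layer slices `gates` as i, f, o, g, so the importer swaps the second and third gate blocks in Wx, Wh and the bias; the two ONNX bias halves (input and recurrence) are also summed into one vector, since the layer applies a single bias term. A toy standalone sketch of the same block swap, with made-up values and numHidden = 2:

    #include <algorithm>
    #include <iostream>
    #include <vector>

    int main()
    {
        const int numHidden = 2;
        // Hypothetical bias packed per gate in ONNX order i, o, f, c
        // (two hidden units per gate): i i | o o | f f | c c
        std::vector<float> bias = {1, 1, 2, 2, 3, 3, 4, 4};

        // Swap the second and third gate blocks, as the importer's inner loop
        // does, producing the i, f, o, c packing OpenCV's LSTM layer expects.
        for (int j = 0; j < numHidden; ++j)
            std::swap(bias[numHidden + j], bias[numHidden * 2 + j]);

        for (float v : bias)
            std::cout << v << ' ';   // prints: 1 1 3 3 2 2 4 4
        std::cout << '\n';
        return 0;
    }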
@@ -882,13 +988,38 @@ void ONNXImporter::populateNet(Net dstNet)
         {
             CV_Assert_N(node_proto.input_size() == 1, layerParams.has("axes"));
             DictValue axes_dict = layerParams.get("axes");
-            if (axes_dict.size() != 1)
-                CV_Error(Error::StsNotImplemented, "Multidimensional squeeze");
+            MatShape inpShape = outShapes[node_proto.input(0)];

-            int axis = axes_dict.getIntValue(0);
-            layerParams.set("axis", axis - 1);
-            layerParams.set("end_axis", axis);
-            layerParams.type = "Flatten";
+            std::vector<bool> maskedAxes(inpShape.size(), false);
+            for (int i = 0; i < axes_dict.size(); ++i)
+            {
+                int axis = axes_dict.getIntValue(i);
+                CV_CheckLE(axis, static_cast<int>(inpShape.size()), "Squeeze axis");
+                maskedAxes[axis] = inpShape[axis] == 1;
+            }
+            MatShape outShape;
+            for (int i = 0; i < inpShape.size(); ++i)
+            {
+                if (!maskedAxes[i])
+                    outShape.push_back(inpShape[i]);
+            }
+            if (outShape.size() != inpShape.size())
+            {
+                layerParams.type = "Reshape";
+                layerParams.set("dim", DictValue::arrayInt(&outShape[0], outShape.size()));
+            }
+            else
+                layerParams.type = "Identity";
+
+            if (constBlobs.find(node_proto.input(0)) != constBlobs.end())
+            {
+                Mat inp = getBlob(node_proto, constBlobs, 0);
+                Mat out = inp.reshape(1, outShape);
+                out.dims = outShape.size();  // to workaround dims == 1
+                constBlobs.insert(std::make_pair(layerParams.name, out));
+                outShapes[layerParams.name] = shape(out);
+                continue;
+            }
         }
         else if (layer_type == "Flatten")
         {
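The reworked Squeeze branch above drops exactly the listed axes whose extent is 1 and falls back to an Identity layer when nothing can be removed. A self-contained sketch of that shape computation, with a hypothetical input shape and axes attribute:

    #include <iostream>
    #include <vector>

    int main()
    {
        std::vector<int> inpShape = {1, 3, 1, 5};  // hypothetical input shape
        std::vector<int> axes = {0, 2};            // hypothetical ONNX "axes" attribute

        // Mark only the requested axes that really have extent 1.
        std::vector<bool> maskedAxes(inpShape.size(), false);
        for (int axis : axes)
            maskedAxes[axis] = inpShape[axis] == 1;

        // Keep every axis that was not squeezed away.
        std::vector<int> outShape;
        for (size_t i = 0; i < inpShape.size(); ++i)
            if (!maskedAxes[i])
                outShape.push_back(inpShape[i]);

        for (int d : outShape)
            std::cout << d << ' ';   // prints: 3 5
        std::cout << '\n';
        return 0;
    }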
@@ -1018,9 +1149,17 @@ void ONNXImporter::populateNet(Net dstNet)
             else
                 layerParams.type = "Identity";
         }
-        else if (layer_type == "ConstantOfShape")
+        else if (layer_type == "ConstantOfShape" || layer_type == "ConstantFill")
         {
-            float fill_value = layerParams.blobs.empty() ? 0 : layerParams.blobs[0].at<float>(0, 0);
+            float fill_value;
+            if (!layerParams.blobs.empty())
+            {
+                CV_Assert(!layerParams.has("value"));
+                fill_value = layerParams.blobs[0].at<float>(0, 0);
+            }
+            else
+                fill_value = layerParams.get("value", 0);
+
             MatShape inpShape = getBlob(node_proto, constBlobs, 0);
             for (int i = 0; i < inpShape.size(); i++)
                 CV_CheckGT(inpShape[i], 0, "");
@@ -1032,17 +1171,30 @@ void ONNXImporter::populateNet(Net dstNet)
         else if (layer_type == "Gather")
         {
             CV_Assert(node_proto.input_size() == 2);
-            CV_Assert(layerParams.has("axis"));
             Mat input = getBlob(node_proto, constBlobs, 0);
             Mat indexMat = getBlob(node_proto, constBlobs, 1);
             CV_Assert_N(indexMat.type() == CV_32S, indexMat.total() == 1);
             int index = indexMat.at<int>(0);
-            int axis = layerParams.get<int>("axis");

-            std::vector<cv::Range> ranges(input.dims, Range::all());
-            ranges[axis] = Range(index, index + 1);
+            Mat out;
+            if (layerParams.has("axis"))
+            {
+                int axis = layerParams.get<int>("axis");

-            Mat out = input(ranges);
+                std::vector<cv::Range> ranges(input.dims, Range::all());
+                ranges[axis] = Range(index, index + 1);
+
+                out = input(ranges);
+            }
+            else
+            {
+                CV_Assert(index < input.total());
+                const int dims = input.dims;
+                input = input.reshape(1, 1);
+                input.dims = 2;
+                out = input.reshape(1, 1).colRange(index, index + 1);
+                out.dims = dims;
+            }
             constBlobs.insert(std::make_pair(layerParams.name, out));
             continue;
         }
@@ -1145,34 +1297,7 @@ void ONNXImporter::populateNet(Net dstNet)
                 layerParams.blobs.push_back(getBlob(node_proto, constBlobs, j));
             }
         }
-
-        int id = dstNet.addLayer(layerParams.name, layerParams.type, layerParams);
-        for (int i = 0; i < node_proto.output_size(); ++i)
-        {
-            layer_id.insert(std::make_pair(node_proto.output(i), LayerInfo(id, i)));
-        }
-
-        std::vector<MatShape> layerInpShapes, layerOutShapes, layerInternalShapes;
-        int inpNum = 0;
-        for (int j = 0; j < node_proto.input_size(); j++)
-        {
-            layerId = layer_id.find(node_proto.input(j));
-            if (layerId != layer_id.end())
-            {
-                dstNet.connect(layerId->second.layerId, layerId->second.outputId, id, inpNum);
-                ++inpNum;
-                // Collect input shapes.
-                shapeIt = outShapes.find(node_proto.input(j));
-                CV_Assert(shapeIt != outShapes.end());
-                layerInpShapes.push_back(shapeIt->second);
-            }
-        }
-
-        // Compute shape of output blob for this layer.
-        Ptr<Layer> layer = dstNet.getLayer(id);
-        layer->getMemoryShapes(layerInpShapes, 0, layerOutShapes, layerInternalShapes);
-        for (int i = 0; i < node_proto.output_size() && i < (int)layerOutShapes.size(); ++i)
-        {
-            outShapes[node_proto.output(i)] = layerOutShapes[i];
-        }
+        addLayer(dstNet, layerParams, node_proto, layer_id, outShapes);
     }
 }
modules/dnn/test/test_onnx_importer.cpp
@@ -405,6 +405,8 @@ TEST_P(Test_ONNX_layers, Reshape)
 TEST_P(Test_ONNX_layers, Squeeze)
 {
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
     testONNXModels("squeeze");
 }
@@ -451,6 +453,16 @@ TEST_P(Test_ONNX_layers, Split_EltwiseMax)
     testONNXModels("split_max");
 }

+TEST_P(Test_ONNX_layers, LSTM)
+{
+    testONNXModels("lstm", npy, 0, 0, false, false);
+}
+
+TEST_P(Test_ONNX_layers, LSTM_bidirectional)
+{
+    testONNXModels("lstm_bidirectional", npy, 0, 0, false, false);
+}
+
 INSTANTIATE_TEST_CASE_P(/*nothing*/, Test_ONNX_layers, dnnBackendsAndTargets());

 class Test_ONNX_nets : public Test_ONNX_layers
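With this commit in place, an ONNX graph containing an LSTM node (forward-only or bidirectional) loads through the regular importer entry point. A minimal usage sketch, assuming a hypothetical lstm.onnx file and input shape; it is not part of the test suite above:

    #include <opencv2/dnn.hpp>
    #include <iostream>

    int main()
    {
        // Hypothetical model exported with a single LSTM node.
        cv::dnn::Net net = cv::dnn::readNetFromONNX("lstm.onnx");

        // Hypothetical input: 5 time steps, batch 1, 10 features.
        cv::Mat input(std::vector<int>{5, 1, 10}, CV_32F);
        input.setTo(0.1);  // constant dummy data

        net.setInput(input);
        cv::Mat out = net.forward();
        std::cout << "output dims: " << out.dims << "\n";
        return 0;
    }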