Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
Greenplum
Opencv
提交
4f08bb50
O
Opencv
项目概览
Greenplum
/
Opencv
11 个月 前同步成功
通知
7
Star
0
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
DevOps
流水线
流水线任务
计划
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
O
Opencv
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
DevOps
DevOps
流水线
流水线任务
计划
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
流水线任务
提交
Issue看板
体验新版 GitCode,发现更多精彩内容 >>
提交
4f08bb50
编写于
2月 04, 2021
作者:
A
Alexander Alekhin
浏览文件
操作
浏览文件
下载
差异文件
Merge pull request #19428 from alalek:dnn_drop_misbehaved_clamp
上级
09d2ca17
83aa7113
变更
11
隐藏空白更改
内联
并排
Showing
11 changed files
with
57 additions
and
52 deletions
+57
-52
modules/dnn/include/opencv2/dnn/shape_utils.hpp
modules/dnn/include/opencv2/dnn/shape_utils.hpp
+19
-7
modules/dnn/src/dnn.cpp
modules/dnn/src/dnn.cpp
+1
-1
modules/dnn/src/layers/concat_layer.cpp
modules/dnn/src/layers/concat_layer.cpp
+5
-5
modules/dnn/src/layers/flatten_layer.cpp
modules/dnn/src/layers/flatten_layer.cpp
+6
-6
modules/dnn/src/layers/fully_connected_layer.cpp
modules/dnn/src/layers/fully_connected_layer.cpp
+3
-3
modules/dnn/src/layers/normalize_bbox_layer.cpp
modules/dnn/src/layers/normalize_bbox_layer.cpp
+4
-4
modules/dnn/src/layers/reshape_layer.cpp
modules/dnn/src/layers/reshape_layer.cpp
+1
-8
modules/dnn/src/layers/scale_layer.cpp
modules/dnn/src/layers/scale_layer.cpp
+1
-1
modules/dnn/src/layers/slice_layer.cpp
modules/dnn/src/layers/slice_layer.cpp
+4
-4
modules/dnn/src/layers/softmax_layer.cpp
modules/dnn/src/layers/softmax_layer.cpp
+5
-5
modules/dnn/src/onnx/onnx_importer.cpp
modules/dnn/src/onnx/onnx_importer.cpp
+8
-8
未找到文件。
modules/dnn/include/opencv2/dnn/shape_utils.hpp
浏览文件 @
4f08bb50
...
...
@@ -205,21 +205,33 @@ static inline std::ostream& operator<<(std::ostream &out, const MatShape& shape)
return
out
;
}
inline
int
clamp
(
int
ax
,
int
dims
)
/// @brief Converts axis from `[-dims; dims)` (similar to Python's slice notation) to `[0; dims)` range.
static
inline
int
normalize_axis
(
int
axis
,
int
dims
)
{
return
ax
<
0
?
ax
+
dims
:
ax
;
CV_Check
(
axis
,
axis
>=
-
dims
&&
axis
<
dims
,
""
);
axis
=
(
axis
<
0
)
?
(
dims
+
axis
)
:
axis
;
CV_DbgCheck
(
axis
,
axis
>=
0
&&
axis
<
dims
,
""
);
return
axis
;
}
inline
int
clamp
(
int
ax
,
const
MatShape
&
shape
)
static
inline
int
normalize_axis
(
int
axis
,
const
MatShape
&
shape
)
{
return
clamp
(
ax
,
(
int
)
shape
.
size
());
return
normalize_axis
(
axis
,
(
int
)
shape
.
size
());
}
inline
Range
clamp
(
const
Range
&
r
,
int
axisSize
)
static
inline
Range
normalize_axis_range
(
const
Range
&
r
,
int
axisSize
)
{
Range
clamped
(
std
::
max
(
r
.
start
,
0
),
if
(
r
==
Range
::
all
())
return
Range
(
0
,
axisSize
);
CV_CheckGE
(
r
.
start
,
0
,
""
);
Range
clamped
(
r
.
start
,
r
.
end
>
0
?
std
::
min
(
r
.
end
,
axisSize
)
:
axisSize
+
r
.
end
+
1
);
CV_Assert_N
(
clamped
.
start
<
clamped
.
end
,
clamped
.
end
<=
axisSize
);
CV_DbgCheckGE
(
clamped
.
start
,
0
,
""
);
CV_CheckLT
(
clamped
.
start
,
clamped
.
end
,
""
);
CV_CheckLE
(
clamped
.
end
,
axisSize
,
""
);
return
clamped
;
}
...
...
modules/dnn/src/dnn.cpp
浏览文件 @
4f08bb50
...
...
@@ -2598,7 +2598,7 @@ struct Net::Impl : public detail::NetImplBase
// the concatenation optimization is applied with batch_size > 1.
// so, for now, we only apply this optimization in the most popular
// case batch_size == 1.
int
axis
=
clamp
(
concatLayer
->
axis
,
output
.
dims
);
int
axis
=
normalize_axis
(
concatLayer
->
axis
,
output
.
dims
);
if
(
output
.
total
(
0
,
axis
)
==
1
)
{
size_t
i
,
ninputs
=
ld
.
inputBlobsId
.
size
();
...
...
modules/dnn/src/layers/concat_layer.cpp
浏览文件 @
4f08bb50
...
...
@@ -72,7 +72,7 @@ public:
{
CV_Assert
(
inputs
.
size
()
>
0
);
outputs
.
resize
(
1
,
inputs
[
0
]);
int
cAxis
=
clamp
(
axis
,
inputs
[
0
]);
int
cAxis
=
normalize_axis
(
axis
,
inputs
[
0
]);
int
axisSum
=
0
;
for
(
size_t
i
=
0
;
i
<
inputs
.
size
();
i
++
)
...
...
@@ -192,7 +192,7 @@ public:
inps
.
getUMatVector
(
inputs
);
outs
.
getUMatVector
(
outputs
);
int
cAxis
=
clamp
(
axis
,
inputs
[
0
].
dims
);
int
cAxis
=
normalize_axis
(
axis
,
inputs
[
0
].
dims
);
if
(
padding
)
return
false
;
...
...
@@ -246,7 +246,7 @@ public:
inputs_arr
.
getMatVector
(
inputs
);
outputs_arr
.
getMatVector
(
outputs
);
int
cAxis
=
clamp
(
axis
,
inputs
[
0
].
dims
);
int
cAxis
=
normalize_axis
(
axis
,
inputs
[
0
].
dims
);
Mat
&
outMat
=
outputs
[
0
];
if
(
padding
)
...
...
@@ -306,7 +306,7 @@ public:
InferenceEngine
::
DataPtr
input
=
infEngineDataNode
(
inputs
[
0
]);
InferenceEngine
::
Builder
::
ConcatLayer
ieLayer
(
name
);
ieLayer
.
setAxis
(
clamp
(
axis
,
input
->
getDims
().
size
()));
ieLayer
.
setAxis
(
normalize_axis
(
axis
,
input
->
getDims
().
size
()));
ieLayer
.
setInputPorts
(
std
::
vector
<
InferenceEngine
::
Port
>
(
inputs
.
size
()));
return
Ptr
<
BackendNode
>
(
new
InfEngineBackendNode
(
ieLayer
));
}
...
...
@@ -319,7 +319,7 @@ public:
{
InferenceEngine
::
DataPtr
data
=
ngraphDataNode
(
inputs
[
0
]);
const
int
numDims
=
data
->
getDims
().
size
();
const
int
cAxis
=
clamp
(
axis
,
numDims
);
const
int
cAxis
=
normalize_axis
(
axis
,
numDims
);
std
::
vector
<
size_t
>
maxDims
(
numDims
,
0
);
CV_Assert
(
inputs
.
size
()
==
nodes
.
size
());
...
...
modules/dnn/src/layers/flatten_layer.cpp
浏览文件 @
4f08bb50
...
...
@@ -82,8 +82,8 @@ public:
}
int
numAxes
=
inputs
[
0
].
size
();
int
startAxis
=
clamp
(
_startAxis
,
numAxes
);
int
endAxis
=
clamp
(
_endAxis
,
numAxes
);
int
startAxis
=
normalize_axis
(
_startAxis
,
numAxes
);
int
endAxis
=
normalize_axis
(
_endAxis
,
numAxes
);
CV_Assert
(
startAxis
>=
0
);
CV_Assert
(
endAxis
>=
startAxis
&&
endAxis
<
(
int
)
numAxes
);
...
...
@@ -113,8 +113,8 @@ public:
inputs_arr
.
getMatVector
(
inputs
);
int
numAxes
=
inputs
[
0
].
dims
;
_startAxis
=
clamp
(
_startAxis
,
numAxes
);
_endAxis
=
clamp
(
_endAxis
,
numAxes
);
_startAxis
=
normalize_axis
(
_startAxis
,
numAxes
);
_endAxis
=
normalize_axis
(
_endAxis
,
numAxes
);
}
#ifdef HAVE_OPENCL
...
...
@@ -186,8 +186,8 @@ virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inp
std
::
vector
<
size_t
>
dims
=
ieInpNode
->
get_shape
();
int
numAxes
=
dims
.
size
();
int
startAxis
=
clamp
(
_startAxis
,
numAxes
);
int
endAxis
=
clamp
(
_endAxis
,
numAxes
);
int
startAxis
=
normalize_axis
(
_startAxis
,
numAxes
);
int
endAxis
=
normalize_axis
(
_endAxis
,
numAxes
);
CV_Assert
(
startAxis
>=
0
);
CV_Assert
(
endAxis
>=
startAxis
&&
endAxis
<
numAxes
);
...
...
modules/dnn/src/layers/fully_connected_layer.cpp
浏览文件 @
4f08bb50
...
...
@@ -129,7 +129,7 @@ public:
CV_CheckEQ
(
blobs
[
0
].
dims
,
2
,
""
);
numOutput
=
blobs
[
0
].
size
[
0
];
CV_Assert
(
!
bias
||
(
size_t
)
numOutput
==
blobs
[
1
].
total
());
cAxis
=
clamp
(
axis
,
inputs
[
0
]);
cAxis
=
normalize_axis
(
axis
,
inputs
[
0
]);
}
MatShape
outShape
(
cAxis
+
1
);
...
...
@@ -352,7 +352,7 @@ public:
return
true
;
}
int
axisCan
=
clamp
(
axis
,
inputs
[
0
].
dims
);
int
axisCan
=
normalize_axis
(
axis
,
inputs
[
0
].
dims
);
int
numOutput
=
blobs
[
0
].
size
[
0
];
int
innerSize
=
blobs
[
0
].
size
[
1
];
int
outerSize
=
total
(
shape
(
inputs
[
0
]),
0
,
axisCan
);
...
...
@@ -473,7 +473,7 @@ public:
if
(
!
blobs
.
empty
())
{
int
axisCan
=
clamp
(
axis
,
input
[
0
].
dims
);
int
axisCan
=
normalize_axis
(
axis
,
input
[
0
].
dims
);
int
outerSize
=
input
[
0
].
total
(
0
,
axisCan
);
for
(
size_t
i
=
0
;
i
<
input
.
size
();
i
++
)
...
...
modules/dnn/src/layers/normalize_bbox_layer.cpp
浏览文件 @
4f08bb50
...
...
@@ -118,8 +118,8 @@ public:
const
UMat
&
inp0
=
inputs
[
0
];
UMat
&
buffer
=
internals
[
0
];
startAxis
=
clamp
(
startAxis
,
inp0
.
dims
);
endAxis
=
clamp
(
endAxis
,
inp0
.
dims
);
startAxis
=
normalize_axis
(
startAxis
,
inp0
.
dims
);
endAxis
=
normalize_axis
(
endAxis
,
inp0
.
dims
);
size_t
num
=
total
(
shape
(
inp0
.
size
),
0
,
startAxis
);
size_t
numPlanes
=
total
(
shape
(
inp0
.
size
),
startAxis
,
endAxis
+
1
);
...
...
@@ -203,8 +203,8 @@ public:
const
Mat
&
inp0
=
inputs
[
0
];
Mat
&
buffer
=
internals
[
0
];
startAxis
=
clamp
(
startAxis
,
inp0
.
dims
);
endAxis
=
clamp
(
endAxis
,
inp0
.
dims
);
startAxis
=
normalize_axis
(
startAxis
,
inp0
.
dims
);
endAxis
=
normalize_axis
(
endAxis
,
inp0
.
dims
);
const
float
*
inpData
=
inp0
.
ptr
<
float
>
();
float
*
outData
=
outputs
[
0
].
ptr
<
float
>
();
...
...
modules/dnn/src/layers/reshape_layer.cpp
浏览文件 @
4f08bb50
...
...
@@ -60,14 +60,7 @@ static void computeShapeByReshapeMask(const MatShape &srcShape,
int
srcShapeSize
=
(
int
)
srcShape
.
size
();
int
maskShapeSize
=
(
int
)
maskShape
.
size
();
if
(
srcRange
==
Range
::
all
())
srcRange
=
Range
(
0
,
srcShapeSize
);
else
{
int
sz
=
srcRange
.
size
();
srcRange
.
start
=
clamp
(
srcRange
.
start
,
srcShapeSize
);
srcRange
.
end
=
srcRange
.
end
==
INT_MAX
?
srcShapeSize
:
srcRange
.
start
+
sz
;
}
srcRange
=
normalize_axis_range
(
srcRange
,
srcShapeSize
);
bool
explicitMask
=
!
maskShape
.
empty
();
// All mask values are positive.
for
(
int
i
=
0
,
n
=
maskShape
.
size
();
i
<
n
&&
explicitMask
;
++
i
)
...
...
modules/dnn/src/layers/scale_layer.cpp
浏览文件 @
4f08bb50
...
...
@@ -240,7 +240,7 @@ public:
numChannels
=
blobs
[
0
].
total
();
std
::
vector
<
size_t
>
shape
(
ieInpNode0
->
get_shape
().
size
(),
1
);
int
cAxis
=
clamp
(
axis
,
shape
.
size
());
int
cAxis
=
normalize_axis
(
axis
,
shape
.
size
());
shape
[
cAxis
]
=
numChannels
;
auto
node
=
ieInpNode0
;
...
...
modules/dnn/src/layers/slice_layer.cpp
浏览文件 @
4f08bb50
...
...
@@ -146,7 +146,7 @@ public:
for
(
int
j
=
0
;
j
<
sliceRanges
[
i
].
size
();
++
j
)
{
if
(
shapesInitialized
||
inpShape
[
j
]
>
0
)
outputs
[
i
][
j
]
=
clamp
(
sliceRanges
[
i
][
j
],
inpShape
[
j
]).
size
();
outputs
[
i
][
j
]
=
normalize_axis_range
(
sliceRanges
[
i
][
j
],
inpShape
[
j
]).
size
();
}
}
}
...
...
@@ -209,7 +209,7 @@ public:
// Clamp.
for
(
int
j
=
0
;
j
<
finalSliceRanges
[
i
].
size
();
++
j
)
{
finalSliceRanges
[
i
][
j
]
=
clamp
(
finalSliceRanges
[
i
][
j
],
inpShape
[
j
]);
finalSliceRanges
[
i
][
j
]
=
normalize_axis_range
(
finalSliceRanges
[
i
][
j
],
inpShape
[
j
]);
}
}
...
...
@@ -601,7 +601,7 @@ public:
CV_Assert
(
inputs
.
size
()
==
2
);
MatShape
dstShape
=
inputs
[
0
];
int
start
=
clamp
(
axis
,
dstShape
);
int
start
=
normalize_axis
(
axis
,
dstShape
);
for
(
int
i
=
start
;
i
<
dstShape
.
size
();
i
++
)
{
dstShape
[
i
]
=
inputs
[
1
][
i
];
...
...
@@ -620,7 +620,7 @@ public:
const
Mat
&
inpSzBlob
=
inputs
[
1
];
int
dims
=
inpBlob
.
dims
;
int
start_axis
=
clamp
(
axis
,
dims
);
int
start_axis
=
normalize_axis
(
axis
,
dims
);
std
::
vector
<
int
>
offset_final
(
dims
,
0
);
if
(
offset
.
size
()
==
1
)
...
...
modules/dnn/src/layers/softmax_layer.cpp
浏览文件 @
4f08bb50
...
...
@@ -82,7 +82,7 @@ public:
{
bool
inplace
=
Layer
::
getMemoryShapes
(
inputs
,
requiredOutputs
,
outputs
,
internals
);
MatShape
shape
=
inputs
[
0
];
int
cAxis
=
clamp
(
axisRaw
,
shape
.
size
());
int
cAxis
=
normalize_axis
(
axisRaw
,
shape
.
size
());
shape
[
cAxis
]
=
1
;
internals
.
assign
(
1
,
shape
);
return
inplace
;
...
...
@@ -115,7 +115,7 @@ public:
UMat
&
src
=
inputs
[
0
];
UMat
&
dstMat
=
outputs
[
0
];
int
axis
=
clamp
(
axisRaw
,
src
.
dims
);
int
axis
=
normalize_axis
(
axisRaw
,
src
.
dims
);
if
(
softmaxOp
.
empty
())
{
...
...
@@ -207,7 +207,7 @@ public:
const
Mat
&
src
=
inputs
[
0
];
Mat
&
dst
=
outputs
[
0
];
int
axis
=
clamp
(
axisRaw
,
src
.
dims
);
int
axis
=
normalize_axis
(
axisRaw
,
src
.
dims
);
size_t
outerSize
=
src
.
total
(
0
,
axis
),
channels
=
src
.
size
[
axis
],
innerSize
=
src
.
total
(
axis
+
1
);
...
...
@@ -318,7 +318,7 @@ public:
InferenceEngine
::
DataPtr
input
=
infEngineDataNode
(
inputs
[
0
]);
InferenceEngine
::
Builder
::
SoftMaxLayer
ieLayer
(
name
);
ieLayer
.
setAxis
(
clamp
(
axisRaw
,
input
->
getDims
().
size
()));
ieLayer
.
setAxis
(
normalize_axis
(
axisRaw
,
input
->
getDims
().
size
()));
return
Ptr
<
BackendNode
>
(
new
InfEngineBackendNode
(
ieLayer
));
}
...
...
@@ -329,7 +329,7 @@ public:
const
std
::
vector
<
Ptr
<
BackendNode
>
>&
nodes
)
CV_OVERRIDE
{
auto
&
ieInpNode
=
nodes
[
0
].
dynamicCast
<
InfEngineNgraphNode
>
()
->
node
;
int
axis
=
clamp
(
axisRaw
,
ieInpNode
->
get_shape
().
size
());
int
axis
=
normalize_axis
(
axisRaw
,
ieInpNode
->
get_shape
().
size
());
auto
softmax
=
std
::
make_shared
<
ngraph
::
op
::
v1
::
Softmax
>
(
ieInpNode
,
axis
);
if
(
logSoftMax
)
return
Ptr
<
BackendNode
>
(
new
InfEngineNgraphNode
(
std
::
make_shared
<
ngraph
::
op
::
v0
::
Log
>
(
softmax
)));
...
...
modules/dnn/src/onnx/onnx_importer.cpp
浏览文件 @
4f08bb50
...
...
@@ -503,7 +503,7 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
MatShape
targetShape
;
std
::
vector
<
bool
>
shouldDelete
(
inpShape
.
size
(),
false
);
for
(
int
i
=
0
;
i
<
axes
.
size
();
i
++
)
{
int
axis
=
clamp
(
axes
.
get
<
int
>
(
i
),
inpShape
.
size
());
int
axis
=
normalize_axis
(
axes
.
get
<
int
>
(
i
),
inpShape
.
size
());
shouldDelete
[
axis
]
=
true
;
}
for
(
int
axis
=
0
;
axis
<
inpShape
.
size
();
++
axis
){
...
...
@@ -515,7 +515,7 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
if
(
inpShape
.
size
()
==
3
&&
axes
.
size
()
<=
2
)
{
int
axis
=
clamp
(
axes
.
get
<
int
>
(
0
),
inpShape
.
size
());
int
axis
=
normalize_axis
(
axes
.
get
<
int
>
(
0
),
inpShape
.
size
());
CV_CheckNE
(
axis
,
0
,
""
);
LayerParams
reshapeLp
;
...
...
@@ -539,8 +539,8 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
avgLp
.
set
(
"pool"
,
pool
);
if
(
axes
.
size
()
==
2
)
{
CV_CheckEQ
(
clamp
(
axes
.
get
<
int
>
(
0
),
inpShape
.
size
()),
1
,
"Unsupported mode"
);
CV_CheckEQ
(
clamp
(
axes
.
get
<
int
>
(
1
),
inpShape
.
size
()),
2
,
"Unsupported mode"
);
CV_CheckEQ
(
normalize_axis
(
axes
.
get
<
int
>
(
0
),
inpShape
.
size
()),
1
,
"Unsupported mode"
);
CV_CheckEQ
(
normalize_axis
(
axes
.
get
<
int
>
(
1
),
inpShape
.
size
()),
2
,
"Unsupported mode"
);
avgLp
.
set
(
"global_pooling"
,
true
);
}
else
...
...
@@ -560,9 +560,9 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
CV_Assert
(
axes
.
size
()
<=
inpShape
.
size
()
-
2
);
std
::
vector
<
int
>
kernel_size
(
inpShape
.
size
()
-
2
,
1
);
if
(
axes
.
size
()
==
1
&&
(
clamp
(
axes
.
get
<
int
>
(
0
),
inpShape
.
size
())
<=
1
))
if
(
axes
.
size
()
==
1
&&
(
normalize_axis
(
axes
.
get
<
int
>
(
0
),
inpShape
.
size
())
<=
1
))
{
int
axis
=
clamp
(
axes
.
get
<
int
>
(
0
),
inpShape
.
size
());
int
axis
=
normalize_axis
(
axes
.
get
<
int
>
(
0
),
inpShape
.
size
());
MatShape
newShape
=
inpShape
;
newShape
[
axis
+
1
]
=
total
(
newShape
,
axis
+
1
);
newShape
.
resize
(
axis
+
2
);
...
...
@@ -584,7 +584,7 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
else
{
for
(
int
i
=
0
;
i
<
axes
.
size
();
i
++
)
{
int
axis
=
clamp
(
axes
.
get
<
int
>
(
i
),
inpShape
.
size
());
int
axis
=
normalize_axis
(
axes
.
get
<
int
>
(
i
),
inpShape
.
size
());
CV_Assert_N
(
axis
>=
2
+
i
,
axis
<
inpShape
.
size
());
kernel_size
[
axis
-
2
]
=
inpShape
[
axis
];
}
...
...
@@ -1376,7 +1376,7 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
if
(
constBlobs
.
find
(
node_proto
.
input
(
0
))
!=
constBlobs
.
end
())
{
Mat
input
=
getBlob
(
node_proto
,
0
);
int
axis
=
clamp
(
layerParams
.
get
<
int
>
(
"axis"
,
1
),
input
.
dims
);
int
axis
=
normalize_axis
(
layerParams
.
get
<
int
>
(
"axis"
,
1
),
input
.
dims
);
std
::
vector
<
int
>
out_size
(
&
input
.
size
[
0
],
&
input
.
size
[
0
]
+
axis
);
out_size
.
push_back
(
input
.
total
(
axis
));
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录