Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
Greenplum
Opencv
提交
ec265417
O
Opencv
项目概览
Greenplum
/
Opencv
大约 1 年 前同步成功
通知
7
Star
0
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
DevOps
流水线
流水线任务
计划
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
O
Opencv
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
DevOps
DevOps
流水线
流水线任务
计划
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
流水线任务
提交
Issue看板
体验新版 GitCode,发现更多精彩内容 >>
未验证
提交
ec265417
编写于
10月 11, 2022
作者:
A
Alexander Smorkalov
提交者:
GitHub
10月 11, 2022
浏览文件
操作
浏览文件
下载
差异文件
Merge pull request #22577 from zihaomu:Disable_winograd_branch_in_tryquantize
DNN: add enableWinograd API for Net
上级
1c825dd5
1e2ceca4
变更
15
隐藏空白更改
内联
并排
Showing
15 changed files
with
70 additions
and
18 deletions
+70
-18
modules/dnn/include/opencv2/dnn/all_layers.hpp
modules/dnn/include/opencv2/dnn/all_layers.hpp
+2
-0
modules/dnn/include/opencv2/dnn/dnn.hpp
modules/dnn/include/opencv2/dnn/dnn.hpp
+6
-0
modules/dnn/src/int8layers/convolution_layer.cpp
modules/dnn/src/int8layers/convolution_layer.cpp
+1
-1
modules/dnn/src/int8layers/layers_common.hpp
modules/dnn/src/int8layers/layers_common.hpp
+1
-1
modules/dnn/src/layers/convolution_layer.cpp
modules/dnn/src/layers/convolution_layer.cpp
+3
-2
modules/dnn/src/layers/fast_convolution/fast_convolution.cpp
modules/dnn/src/layers/fast_convolution/fast_convolution.cpp
+7
-9
modules/dnn/src/layers/fast_convolution/fast_convolution.hpp
modules/dnn/src/layers/fast_convolution/fast_convolution.hpp
+2
-2
modules/dnn/src/layers/fast_convolution/winograd_3x3s1_f63.cpp
...es/dnn/src/layers/fast_convolution/winograd_3x3s1_f63.cpp
+1
-1
modules/dnn/src/layers/layers_common.cpp
modules/dnn/src/layers/layers_common.cpp
+3
-1
modules/dnn/src/layers/layers_common.hpp
modules/dnn/src/layers/layers_common.hpp
+1
-1
modules/dnn/src/net.cpp
modules/dnn/src/net.cpp
+7
-0
modules/dnn/src/net_impl.cpp
modules/dnn/src/net_impl.cpp
+32
-0
modules/dnn/src/net_impl.hpp
modules/dnn/src/net_impl.hpp
+2
-0
modules/dnn/src/net_quantization.cpp
modules/dnn/src/net_quantization.cpp
+1
-0
modules/dnn/test/test_torch_importer.cpp
modules/dnn/test/test_torch_importer.cpp
+1
-0
未找到文件。
modules/dnn/include/opencv2/dnn/all_layers.hpp
浏览文件 @
ec265417
...
@@ -259,6 +259,7 @@ CV__DNN_INLINE_NS_BEGIN
...
@@ -259,6 +259,7 @@ CV__DNN_INLINE_NS_BEGIN
bool
fusedActivation
=
false
;
bool
fusedActivation
=
false
;
bool
fusedAdd
=
false
;
bool
fusedAdd
=
false
;
bool
isConv2D
=
false
;
// Should be deleted after fastconv branch support Conv1D and Conv3D.
bool
isConv2D
=
false
;
// Should be deleted after fastconv branch support Conv1D and Conv3D.
bool
useWinograd
=
false
;
// Flag whether to use Winograd to speed up 3x3 convolution.
};
};
class
CV_EXPORTS
ConvolutionLayerInt8
:
public
BaseConvolutionLayer
class
CV_EXPORTS
ConvolutionLayerInt8
:
public
BaseConvolutionLayer
...
@@ -270,6 +271,7 @@ CV__DNN_INLINE_NS_BEGIN
...
@@ -270,6 +271,7 @@ CV__DNN_INLINE_NS_BEGIN
// quantization type flag. The perChannel default is true, that means it contains the parameters
// quantization type flag. The perChannel default is true, that means it contains the parameters
// of per-Channel quantization. Otherwise, that means this layer contains per-Tensor quantized parameters.
// of per-Channel quantization. Otherwise, that means this layer contains per-Tensor quantized parameters.
bool
per_channel
;
bool
per_channel
;
bool
useWinograd
=
true
;
// Flag whether to use Winograd to speed up 3x3 convolution.
static
Ptr
<
BaseConvolutionLayer
>
create
(
const
LayerParams
&
params
);
static
Ptr
<
BaseConvolutionLayer
>
create
(
const
LayerParams
&
params
);
};
};
...
...
modules/dnn/include/opencv2/dnn/dnn.hpp
浏览文件 @
ec265417
...
@@ -837,6 +837,12 @@ CV__DNN_INLINE_NS_BEGIN
...
@@ -837,6 +837,12 @@ CV__DNN_INLINE_NS_BEGIN
*/
*/
CV_WRAP
void
enableFusion
(
bool
fusion
);
CV_WRAP
void
enableFusion
(
bool
fusion
);
/** @brief Enables or disables the Winograd compute branch. The Winograd compute branch can speed up
* 3x3 Convolution at a small loss of accuracy.
* @param useWinograd true to enable the Winograd compute branch. The default is true.
*/
CV_WRAP
void
enableWinograd
(
bool
useWinograd
);
/** @brief Returns overall time for inference and timings (in ticks) for layers.
/** @brief Returns overall time for inference and timings (in ticks) for layers.
*
*
* Indexes in returned vector correspond to layers ids. Some layers can be fused with others,
* Indexes in returned vector correspond to layers ids. Some layers can be fused with others,
...
...
modules/dnn/src/int8layers/convolution_layer.cpp
浏览文件 @
ec265417
...
@@ -41,7 +41,7 @@ public:
...
@@ -41,7 +41,7 @@ public:
BaseConvolutionLayerInt8Impl
(
const
LayerParams
&
params
)
BaseConvolutionLayerInt8Impl
(
const
LayerParams
&
params
)
{
{
setParamsFrom
(
params
);
setParamsFrom
(
params
);
getConvolutionKernelParams
(
params
,
kernel_size
,
pads_begin
,
pads_end
,
strides
,
dilations
,
padMode
,
adjust_pads
);
getConvolutionKernelParams
(
params
,
kernel_size
,
pads_begin
,
pads_end
,
strides
,
dilations
,
padMode
,
adjust_pads
,
useWinograd
);
numOutput
=
params
.
get
<
int
>
(
"num_output"
);
numOutput
=
params
.
get
<
int
>
(
"num_output"
);
int
ngroups
=
params
.
get
<
int
>
(
"group"
,
1
);
int
ngroups
=
params
.
get
<
int
>
(
"group"
,
1
);
...
...
modules/dnn/src/int8layers/layers_common.hpp
浏览文件 @
ec265417
...
@@ -23,7 +23,7 @@ namespace dnn
...
@@ -23,7 +23,7 @@ namespace dnn
{
{
void
getConvolutionKernelParams
(
const
LayerParams
&
params
,
std
::
vector
<
size_t
>&
kernel
,
std
::
vector
<
size_t
>&
pads_begin
,
void
getConvolutionKernelParams
(
const
LayerParams
&
params
,
std
::
vector
<
size_t
>&
kernel
,
std
::
vector
<
size_t
>&
pads_begin
,
std
::
vector
<
size_t
>&
pads_end
,
std
::
vector
<
size_t
>&
strides
,
std
::
vector
<
size_t
>&
dilations
,
std
::
vector
<
size_t
>&
pads_end
,
std
::
vector
<
size_t
>&
strides
,
std
::
vector
<
size_t
>&
dilations
,
cv
::
String
&
padMode
,
std
::
vector
<
size_t
>&
adjust_pads
);
cv
::
String
&
padMode
,
std
::
vector
<
size_t
>&
adjust_pads
,
bool
&
useWinograd
);
void
getPoolingKernelParams
(
const
LayerParams
&
params
,
std
::
vector
<
size_t
>&
kernel
,
std
::
vector
<
bool
>&
globalPooling
,
void
getPoolingKernelParams
(
const
LayerParams
&
params
,
std
::
vector
<
size_t
>&
kernel
,
std
::
vector
<
bool
>&
globalPooling
,
std
::
vector
<
size_t
>&
pads_begin
,
std
::
vector
<
size_t
>&
pads_end
,
std
::
vector
<
size_t
>&
strides
,
cv
::
String
&
padMode
);
std
::
vector
<
size_t
>&
pads_begin
,
std
::
vector
<
size_t
>&
pads_end
,
std
::
vector
<
size_t
>&
strides
,
cv
::
String
&
padMode
);
...
...
modules/dnn/src/layers/convolution_layer.cpp
浏览文件 @
ec265417
...
@@ -89,7 +89,8 @@ public:
...
@@ -89,7 +89,8 @@ public:
BaseConvolutionLayerImpl
(
const
LayerParams
&
params
)
BaseConvolutionLayerImpl
(
const
LayerParams
&
params
)
{
{
setParamsFrom
(
params
);
setParamsFrom
(
params
);
getConvolutionKernelParams
(
params
,
kernel_size
,
pads_begin
,
pads_end
,
strides
,
dilations
,
padMode
,
adjust_pads
);
getConvolutionKernelParams
(
params
,
kernel_size
,
pads_begin
,
pads_end
,
strides
,
dilations
,
padMode
,
adjust_pads
,
useWinograd
);
numOutput
=
params
.
get
<
int
>
(
"num_output"
);
numOutput
=
params
.
get
<
int
>
(
"num_output"
);
int
ngroups
=
params
.
get
<
int
>
(
"group"
,
1
);
int
ngroups
=
params
.
get
<
int
>
(
"group"
,
1
);
...
@@ -2112,7 +2113,7 @@ public:
...
@@ -2112,7 +2113,7 @@ public:
int
dilation_w
=
dilations
.
back
();
int
dilation_w
=
dilations
.
back
();
fastConv2dImpl
=
initFastConv2d
(
ngroups
,
K
,
C
,
Hk
,
Wk
,
stride_w
,
stride_h
,
dilation_w
,
fastConv2dImpl
=
initFastConv2d
(
ngroups
,
K
,
C
,
Hk
,
Wk
,
stride_w
,
stride_h
,
dilation_w
,
dilation_h
,
pads_begin
,
pads_end
,
weightsMat
,
&
biasvec
[
0
]);
dilation_h
,
pads_begin
,
pads_end
,
weightsMat
,
&
biasvec
[
0
]
,
useWinograd
);
}
}
if
(
fastConv2dImpl
)
if
(
fastConv2dImpl
)
...
...
modules/dnn/src/layers/fast_convolution/fast_convolution.cpp
浏览文件 @
ec265417
...
@@ -23,7 +23,8 @@ Ptr<FastConv2d> initFastConv2d(
...
@@ -23,7 +23,8 @@ Ptr<FastConv2d> initFastConv2d(
const
std
::
vector
<
size_t
>&
pads_begin
,
const
std
::
vector
<
size_t
>&
pads_begin
,
const
std
::
vector
<
size_t
>&
pads_end
,
const
std
::
vector
<
size_t
>&
pads_end
,
InputArray
_weightsMat
,
InputArray
_weightsMat
,
float
*
srcBias
)
float
*
srcBias
,
bool
useWinograd
)
{
{
Ptr
<
FastConv2d
>
conv
=
makePtr
<
FastConv2d
>
();
Ptr
<
FastConv2d
>
conv
=
makePtr
<
FastConv2d
>
();
...
@@ -48,11 +49,11 @@ Ptr<FastConv2d> initFastConv2d(
...
@@ -48,11 +49,11 @@ Ptr<FastConv2d> initFastConv2d(
const
size_t
wstep
=
weightsMat
.
step1
();
const
size_t
wstep
=
weightsMat
.
step1
();
#if CV_NEON // For now, winograd is ARM platform only.
#if CV_NEON // For now, winograd is ARM platform only.
if
(
ngroups
==
1
&&
Hk
==
3
&&
Wk
==
3
&&
stride_x
==
1
&&
stride_y
==
1
&&
if
(
useWinograd
&&
ngroups
==
1
&&
Hk
==
3
&&
Wk
==
3
&&
stride_x
==
1
&&
stride_y
==
1
&&
dilation_x
==
1
&&
dilation_y
==
1
&&
K
>=
16
&&
C
>=
16
)
dilation_x
==
1
&&
dilation_y
==
1
&&
K
>=
16
&&
C
>=
16
)
conv
->
if
Winograd63
=
true
;
conv
->
use
Winograd63
=
true
;
#else
#else
conv
->
if
Winograd63
=
false
;
conv
->
use
Winograd63
=
false
;
#endif
#endif
float
*
srcWeights
=
(
float
*
)
weightsMat
.
data
;
float
*
srcWeights
=
(
float
*
)
weightsMat
.
data
;
...
@@ -115,7 +116,7 @@ Ptr<FastConv2d> initFastConv2d(
...
@@ -115,7 +116,7 @@ Ptr<FastConv2d> initFastConv2d(
}});
}});
// Prepare Weight for Winograd F(6x6, 3x3)
// Prepare Weight for Winograd F(6x6, 3x3)
if
(
conv
->
if
Winograd63
)
if
(
conv
->
use
Winograd63
)
{
{
initWinograd63
(
conv
,
weightsMat
,
K
,
C
);
initWinograd63
(
conv
,
weightsMat
,
K
,
C
);
}
}
...
@@ -191,10 +192,7 @@ void runFastConv2d(InputArray _input, OutputArray _output, const Ptr<FastConv2d>
...
@@ -191,10 +192,7 @@ void runFastConv2d(InputArray _input, OutputArray _output, const Ptr<FastConv2d>
}
}
#if CV_NEON
#if CV_NEON
if
(
conv
->
ifWinograd63
if
(
conv
->
useWinograd63
&&
inputShape
[
2
]
>
12
&&
inputShape
[
3
]
>
12
)
&&
inputShape
[
2
]
>
12
&&
inputShape
[
3
]
>
12
&&
inputShape
[
2
]
<
120
&&
inputShape
[
3
]
<
120
)
{
{
if
(
runWinograd63
(
input
,
fusedAddMat
,
output
,
conv
,
ntasks
,
minval
,
maxval
,
activ
,
ifMinMaxAct
))
if
(
runWinograd63
(
input
,
fusedAddMat
,
output
,
conv
,
ntasks
,
minval
,
maxval
,
activ
,
ifMinMaxAct
))
return
;
return
;
...
...
modules/dnn/src/layers/fast_convolution/fast_convolution.hpp
浏览文件 @
ec265417
...
@@ -44,7 +44,7 @@ struct FastConv2d
...
@@ -44,7 +44,7 @@ struct FastConv2d
std
::
vector
<
float
>
weightsBuf
;
// For generic Conv 2D
std
::
vector
<
float
>
weightsBuf
;
// For generic Conv 2D
std
::
vector
<
float
>
weightsWino63Buf
;
// For Winograd F(6x6, 3x3).
std
::
vector
<
float
>
weightsWino63Buf
;
// For Winograd F(6x6, 3x3).
std
::
vector
<
float
>
biasBuf
;
std
::
vector
<
float
>
biasBuf
;
bool
if
Winograd63
=
false
;
bool
use
Winograd63
=
false
;
bool
useAVX2
=
checkHardwareSupport
(
CPU_AVX2
);
bool
useAVX2
=
checkHardwareSupport
(
CPU_AVX2
);
bool
useNEON
=
checkHardwareSupport
(
CPU_NEON
);
bool
useNEON
=
checkHardwareSupport
(
CPU_NEON
);
};
};
...
@@ -58,7 +58,7 @@ Ptr<FastConv2d> initFastConv2d(
...
@@ -58,7 +58,7 @@ Ptr<FastConv2d> initFastConv2d(
const
std
::
vector
<
size_t
>&
pads_begin
,
const
std
::
vector
<
size_t
>&
pads_begin
,
const
std
::
vector
<
size_t
>&
pads_end
,
const
std
::
vector
<
size_t
>&
pads_end
,
InputArray
weightsMat
,
InputArray
weightsMat
,
float
*
srcBias
);
float
*
srcBias
,
bool
useWinograd
);
// It contains different computing branches, like winograd, 1x1 conv.
// It contains different computing branches, like winograd, 1x1 conv.
void
runFastConv2d
(
InputArray
_input
,
OutputArray
_output
,
const
Ptr
<
FastConv2d
>&
conv
,
int
ntasks
,
void
runFastConv2d
(
InputArray
_input
,
OutputArray
_output
,
const
Ptr
<
FastConv2d
>&
conv
,
int
ntasks
,
...
...
modules/dnn/src/layers/fast_convolution/winograd_3x3s1_f63.cpp
浏览文件 @
ec265417
...
@@ -1689,7 +1689,7 @@ int runWinograd63(InputArray _input, InputArray _fusedAddMat, OutputArray _outpu
...
@@ -1689,7 +1689,7 @@ int runWinograd63(InputArray _input, InputArray _fusedAddMat, OutputArray _outpu
void
initWinograd63
(
Ptr
<
FastConv2d
>&
conv
,
InputArray
_weightsMat
,
int
K
,
int
C
)
void
initWinograd63
(
Ptr
<
FastConv2d
>&
conv
,
InputArray
_weightsMat
,
int
K
,
int
C
)
{
{
conv
->
if
Winograd63
=
false
;
conv
->
use
Winograd63
=
false
;
}
}
int
runWinograd63
(
InputArray
_input
,
OutputArray
_output
,
const
Ptr
<
FastConv2d
>&
conv
,
int
ntasks
,
float
minval
,
float
maxval
,
ActivationLayer
*
activ
,
bool
ifMinMaxAct
)
int
runWinograd63
(
InputArray
_input
,
OutputArray
_output
,
const
Ptr
<
FastConv2d
>&
conv
,
int
ntasks
,
float
minval
,
float
maxval
,
ActivationLayer
*
activ
,
bool
ifMinMaxAct
)
...
...
modules/dnn/src/layers/layers_common.cpp
浏览文件 @
ec265417
...
@@ -187,12 +187,14 @@ void getPoolingKernelParams(const LayerParams ¶ms, std::vector<size_t>& kern
...
@@ -187,12 +187,14 @@ void getPoolingKernelParams(const LayerParams ¶ms, std::vector<size_t>& kern
void
getConvolutionKernelParams
(
const
LayerParams
&
params
,
std
::
vector
<
size_t
>&
kernel
,
std
::
vector
<
size_t
>&
pads_begin
,
void
getConvolutionKernelParams
(
const
LayerParams
&
params
,
std
::
vector
<
size_t
>&
kernel
,
std
::
vector
<
size_t
>&
pads_begin
,
std
::
vector
<
size_t
>&
pads_end
,
std
::
vector
<
size_t
>&
strides
,
std
::
vector
<
size_t
>&
pads_end
,
std
::
vector
<
size_t
>&
strides
,
std
::
vector
<
size_t
>&
dilations
,
cv
::
String
&
padMode
,
std
::
vector
<
size_t
>&
adjust_pads
)
std
::
vector
<
size_t
>&
dilations
,
cv
::
String
&
padMode
,
std
::
vector
<
size_t
>&
adjust_pads
,
bool
&
useWinograd
)
{
{
util
::
getKernelSize
(
params
,
kernel
);
util
::
getKernelSize
(
params
,
kernel
);
util
::
getStrideAndPadding
(
params
,
pads_begin
,
pads_end
,
strides
,
padMode
,
kernel
.
size
());
util
::
getStrideAndPadding
(
params
,
pads_begin
,
pads_end
,
strides
,
padMode
,
kernel
.
size
());
util
::
getParameter
(
params
,
"dilation"
,
"dilation"
,
dilations
,
true
,
std
::
vector
<
size_t
>
(
kernel
.
size
(),
1
));
util
::
getParameter
(
params
,
"dilation"
,
"dilation"
,
dilations
,
true
,
std
::
vector
<
size_t
>
(
kernel
.
size
(),
1
));
util
::
getParameter
(
params
,
"adj"
,
"adj"
,
adjust_pads
,
true
,
std
::
vector
<
size_t
>
(
kernel
.
size
(),
0
));
util
::
getParameter
(
params
,
"adj"
,
"adj"
,
adjust_pads
,
true
,
std
::
vector
<
size_t
>
(
kernel
.
size
(),
0
));
useWinograd
=
params
.
get
<
bool
>
(
"use_winograd"
,
true
);
for
(
int
i
=
0
;
i
<
dilations
.
size
();
i
++
)
for
(
int
i
=
0
;
i
<
dilations
.
size
();
i
++
)
CV_Assert
(
dilations
[
i
]
>
0
);
CV_Assert
(
dilations
[
i
]
>
0
);
...
...
modules/dnn/src/layers/layers_common.hpp
浏览文件 @
ec265417
...
@@ -61,7 +61,7 @@ namespace dnn
...
@@ -61,7 +61,7 @@ namespace dnn
{
{
void
getConvolutionKernelParams
(
const
LayerParams
&
params
,
std
::
vector
<
size_t
>&
kernel
,
std
::
vector
<
size_t
>&
pads_begin
,
void
getConvolutionKernelParams
(
const
LayerParams
&
params
,
std
::
vector
<
size_t
>&
kernel
,
std
::
vector
<
size_t
>&
pads_begin
,
std
::
vector
<
size_t
>&
pads_end
,
std
::
vector
<
size_t
>&
strides
,
std
::
vector
<
size_t
>&
dilations
,
std
::
vector
<
size_t
>&
pads_end
,
std
::
vector
<
size_t
>&
strides
,
std
::
vector
<
size_t
>&
dilations
,
cv
::
String
&
padMode
,
std
::
vector
<
size_t
>&
adjust_pads
);
cv
::
String
&
padMode
,
std
::
vector
<
size_t
>&
adjust_pads
,
bool
&
useWinograd
);
void
getPoolingKernelParams
(
const
LayerParams
&
params
,
std
::
vector
<
size_t
>&
kernel
,
std
::
vector
<
bool
>&
globalPooling
,
void
getPoolingKernelParams
(
const
LayerParams
&
params
,
std
::
vector
<
size_t
>&
kernel
,
std
::
vector
<
bool
>&
globalPooling
,
std
::
vector
<
size_t
>&
pads_begin
,
std
::
vector
<
size_t
>&
pads_end
,
std
::
vector
<
size_t
>&
strides
,
cv
::
String
&
padMode
);
std
::
vector
<
size_t
>&
pads_begin
,
std
::
vector
<
size_t
>&
pads_end
,
std
::
vector
<
size_t
>&
strides
,
cv
::
String
&
padMode
);
...
...
modules/dnn/src/net.cpp
浏览文件 @
ec265417
...
@@ -395,6 +395,13 @@ void Net::enableFusion(bool fusion)
...
@@ -395,6 +395,13 @@ void Net::enableFusion(bool fusion)
return
impl
->
enableFusion
(
fusion
);
return
impl
->
enableFusion
(
fusion
);
}
}
// Turns the Winograd compute branch on or off for this network.
// Thin pimpl forwarder: asserts the implementation exists, then delegates.
// @param useWinograd true to allow the Winograd fast path for 3x3 convolutions.
void Net::enableWinograd(bool useWinograd)
{
    CV_TRACE_FUNCTION();

    CV_Assert(impl);
    return impl->enableWinograd(useWinograd);
}
void
Net
::
setHalideScheduler
(
const
String
&
scheduler
)
void
Net
::
setHalideScheduler
(
const
String
&
scheduler
)
{
{
CV_TRACE_FUNCTION
();
CV_TRACE_FUNCTION
();
...
...
modules/dnn/src/net_impl.cpp
浏览文件 @
ec265417
...
@@ -55,6 +55,7 @@ Net::Impl::Impl()
...
@@ -55,6 +55,7 @@ Net::Impl::Impl()
preferableBackend
=
(
Backend
)
getParam_DNN_BACKEND_DEFAULT
();
preferableBackend
=
(
Backend
)
getParam_DNN_BACKEND_DEFAULT
();
preferableTarget
=
DNN_TARGET_CPU
;
preferableTarget
=
DNN_TARGET_CPU
;
hasDynamicShapes
=
false
;
hasDynamicShapes
=
false
;
useWinograd
=
true
;
}
}
...
@@ -2038,6 +2039,37 @@ void Net::Impl::getMemoryConsumption(
...
@@ -2038,6 +2039,37 @@ void Net::Impl::getMemoryConsumption(
}
}
}
}
// Propagates the Winograd enable/disable flag to every convolution layer in
// the network. No-op when the requested state already matches the current one.
//
// For each FP32 ("Convolution") and quantized ("ConvolutionInt8") layer, the
// "use_winograd" layer param is updated and, when the layer instance has
// already been created, its 'useWinograd' member is updated in place as well.
//
// @param useWinograd_ true to allow the Winograd F(6x6, 3x3) fast path.
void Net::Impl::enableWinograd(bool useWinograd_)
{
    if (useWinograd == useWinograd_)
        return;  // nothing to do

    useWinograd = useWinograd_;

    for (MapIdToLayerData::const_iterator it = layers.begin(); it != layers.end(); it++)
    {
        int lid = it->first;
        LayerData& ld = layers[lid];  // non-const access; 'it' only supplies the id
        Ptr<Layer>& currLayer = ld.layerInstance;

        // The two types are mutually exclusive, so 'else if' avoids a second
        // string comparison; both branches follow the same param-then-cast order.
        if (ld.type == "Convolution")
        {
            ld.params.set("use_winograd", useWinograd_);
            Ptr<ConvolutionLayer> convLayer = currLayer.dynamicCast<ConvolutionLayer>();
            if (!convLayer.empty())
                convLayer->useWinograd = useWinograd_;
        }
        else if (ld.type == "ConvolutionInt8")
        {
            ld.params.set("use_winograd", useWinograd_);
            Ptr<ConvolutionLayerInt8> convLayer = currLayer.dynamicCast<ConvolutionLayerInt8>();
            if (!convLayer.empty())
                convLayer->useWinograd = useWinograd_;
        }
    }
}
// TODO drop?
// TODO drop?
void
Net
::
Impl
::
getLayerTypes
(
std
::
vector
<
String
>&
layersTypes
)
const
void
Net
::
Impl
::
getLayerTypes
(
std
::
vector
<
String
>&
layersTypes
)
const
...
...
modules/dnn/src/net_impl.hpp
浏览文件 @
ec265417
...
@@ -64,6 +64,7 @@ struct Net::Impl : public detail::NetImplBase
...
@@ -64,6 +64,7 @@ struct Net::Impl : public detail::NetImplBase
bool
netWasQuantized
;
bool
netWasQuantized
;
bool
fusion
;
bool
fusion
;
bool
isAsync
;
// FIXIT: drop
bool
isAsync
;
// FIXIT: drop
bool
useWinograd
;
std
::
vector
<
int64
>
layersTimings
;
std
::
vector
<
int64
>
layersTimings
;
...
@@ -211,6 +212,7 @@ struct Net::Impl : public detail::NetImplBase
...
@@ -211,6 +212,7 @@ struct Net::Impl : public detail::NetImplBase
void
enableFusion
(
bool
fusion_
);
void
enableFusion
(
bool
fusion_
);
virtual
void
fuseLayers
(
const
std
::
vector
<
LayerPin
>&
blobsToKeep_
);
virtual
void
fuseLayers
(
const
std
::
vector
<
LayerPin
>&
blobsToKeep_
);
void
enableWinograd
(
bool
useWinograd_
);
void
allocateLayers
(
const
std
::
vector
<
LayerPin
>&
blobsToKeep_
);
void
allocateLayers
(
const
std
::
vector
<
LayerPin
>&
blobsToKeep_
);
...
...
modules/dnn/src/net_quantization.cpp
浏览文件 @
ec265417
...
@@ -51,6 +51,7 @@ Net Net::Impl::quantize(Net& net, InputArrayOfArrays calibData, int inputsDtype,
...
@@ -51,6 +51,7 @@ Net Net::Impl::quantize(Net& net, InputArrayOfArrays calibData, int inputsDtype,
setPreferableBackend
(
net
,
DNN_BACKEND_OPENCV
);
setPreferableBackend
(
net
,
DNN_BACKEND_OPENCV
);
setPreferableTarget
(
DNN_TARGET_CPU
);
setPreferableTarget
(
DNN_TARGET_CPU
);
enableFusion
(
false
);
enableFusion
(
false
);
enableWinograd
(
false
);
if
(
calibData
.
isMat
())
if
(
calibData
.
isMat
())
{
{
...
...
modules/dnn/test/test_torch_importer.cpp
浏览文件 @
ec265417
...
@@ -476,6 +476,7 @@ TEST_P(Test_Torch_nets, ENet_accuracy)
...
@@ -476,6 +476,7 @@ TEST_P(Test_Torch_nets, ENet_accuracy)
ASSERT_TRUE
(
!
net
.
empty
());
ASSERT_TRUE
(
!
net
.
empty
());
}
}
net
.
enableWinograd
(
false
);
net
.
setPreferableBackend
(
backend
);
net
.
setPreferableBackend
(
backend
);
net
.
setPreferableTarget
(
target
);
net
.
setPreferableTarget
(
target
);
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录