Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
MegEngine 天元
MegEngine
提交
0560a218
MegEngine
项目概览
MegEngine 天元
/
MegEngine
10 个月 前同步成功
通知
392
Star
4702
Fork
582
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
DevOps
流水线
流水线任务
计划
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
MegEngine
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
DevOps
DevOps
流水线
流水线任务
计划
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
流水线任务
提交
Issue看板
前往新版Gitcode,体验更适合开发者的 AI 搜索 >>
提交
0560a218
编写于
11月 21, 2020
作者:
M
Megvii Engine Team
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
chore(dnn/test): refactor megdnn arm_common test
GitOrigin-RevId: 41689103017705eb1c0c933b0406d48c5d669931
上级
f7731bd4
变更
6
展开全部
隐藏空白更改
内联
并排
Showing
6 changed file
with
2655 addition
and
2392 deletion
+2655
-2392
dnn/test/arm_common/conv_bias_multi_thread.cpp
dnn/test/arm_common/conv_bias_multi_thread.cpp
+190
-2391
dnn/test/arm_common/conv_bias_multi_thread_conv1x1.cpp
dnn/test/arm_common/conv_bias_multi_thread_conv1x1.cpp
+410
-0
dnn/test/arm_common/conv_bias_multi_thread_im2col.cpp
dnn/test/arm_common/conv_bias_multi_thread_im2col.cpp
+575
-0
dnn/test/arm_common/conv_bias_multi_thread_weight_preprocess.cpp
...t/arm_common/conv_bias_multi_thread_weight_preprocess.cpp
+1240
-0
dnn/test/common/conv_bias.cpp
dnn/test/common/conv_bias.cpp
+193
-0
dnn/test/common/conv_bias.h
dnn/test/common/conv_bias.h
+47
-1
未找到文件。
dnn/test/arm_common/conv_bias_multi_thread.cpp
浏览文件 @
0560a218
此差异已折叠。
点击以展开。
dnn/test/arm_common/conv_bias_multi_thread_conv1x1.cpp
0 → 100644
浏览文件 @
0560a218
/**
* \file dnn/test/arm_common/conv_bias_multi_thread_conv1x1.cpp
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied.
*/
#include "megdnn/dtype.h"
#include "test/arm_common/fixture.h"
#include "test/common/benchmarker.h"
#include "test/common/conv_bias.h"
#include "test/arm_common/cpuinfo_help.h"
using
namespace
megdnn
;
using
namespace
test
;
using
namespace
conv_bias
;
#ifdef __ARM_FEATURE_DOTPROD
TEST_F
(
ARM_COMMON_MULTI_THREADS
,
CONV_BIAS_CONV1x1_QUANTIZEDSYM_MK4_DOT
)
{
UniformIntRNG
rng
{
-
50
,
50
};
#define cb(name) \
checker_conv_bias_common( \
get_nchw44_conv_bias_args({1}, QUAN_NLMODE, ONLY_BR_BIASMODE, 1, \
true, false, true), \
handle(), &rng, epsilon, dtype::QuantizedS8(2.5f), \
dtype::QuantizedS8(2.5f), dtype::QuantizedS32(6.25f), \
dtype::QuantizedS8(60.25f), name); \
checker_conv_bias_common( \
get_nchw44_conv_bias_args({1}, ONLY_IDENTITY_NLMODE, \
ONLY_NO_BIASMODE, 1, true, false, true), \
handle(), &rng, epsilon, dtype::QuantizedS8(2.5f), \
dtype::QuantizedS8(2.5f), dtype::QuantizedS32(6.25f), {}, name); \
checker_conv_bias_common( \
get_nchw44_conv_bias_args({1}, ONLY_IDENTITY_NLMODE, \
ONLY_NO_BIASMODE, 1, true, false, true), \
handle(), &rng, epsilon, dtype::Int8(), dtype::Int8(), \
dtype::Int32(), {}, name);
float
epsilon
=
0.001
;
#if MEGDNN_AARCH64
cb
(
"CONV1x1:AARCH64_INT8X8X32_MK4_8X12X4_DOTPROD"
);
#elif MEGDNN_ARMV7
cb
(
"CONV1x1:AARCH32_INT8_MK4_8X4X4_DOTPROD"
);
#endif
#undef cb
}
#endif
// clang-format on
/***************************** Conv1x1 Algo Test ***********************/
// Float32 Conv1x1 stride-1: run the per-arch matmul-backed algorithm over the
// full argument set, then the GEMV fast path over the 1x1-spatial subset.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_1X1_S1_F32) {
    using namespace conv_bias;
    std::vector<conv_bias::TestArg> args = get_conv_bias_1x1_args(false, false);
#if MEGDNN_AARCH64
    check_conv_bias(args, handle(), "CONV1x1:AARCH64_F32K8X12X1:24");
#elif MEGDNN_ARMV7
    check_conv_bias(args, handle(), "CONV1x1:ARMV7_F32:48");
#endif
    // The GEMV path is only checked on cases whose spatial size is 1x1.
    std::vector<conv_bias::TestArg> gemv_args;
    for (auto&& cur : args) {
        const bool spatial_1x1 = cur.src.shape[2] == 1 && cur.src.shape[3] == 1;
        if (spatial_1x1) {
            gemv_args.emplace_back(cur);
        }
    }
    check_conv_bias(gemv_args, handle(), "CONV1x1_GEMV");
}
// Float32 Conv1x1 stride-1 on the NCHW44 (MK4) layout with packed kernels,
// plus the GEMV fast path on the 1x1-spatial subset.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_1X1_S1_MK4_PACK_F32) {
    using namespace conv_bias;
    std::vector<conv_bias::TestArg> args =
            get_nchw44_conv_bias_args({1}, FULL_NLMODE, ALL_BIASMODE, 1, true);
#if MEGDNN_AARCH64
    check_conv_bias(args, handle(), "CONV1x1:AARCH64_F32_MK4_K8X12X1:24");
#elif MEGDNN_ARMV7
    check_conv_bias(args, handle(), "CONV1x1:ARMV7_F32_MK4_PACK_4X12:24");
#endif
    // The GEMV path is only checked on cases whose spatial size is 1x1.
    std::vector<conv_bias::TestArg> gemv_args;
    for (auto&& cur : args) {
        const bool spatial_1x1 = cur.src.shape[2] == 1 && cur.src.shape[3] == 1;
        if (spatial_1x1) {
            gemv_args.emplace_back(cur);
        }
    }
    check_conv_bias(gemv_args, handle(), "CONV1x1_GEMV");
}
// Float32 Conv1x1 stride-1 on the NCHW44 (MK4) layout with no-pack kernels.
// Only cases whose spatial size H*W is a multiple of 4 are run.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_1X1_S1_MK4_NO_PACK_F32) {
    using namespace conv_bias;
    std::vector<conv_bias::TestArg> args =
            get_nchw44_conv_bias_args({1}, FULL_NLMODE, ALL_BIASMODE, 1, true);
    std::vector<conv_bias::TestArg> args_of_4;
    for (auto&& cur : args) {
        const size_t spatial = cur.src.shape[2] * cur.src.shape[3];
        if (spatial % 4 == 0) {
            args_of_4.push_back(cur);
        }
    }
#if MEGDNN_AARCH64
    check_conv_bias(args_of_4, handle(), "CONV1x1:AARCH64_F32_MK4_4x16:24");
#elif MEGDNN_ARMV7
    check_conv_bias(args_of_4, handle(), "CONV1x1:ARMV7_F32_MK4_4x8:48");
#endif
}
#if __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
// Float16 Conv1x1 stride-1: per-arch fp16 matmul kernel with a relaxed
// epsilon (0.03), then the GEMV fast path on the 1x1-spatial subset.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_1X1_S1_F16) {
    using namespace conv_bias;
    std::vector<conv_bias::TestArg> args = get_conv_bias_1x1_args(false, false);
    NormalRNG rng(1);
#if MEGDNN_AARCH64
    checker_conv_bias_common(args, handle(), &rng, 0.03, dtype::Float16{},
                             dtype::Float16{}, dtype::Float16{},
                             dtype::Float16{}, "CONV1x1:AARCH64_F16_K8X24X1:48");
#elif MEGDNN_ARMV7
    checker_conv_bias_common(args, handle(), &rng, 0.03, dtype::Float16{},
                             dtype::Float16{}, dtype::Float16{},
                             dtype::Float16{}, "CONV1x1:AARCH32_F16_K4X16X1:24");
#endif
    // The GEMV path is only checked on cases whose spatial size is 1x1.
    std::vector<conv_bias::TestArg> gemv_args;
    for (auto&& cur : args) {
        const bool spatial_1x1 = cur.src.shape[2] == 1 && cur.src.shape[3] == 1;
        if (spatial_1x1) {
            gemv_args.emplace_back(cur);
        }
    }
    check_conv_bias(gemv_args, handle(), "CONV1x1_GEMV");
}
#endif
// Symmetric quantized int8 Conv1x1 stride-1 (QuantizedS8 in/out, QuantizedS32
// accumulation), then the GEMV fast path on the 1x1-spatial subset.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_1X1_S1_QUANTIZEDSYM) {
    UniformIntRNG rng{-50, 50};
    float epsilon = 0.001;
    std::vector<conv_bias::TestArg> args =
            get_conv_bias_1x1_args(false, false, true, true);
    // cb() reads `args`, `rng` and `epsilon` at its expansion point, so the
    // ARMv7 branch can loosen epsilon before expanding.
#define cb(name)                                                         \
    checker_conv_bias_common(                                            \
            args, handle(), &rng, epsilon, dtype::QuantizedS8(2.5f),     \
            dtype::QuantizedS8(2.5f), dtype::QuantizedS32(6.25f),        \
            dtype::QuantizedS8(60.25f), name);
#if MEGDNN_AARCH64
#if __ARM_FEATURE_DOTPROD
    cb("CONV1x1:AARCH64_INT8X8X32_K8X12X4_DOTPROD:24");
#else
    cb("CONV1x1:AARCH64_INT8X8X32_K8X8X8:24");
    cb("CONV1x1:AARCH64_INT8X8X32_K4X4X16:48");
#endif
#elif MEGDNN_ARMV7
    // Looser tolerance on ARMv7 kernels.
    epsilon = 1;
    cb("CONV1x1:ARMV7_INT8X8X32_K4X8X8:48");
#endif
#undef cb
    // GEMV path: only 1x1-spatial cases apply.
    std::vector<conv_bias::TestArg> gemv_args;
    for (auto&& arg : args)
        if (arg.src.shape[2] == 1 && arg.src.shape[3] == 1) {
            gemv_args.emplace_back(arg);
        }
    checker_conv_bias_common(gemv_args, handle(), &rng, epsilon,
                             dtype::QuantizedS8(2.5f), dtype::QuantizedS8(2.5f),
                             dtype::QuantizedS32(6.25f),
                             dtype::QuantizedS8(60.25f), "CONV1x1_GEMV");
}
#if MEGDNN_AARCH64 || MEGDNN_ARMV7
TEST_F
(
ARM_COMMON_MULTI_THREADS
,
CONV_BIAS_1X1_S1_QUANTIZEDASYM
)
{
UniformIntRNG
rng
{
-
50
,
50
};
std
::
vector
<
conv_bias
::
TestArg
>
args
=
get_conv_bias_1x1_args
(
false
,
false
,
true
,
true
);
#define cb(name) \
checker_conv_bias_common(args, handle(), &rng, epsilon, \
dtype::Quantized8Asymm(1.2f, (uint8_t)125), \
dtype::Quantized8Asymm(1.3f, (uint8_t)129), \
dtype::QuantizedS32(1.2 * 1.3), \
dtype::Quantized8Asymm(50.3f, (uint8_t)120), \
name);
float
epsilon
=
0.001
;
#if MEGDNN_AARCH64
#if __ARM_FEATURE_DOTPROD
cb
(
"CONV1x1:AARCH64_QUINT8_K8X8X4_DOTPROD:48"
);
#else
cb
(
"CONV1x1:AARCH64_QUINT8_K8X8X8:24"
);
#endif
#elif MEGDNN_ARMV7
epsilon
=
1
;
cb
(
"CONV1x1:ARMV7_QUINT8_K4X8X8:48"
);
#endif
#undef cb
std
::
vector
<
conv_bias
::
TestArg
>
gemv_args
;
for
(
auto
&&
arg
:
args
)
if
(
arg
.
src
.
shape
[
2
]
==
1
&&
arg
.
src
.
shape
[
3
]
==
1
)
{
gemv_args
.
emplace_back
(
arg
);
}
checker_conv_bias_common
(
gemv_args
,
handle
(),
&
rng
,
epsilon
,
dtype
::
Quantized8Asymm
(
1.2
f
,
(
uint8_t
)
125
),
dtype
::
Quantized8Asymm
(
1.3
f
,
(
uint8_t
)
129
),
dtype
::
QuantizedS32
(
1.2
*
1.3
),
dtype
::
Quantized8Asymm
(
50.3
f
,
(
uint8_t
)
120
),
"CONV1x1_GEMV"
);
}
#endif
#if MEGDNN_AARCH64 || MEGDNN_ARMV7
// Quantized uint8 Conv1x1 stride-1 with int32 output (no requant: dst dtype
// left empty), then the GEMV fast path on the 1x1-spatial subset.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_1X1_S1_QUINT8x8x32) {
    NormalRNG rng(128.f);
    float epsilon = 0.001;
    std::vector<conv_bias::TestArg> args = get_conv_bias_1x1_args(true, true);
    // cb() reads `args`, `rng` and `epsilon` at its expansion point.
#define cb(name)                                                             \
    checker_conv_bias_common(args, handle(), &rng, epsilon,                  \
                             dtype::Quantized8Asymm(1.2f, (uint8_t)125),     \
                             dtype::Quantized8Asymm(1.3f, (uint8_t)129),     \
                             dtype::QuantizedS32(1.2 * 1.3), {}, name);
#if MEGDNN_AARCH64
#if __ARM_FEATURE_DOTPROD
    cb("CONV1x1:AARCH64_QUINT8_K8X8X4_DOTPROD:24");
#else
    cb("CONV1x1:AARCH64_QUINT8_K8X8X8:48");
#endif
#elif MEGDNN_ARMV7
#if __ARM_FEATURE_DOTPROD
    cb("CONV1x1:AARCH32_QUINT8_K4X8X4:48");
#endif
    cb("CONV1x1:ARMV7_QUINT8_K4X8X8:24");
#endif
#undef cb
    // GEMV path: only 1x1-spatial cases apply.
    std::vector<conv_bias::TestArg> gemv_args;
    for (auto&& arg : args)
        if (arg.src.shape[2] == 1 && arg.src.shape[3] == 1) {
            gemv_args.emplace_back(arg);
        }
    checker_conv_bias_common(gemv_args, handle(), &rng, epsilon,
                             dtype::Quantized8Asymm(1.2f, (uint8_t)125),
                             dtype::Quantized8Asymm(1.3f, (uint8_t)129),
                             dtype::QuantizedS32(1.2 * 1.3), {},
                             "CONV1x1_GEMV");
}
// Int8 Conv1x1 stride-1 with int16 accumulation/output, covering both plain
// NCHW kernels (cb) and NCHW44 kernels (cb_nchw44), then the GEMV fast path.
TEST_F(ARM_COMMON_MULTI_THREADS, CONVBIAS_1X1_S1_INT8x8x16) {
    UniformIntRNG rng{-50, 50};
    float epsilon = 0.001;
    std::vector<conv_bias::TestArg> args =
            get_conv_bias_1x1_args(false, true, false, false);
    std::vector<conv_bias::TestArg> args_nchw44 = get_nchw44_conv_bias_args(
            {1}, ONLY_IDENTITY_NLMODE, BR_AND_BIAS_BIASMODE, 1, true);
    // Both macros read `rng` and `epsilon` at their expansion point; they
    // differ only in which argument set they run.
#define cb(name)                                                              \
    checker_conv_bias_common(args, handle(), &rng, epsilon, dtype::Int8{},    \
                             dtype::Int8{}, dtype::Int16{}, dtype::Int16{},   \
                             name);
#define cb_nchw44(name)                                                       \
    checker_conv_bias_common(args_nchw44, handle(), &rng, epsilon,            \
                             dtype::Int8{}, dtype::Int8{}, dtype::Int16{},    \
                             dtype::Int16{}, name);
#if MEGDNN_AARCH64
    cb("CONV1x1:AARCH64_INT8X8X16_K8X8X8:24");
    cb("CONV1x1:AARCH64_INT8X8X16_K4X4X16:24");
    cb_nchw44("CONV1x1:AARCH64_INT8X8X16_MK4_4X4X8:48");
    cb_nchw44("CONV1x1:AARCH64_INT8X8X16_MK4_16X12X4:48");
#elif MEGDNN_ARMV7
    cb("CONV1x1:ARMV7_INT8X8X16_K4X8X8:24");
    cb("CONV1x1:ARMV7_INT8X8X16_K4X2X16:48");
    cb_nchw44("CONV1x1:ARMV7_INT8X8X16_MK4_K8X8X4:48");
#endif
    // Generic fallback kernel, checked on every arch.
    cb("CONV1x1:ARM_COMMON_INT8X8X16:48");
#undef cb
#undef cb_nchw44
    // GEMV path: only 1x1-spatial cases apply.
    std::vector<conv_bias::TestArg> gemv_args;
    for (auto&& arg : args)
        if (arg.src.shape[2] == 1 && arg.src.shape[3] == 1) {
            gemv_args.emplace_back(arg);
        }
    checker_conv_bias_common(gemv_args, handle(), &rng, epsilon, dtype::Int8{},
                             dtype::Int8{}, dtype::Int16{}, dtype::Int16{},
                             "CONV1x1_GEMV");
}
#endif
// Int8 Conv1x1 stride-1 with int32 output, delegated to
// checker_conv_bias_mul_int8x8x32 (which also covers the quantized variant),
// then the GEMV fast path on the 1x1-spatial subset.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_1X1_S1_INT8x8x32) {
    using namespace conv_bias;
    std::vector<conv_bias::TestArg> args =
            get_conv_bias_1x1_args(false, true, false, false);
#define cb(name) checker_conv_bias_mul_int8x8x32(args, handle(), name);
#if MEGDNN_AARCH64
#if __ARM_FEATURE_DOTPROD
    cb("CONV1x1:AARCH64_INT8X8X32_K8X12X4_DOTPROD:48");
#else
    cb("CONV1x1:AARCH64_INT8X8X32_K8X8X8:24");
    cb("CONV1x1:AARCH64_INT8X8X32_K4X4X16:24");
#endif
#elif MEGDNN_ARMV7
#if __ARM_FEATURE_DOTPROD
    cb("CONV1x1:AARCH32_INT8_K6X8X4:48");
#endif
    cb("CONV1x1:ARMV7_INT8X8X32_K4X8X8:24");
#endif
// Checked on ARMv7 regardless of dot-product support.
#if MEGDNN_ARMV7
    cb("CONV1x1:ARMV7_INT8X8X32_K4X2X16:48");
#endif
#undef cb
    // GEMV path: only 1x1-spatial cases apply.
    std::vector<conv_bias::TestArg> gemv_args;
    for (auto&& arg : args)
        if (arg.src.shape[2] == 1 && arg.src.shape[3] == 1) {
            gemv_args.emplace_back(arg);
        }
    checker_conv_bias_mul_int8x8x32(gemv_args, handle(), "CONV1x1_GEMV");
}
// Only built when dot-product instructions are NOT available: these MK4
// kernels are the non-dot fallbacks.
#ifndef __ARM_FEATURE_DOTPROD
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_1X1_S1_INT8x8x32_MK4) {
    using namespace conv_bias;
    std::vector<conv_bias::TestArg> args = get_nchw44_conv_bias_args(
            {1}, ONLY_IDENTITY_NLMODE, ONLY_NO_BIASMODE, 1, true);
    // First pass: plain int8x8x32 checks.
#define cb(name) checker_conv_bias_mul_int8x8x32(args, handle(), name);
#if MEGDNN_AARCH64
    cb("CONV1x1:AARCH64_INT8X8X32_MK4_4X4X16:24");
#elif MEGDNN_ARMV7
    cb("CONV1x1:ARMV7_INT8X8X32_MK4_4X2X16:24");
#endif
#undef cb
    // Second pass: cb is redefined to run the same kernels on symmetric
    // quantized data; it reads `rng` and `epsilon` at its expansion point.
    UniformIntRNG rng{-50, 50};
    float epsilon = 0.001;
#define cb(name)                                                               \
    checker_conv_bias_common(                                                  \
            get_nchw44_conv_bias_args({1}, QUAN_NLMODE, BR_AND_NO_BIASMODE, 1, \
                                      true),                                   \
            handle(), &rng, epsilon, dtype::QuantizedS8(2.5f),                 \
            dtype::QuantizedS8(2.5f), dtype::QuantizedS32(6.25f),              \
            dtype::QuantizedS8(60.25f), name);
#if MEGDNN_AARCH64
    cb("CONV1x1:AARCH64_INT8X8X32_MK4_4X4X16:24");
#elif MEGDNN_ARMV7
    // Looser tolerance on ARMv7 kernels.
    epsilon = 1;
    cb("CONV1x1:ARMV7_INT8X8X32_MK4_4X2X16:24");
#endif
#undef cb
}
#endif
// Symmetric quantized int8 Conv1x1 on the NCHW44 layout: only the GEMV fast
// path is checked here, on the 1x1-spatial subset of the argument set.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_1X1_S1_INT8x8x32_NCHW44) {
    using namespace conv_bias;
    std::vector<conv_bias::TestArg> args = get_nchw44_conv_bias_args(
            {1}, QUAN_NLMODE, BR_AND_NO_BIASMODE, 1, true);
    UniformIntRNG rng{-50, 50};
    float epsilon = 0.001;
    std::vector<conv_bias::TestArg> gemv_args;
    for (auto&& cur : args) {
        const bool spatial_1x1 = cur.src.shape[2] == 1 && cur.src.shape[3] == 1;
        if (spatial_1x1) {
            gemv_args.emplace_back(cur);
        }
    }
    checker_conv_bias_common(gemv_args, handle(), &rng, epsilon,
                             dtype::QuantizedS8(2.5f), dtype::QuantizedS8(2.5f),
                             dtype::QuantizedS32(6.25f),
                             dtype::QuantizedS8(60.25f), "CONV1x1_GEMV");
}
#ifdef __ARM_FEATURE_DOTPROD
// Symmetric quantized int8 Conv1x1 on the NCHW44-DOT layout: only the GEMV
// fast path is checked here, on the 1x1-spatial subset of the argument set.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_1X1_S1_INT8x8x32_NCHW44_DOT) {
    using namespace conv_bias;
    std::vector<conv_bias::TestArg> args = get_nchw44_conv_bias_args(
            {1}, QUAN_NLMODE, BR_AND_NO_BIASMODE, 1, true, false, true);
    UniformIntRNG rng{-50, 50};
    float epsilon = 0.001;
    std::vector<conv_bias::TestArg> gemv_args;
    for (auto&& cur : args) {
        const bool spatial_1x1 = cur.src.shape[2] == 1 && cur.src.shape[3] == 1;
        if (spatial_1x1) {
            gemv_args.emplace_back(cur);
        }
    }
    checker_conv_bias_common(gemv_args, handle(), &rng, epsilon,
                             dtype::QuantizedS8(2.5f), dtype::QuantizedS8(2.5f),
                             dtype::QuantizedS32(6.25f),
                             dtype::QuantizedS8(60.25f), "CONV1x1_GEMV");
}
#endif
#if MEGDNN_AARCH64
#if MGB_ENABLE_CPUINFO
// Same MK4 packed float32 kernel as the generic test, but with cpuinfo
// temporarily reporting a Cortex-A55 so the A55-tuned code path is exercised.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_1X1_MK4_PACK_F32_A55) {
    CpuInfoTmpReplace cpu_replace_guard(cpuinfo_uarch_cortex_a55);
    using namespace conv_bias;
    std::vector<conv_bias::TestArg> nchw44_args =
            get_nchw44_conv_bias_args({1}, FULL_NLMODE, ALL_BIASMODE, 1, true);
    check_conv_bias(nchw44_args, handle(),
                    "CONV1x1:AARCH64_F32_MK4_K8X12X1:24");
}
#endif
#endif
#if MEGDNN_AARCH64
#if MGB_ENABLE_CPUINFO
// Same MK4 packed float32 kernel as the generic test, but with cpuinfo
// temporarily reporting a Cortex-A53 so the A53-tuned code path is exercised.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_1X1_MK4_PACK_F32_A53) {
    CpuInfoTmpReplace cpu_replace_guard(cpuinfo_uarch_cortex_a53);
    using namespace conv_bias;
    std::vector<conv_bias::TestArg> nchw44_args =
            get_nchw44_conv_bias_args({1}, FULL_NLMODE, ALL_BIASMODE, 1, true);
    check_conv_bias(nchw44_args, handle(),
                    "CONV1x1:AARCH64_F32_MK4_K8X12X1:24");
}
#endif
#endif
// vim: syntax=cpp.doxygen
dnn/test/arm_common/conv_bias_multi_thread_im2col.cpp
0 → 100644
浏览文件 @
0560a218
此差异已折叠。
点击以展开。
dnn/test/arm_common/conv_bias_multi_thread_weight_preprocess.cpp
0 → 100644
浏览文件 @
0560a218
此差异已折叠。
点击以展开。
dnn/test/common/conv_bias.cpp
浏览文件 @
0560a218
...
...
@@ -1196,6 +1196,199 @@ void winograd_algo_extra_impl(const TensorNDArray& tensors, uint32_t m,
free
(
wb
.
ptr
());
};
/**
 * Run ConvBias correctness checks for every case in \p args on the algorithm
 * whose name matches \p algo_name.
 *
 * dtype slots: 0 = src, 1 = filter, 2 = bias, 4 = dst (slot 3, the z tensor,
 * keeps its default). When \p rng is non-null it is installed on tensors 0-3
 * so inputs come from the caller-supplied distribution; a null \p rng keeps
 * the checker's default generators.
 */
void checker_conv_bias_common(std::vector<conv_bias::TestArg> args,
                              Handle* handle, RNG* rng, float epsilon,
                              DType type0, DType type1, DType type2,
                              DType type3, const char* algo_name) {
    using namespace conv_bias;
    Checker<ConvBias> checker(handle);
    // Fail the test if the selected algorithm does not match algo_name.
    checker.set_before_exec_callback(
            conv_bias::ConvBiasAlgoChecker<ConvBias>(algo_name));
    checker.set_dtype(0, type0);
    checker.set_dtype(1, type1);
    checker.set_dtype(2, type2);
    checker.set_dtype(4, type3);
    checker.set_epsilon(epsilon);
    if (rng != nullptr) {
        checker.set_rng(0, rng).set_rng(1, rng).set_rng(2, rng).set_rng(3, rng);
    }
    for (auto&& arg : args) {
        checker.set_param(arg.param).execs(
                {arg.src, arg.filter, arg.bias, {}, {}});
    }
}
// Check an int8x8x32 ConvBias algorithm in two passes: first with plain
// Int8/Int32 dtypes, then with the symmetric quantized equivalents
// (QuantizedS8 inputs, QuantizedS32 output). dtype slots: 0 = src,
// 1 = filter, 2 = bias, 4 = dst. No bias tensor is passed in either pass.
void checker_conv_bias_mul_int8x8x32(std::vector<conv_bias::TestArg> args,
                                     Handle* handle, const char* algo_name) {
    using namespace conv_bias;
    float epsilon = 0.001;
#if MEGDNN_ARMV7
    // Looser tolerance on ARMv7 kernels.
    epsilon = 1.0;
#endif
    Checker<ConvBias> checker(handle);
    // Fail the test if the selected algorithm does not match algo_name.
    checker.set_before_exec_callback(
            conv_bias::ConvBiasAlgoChecker<ConvBias>(algo_name));
    checker.set_dtype(0, dtype::Int8());
    checker.set_dtype(1, dtype::Int8());
    checker.set_dtype(2, dtype::Int32());
    checker.set_dtype(4, dtype::Int32());
    checker.set_epsilon(epsilon);
    // Pass 1: plain int8 inputs with int32 accumulation.
    for (auto&& arg : args) {
        checker.set_param(arg.param).execs({arg.src, arg.filter, {}, {}, {}});
    }
    // Pass 2: same cases with symmetric quantized dtypes and a bounded RNG.
    UniformIntRNG rng{-50, 50};
    for (auto&& arg : args) {
        checker.set_dtype(0, dtype::QuantizedS8(2.5f))
                .set_dtype(1, dtype::QuantizedS8(2.5f))
                .set_dtype(2, dtype::QuantizedS32(6.25f))
                .set_dtype(4, dtype::QuantizedS32(6.25f))
                .set_rng(0, &rng)
                .set_rng(1, &rng)
                .set_rng(2, &rng)
                .set_param(arg.param)
                .set_epsilon(epsilon)
                .execs({arg.src, arg.filter, {}, {}, {}});
    }
}
// Same two-pass int8x8x32 check as checker_conv_bias_mul_int8x8x32, but run
// through OprWeightPreprocessProxy so the algorithm's weight-preprocess path
// is exercised. Uses the checker's default epsilon (none is set here).
void checker_conv_bias_int8x8x32_preprocess(std::vector<conv_bias::TestArg> args,
                                            Handle* handle,
                                            const char* algo_name) {
    using namespace conv_bias;
    Checker<ConvBiasForward, OprWeightPreprocessProxy<ConvBiasForward>> checker(
            handle);
    // Fail the test if the selected algorithm does not match algo_name.
    checker.set_before_exec_callback(
            conv_bias::ConvBiasAlgoChecker<ConvBias>(algo_name));
    checker.set_dtype(0, dtype::Int8());
    checker.set_dtype(1, dtype::Int8());
    checker.set_dtype(2, dtype::Int32());
    checker.set_dtype(4, dtype::Int32());
    // Pass 1: plain int8 inputs with int32 accumulation.
    for (auto&& arg : args) {
        checker.set_param(arg.param).execs({arg.src, arg.filter, {}, {}, {}});
    }
    // Pass 2: same cases with symmetric quantized dtypes and a bounded RNG.
    UniformIntRNG rng{-50, 50};
    for (auto&& arg : args) {
        checker.set_dtype(0, dtype::QuantizedS8(2.5f))
                .set_dtype(1, dtype::QuantizedS8(2.5f))
                .set_dtype(2, dtype::QuantizedS32(6.25f))
                .set_dtype(4, dtype::QuantizedS32(6.25f))
                .set_rng(0, &rng)
                .set_rng(1, &rng)
                .set_rng(2, &rng)
                .set_param(arg.param)
                .execs({arg.src, arg.filter, {}, {}, {}});
    }
}
// Build the test-case set for NCHW44 / NCHW44_DOT ConvBias.
//
// For every combination of bias mode, nonlinearity, batch, kernel size,
// oc/ic, spatial size and group count, the `pack` lambda validates the
// combination and, if legal, appends a TestArg with the matching src /
// weight / bias shapes. `no_pad` is currently unused (kept for interface
// compatibility).
std::vector<conv_bias::TestArg> get_nchw44_conv_bias_args(
        std::vector<size_t> kernel_vec,
        std::vector<param::ConvBias::NonlineMode> nlmode_vec,
        std::vector<megdnn::BiasMode> biasmode_vec, size_t stride, bool no_pad,
        bool is_input_nchw, bool is_nchw44_dot) {
    using namespace conv_bias;
    using NLMode = param::ConvBias::NonlineMode;
    std::vector<TestArg> args;
    MEGDNN_MARK_USED_VAR(no_pad);
    auto pack = [&](size_t n, size_t oc, size_t ic, size_t h, size_t w,
                    size_t kernel, size_t stride, size_t group, NLMode nlmode,
                    megdnn::BiasMode bias_mode, int any_pad = -1) {
        // Channels are packed in groups of 4 for NCHW44.
        constexpr int pack_c = 4;
        // Default padding is SAME-style (kernel/2) unless overridden.
        const size_t pad = any_pad >= 0 ? any_pad : kernel / 2;
        auto oc_per_group = oc / group;
        auto ic_per_group = ic / group;
        // Channels must divide evenly into groups, and oc per group must be
        // a multiple of the pack size.
        bool ok_group = (oc % group == 0 && ic % group == 0) &&
                        oc_per_group % pack_c == 0 && oc_per_group > 0 &&
                        ic_per_group > 0;
        // NCHW-input (hybrid) mode only supports dense convs with few input
        // channels; pure NCHW44 needs ic divisible by the pack size.
        bool nchw_disable = group > 1 || ic_per_group >= 4;
        bool nchw44_disable = ic_per_group % pack_c != 0;
        bool invalid_pad = (w + 2 * pad < kernel) || (h + 2 * pad < kernel);
        if (!(ok_group) || invalid_pad) {
            return;
        }
        if ((is_input_nchw && nchw_disable) ||
            (!is_input_nchw && nchw44_disable)) {
            return;
        }
        size_t kernel_h = kernel;
        size_t kernel_w = kernel;
        param::ConvBias param;
        if (!is_nchw44_dot) {
            param.format = param::ConvBias::Format::NCHW44;
        } else {
            param.format = param::ConvBias::Format::NCHW44_DOT;
        }
        param.stride_h = stride;
        param.stride_w = stride;
        param.pad_h = pad;
        param.pad_w = pad;
        param.nonlineMode = nlmode;
        // Default (dense, NCHW44-input) shapes; specialized below.
        auto src_tensor_shape = TensorShape{n, ic / pack_c, h, w, pack_c};
        auto weight_tensor_shape = TensorShape{
                oc / pack_c, ic / pack_c, kernel_h, kernel_w, pack_c, pack_c};
        auto bias_tensor_shape = TensorShape{};
        if (bias_mode == megdnn::BiasMode::BROADCAST_CHANNEL_BIAS) {
            bias_tensor_shape = {1, oc / pack_c, 1, 1, pack_c};
        } else if (bias_mode == megdnn::BiasMode::BIAS) {
            // Full bias must match the output spatial size.
            bias_tensor_shape = {n, oc / pack_c,
                                 (h + 2 * pad - kernel) / stride + 1,
                                 (w + 2 * pad - kernel) / stride + 1, pack_c};
        }
        if (group == 1) {
            param.sparse = param::ConvBias::Sparse::DENSE;
        } else if (group > 1 && ic / group == 1 && oc / group == 1) {
            // Channel-wise conv is explicitly rejected here.
            megdnn_assert(0, "not support channel wise");
            param.sparse = param::ConvBias::Sparse::GROUP;
            weight_tensor_shape = TensorShape{group / pack_c, 1,        1,
                                              kernel_h,       kernel_w, pack_c};
        } else if (group > 1 && oc_per_group % pack_c == 0 && oc / group > 0 &&
                   ic_per_group % pack_c == 0 && ic / group > 0) {
            param.sparse = param::ConvBias::Sparse::GROUP;
            weight_tensor_shape = TensorShape{
                    group,    oc_per_group / pack_c, ic_per_group / pack_c,
                    kernel_h, kernel_w,              pack_c,
                    pack_c};
        }
        if (is_input_nchw) {
            // Hybrid mode: src stays NCHW, weight uses the hybrid layout.
            src_tensor_shape = TensorShape{n, ic, h, w};
            weight_tensor_shape =
                    TensorShape{oc / pack_c, kernel_h, kernel_w, ic, pack_c};
        }
        args.emplace_back(param, src_tensor_shape, weight_tensor_shape,
                          bias_tensor_shape);
    };
    for (auto bias : biasmode_vec)
        for (auto nlmode : nlmode_vec)
            for (size_t n : {1, 2})
                for (size_t kernel : kernel_vec)
                    for (size_t oc : {4, 12})
                        for (size_t ic : {1, 3, 4, 12})
                            for (size_t h : {1, 3, 12})
                                for (size_t w : {1, 16, 23}) {
                                    for (size_t group = 1;
                                         group <=
                                         std::min(std::min(oc, ic), 4_z);
                                         ++group) {
                                        // 1x1 spatial only makes sense for
                                        // kernel 1.
                                        if (kernel != 1 &&
                                            (h == 1 || w == 1)) {
                                            continue;
                                        }
                                        pack(n, oc, ic, h, w, kernel, stride,
                                             group, nlmode, bias);
                                    }
                                }
    return args;
}
}
// namespace conv_bias
}
// namespace test
}
// namespace megdnn
...
...
dnn/test/common/conv_bias.h
浏览文件 @
0560a218
...
...
@@ -97,7 +97,53 @@ void checker_conv_bias_int8x8x16(
// Apply the extra-impl hook that checks winograd transforms for tile size m.
void winograd_algo_extra_impl(const TensorNDArray& tensors, uint32_t m,
                              param::ConvBias param, Handle* handle,
                              param::MatrixMul::Format format);
// Run ConvBias checks on args with caller-supplied dtypes
// (0 = src, 1 = filter, 2 = bias, 4 = dst) and an optional RNG.
void checker_conv_bias_common(std::vector<conv_bias::TestArg> args,
                              Handle* handle, RNG* rng, float epsilon,
                              DType type0, DType type1, DType type2,
                              DType type3, const char* algo_name);
// Build the NCHW44 / NCHW44_DOT test-case set; see conv_bias.cpp for the
// filtering rules applied to each combination.
std::vector<conv_bias::TestArg> get_nchw44_conv_bias_args(
        std::vector<size_t> kernel_vec,
        std::vector<param::ConvBias::NonlineMode> nlmode_vec,
        std::vector<megdnn::BiasMode> biasmode_vec, size_t stride,
        bool no_pad = false, bool is_input_nchw = false,
        bool is_nchw44_dot = false);
// Two-pass int8x8x32 check (plain Int8/Int32, then QuantizedS8/QuantizedS32).
void checker_conv_bias_mul_int8x8x32(std::vector<conv_bias::TestArg> args,
                                     Handle* handle, const char* algo_name);
// Same as above, but through the weight-preprocess proxy.
void checker_conv_bias_int8x8x32_preprocess(
        std::vector<conv_bias::TestArg> args, Handle* handle,
        const char* algo_name);
// Convenience initializer lists of nonlinearity modes for
// get_nchw44_conv_bias_args.
#define FULL_NLMODE                                     \
    {                                                   \
        param::ConvBias::NonlineMode::IDENTITY,         \
                param::ConvBias::NonlineMode::RELU,     \
                param::ConvBias::NonlineMode::H_SWISH,  \
                param::ConvBias::NonlineMode::SIGMOID   \
    }
#define QUAN_NLMODE                                     \
    {                                                   \
        param::ConvBias::NonlineMode::IDENTITY,         \
                param::ConvBias::NonlineMode::RELU,     \
                param::ConvBias::NonlineMode::H_SWISH   \
    }
#define ONLY_IDENTITY_NLMODE \
    { param::ConvBias::NonlineMode::IDENTITY }
// Convenience initializer lists of bias modes for get_nchw44_conv_bias_args.
#define ALL_BIASMODE                                                          \
    {                                                                         \
        megdnn::BiasMode::NO_BIAS, megdnn::BiasMode::BROADCAST_CHANNEL_BIAS,  \
                megdnn::BiasMode::BIAS                                        \
    }
#define BR_AND_NO_BIASMODE \
    { megdnn::BiasMode::NO_BIAS, megdnn::BiasMode::BROADCAST_CHANNEL_BIAS }
#define BR_AND_BIAS_BIASMODE \
    { megdnn::BiasMode::NO_BIAS, megdnn::BiasMode::BIAS }
#define ONLY_BR_BIASMODE \
    { megdnn::BiasMode::BROADCAST_CHANNEL_BIAS }
#define ONLY_NO_BIASMODE \
    { megdnn::BiasMode::NO_BIAS }
#define ONLY_BIAS_BIASMODE \
    { megdnn::BiasMode::BIAS }
}
// namespace conv_bias
}
// namespace test
}
// namespace megdnn
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录