Commit 99cfefbf authored by: Megvii Engine Team

fix(test): fix test copybara

GitOrigin-RevId: 19b7bdf3773cc7e4a240cf9cffc50be98065eb61
Parent: 0d7ace15
......@@ -961,6 +961,64 @@ void benchmark_winograd(
}
#endif // MEGDNN_WITH_BENCHMARK
//! Run every test case in \p args through \p checker using the winograd
//! conv-bias algorithm described by \p algo_name, \p format and \p layout.
//! The matmul backend wrapped by winograd is selected at compile time per
//! target architecture.
template <class Checker>
void check_winograd(
        const char* algo_name, Checker& checker,
        const std::vector<conv_bias::TestArg>& args, param::MatrixMul::Format format,
        param::ConvBias::Format layout) {
    // Architecture-specific matmul implementation name.
    const char* matmul_name = nullptr;
#if MEGDNN_AARCH64
    switch (format) {
        case param::MatrixMul::Format::MK4:
            matmul_name = "AARCH64_F32_MK4_4x16";
            break;
        case param::MatrixMul::Format::MK8:
            matmul_name = "AARCH64_INT16X16X32_MK8_8X8";
            break;
        default:
            matmul_name = "AARCH64_F32K8X12X1";
            break;
    }
#elif MEGDNN_ARMV7
    switch (format) {
        case param::MatrixMul::Format::MK4:
            matmul_name = "ARMV7_F32_MK4_4x8";
            break;
        case param::MatrixMul::Format::MK8:
            matmul_name = "ARMV7_INT16X16X32_MK8_4X8";
            break;
        default:
            matmul_name = "ARMV7_F32";
            break;
    }
#else
    // Fallback (GI) build: only the MK4 kernel is special-cased here.
    matmul_name = (format == param::MatrixMul::Format::MK4) ? "FB_GI_F32_MK4_4x8"
                                                            : "FB_GI_F32_4x12";
#endif
    // Full algorithm string: layout-specific prefix + matmul backend + spec.
    std::string winograd_algo_name;
    switch (layout) {
        case megdnn::param::ConvBias::Format::NCHW:
            winograd_algo_name = ssprintf("WINOGRAD:%s:%s", matmul_name, algo_name);
            break;
        case megdnn::param::ConvBias::Format::NCHW44:
            winograd_algo_name =
                    ssprintf("WINOGRAD_NCHW44:%s:%s", matmul_name, algo_name);
            break;
        default:
            megdnn_throw("Invalid layout");
    }
    // Force the checker to pick exactly this algorithm before executing.
    checker.set_before_exec_callback(
            conv_bias::ConvBiasAlgoChecker<ConvBias>(winograd_algo_name.c_str()));
    for (auto&& arg : args) {
        checker.set_param(arg.param).execs({arg.src, arg.filter, arg.bias, {}, {}});
    }
}
// Explicit instantiation for the plain checker used by the correctness tests.
template void check_winograd<megdnn::test::Checker<megdnn::ConvBias>>(
        const char* algo_name, megdnn::test::Checker<megdnn::ConvBias>& checker,
        const std::vector<conv_bias::TestArg>& args, param::MatrixMul::Format format,
        param::ConvBias::Format layout);
// Checker variant that routes execution through the weight pre-process proxy.
using WeightPreprocessChecker = megdnn::test::Checker<
        megdnn::ConvBias, megdnn::test::OprWeightPreprocessProxy<megdnn::ConvBias>>;
// Explicit instantiation for the weight-preprocess checker.
template void check_winograd<WeightPreprocessChecker>(
        const char* algo_name, WeightPreprocessChecker& checker,
        const std::vector<conv_bias::TestArg>& args, param::MatrixMul::Format format,
        param::ConvBias::Format layout);
std::vector<conv_bias::TestArg> get_conv_bias_args(
std::vector<size_t> kernel, size_t stride, bool no_pad, bool no_bias,
bool no_nonlinemode, bool quantized_nlmod, bool only_broadcast_bias) {
......
......@@ -66,6 +66,13 @@ void benchmark_winograd(
const char* algo_name, megdnn::Handle* handle, size_t kernel,
size_t pack_size = 1);
#endif // MEGDNN_WITH_BENCHMARK
//! Run winograd conv-bias checks described by \p algo_name over every test
//! case in \p args using \p checker. \p format selects the matmul packing
//! (DEFAULT/MK4/MK8) and \p layout the tensor layout; layouts other than
//! NCHW and NCHW44 are rejected at runtime.
template <class Checker>
void check_winograd(
        const char* algo_name, Checker& checker,
        const std::vector<megdnn::test::conv_bias::TestArg>& args,
        megdnn::param::MatrixMul::Format format =
                megdnn::param::MatrixMul::Format::DEFAULT,
        megdnn::param::ConvBias::Format layout = megdnn::param::ConvBias::Format::NCHW);
std::vector<megdnn::test::conv_bias::TestArg> get_conv_bias_args(
std::vector<size_t> kernel, size_t stride, bool no_pad, bool no_bias,
......
......@@ -492,6 +492,146 @@ TEST_F(FALLBACK_MULTI_THREADS, CONVBIAS_GI_DIRECT_FP32_STR1) {
handle(), "F32STRD1");
}
// Winograd F23 with MK4 matmul packing, multi-threaded fallback handle.
TEST_F(FALLBACK_MULTI_THREADS, CONVBIAS_GI_WINOGRAD_F23_4) {
    using namespace conv_bias;
    Checker<ConvBiasForward> checker(handle());
    const std::vector<TestArg> test_cases = get_winograd_mk_packed_args();
    check_winograd("4:2:32", checker, test_cases, param::MatrixMul::Format::MK4);
}
// Winograd F23 on NCHW44 layout with MK4 packing, multi-threaded.
TEST_F(FALLBACK_MULTI_THREADS, CONVBIAS_GI_WINOGRAD_F23_4_NCHW44) {
    using namespace conv_bias;
    Checker<ConvBiasForward> checker(handle());
    const std::vector<TestArg> test_cases =
            get_nchw44_conv_bias_args({3}, QUAN_NLMODE, BR_AND_NO_BIASMODE, 1);
    check_winograd(
            "4:2:32", checker, test_cases, param::MatrixMul::Format::MK4,
            param::ConvBias::Format::NCHW44);
}
// Winograd F23 / MK4, exercised through the weight-preprocess proxy.
TEST_F(FALLBACK_MULTI_THREADS, CONVBIAS_GI_WINOGRAD_F23_4_WEIGHT_PREPROCESS) {
    using namespace conv_bias;
    Checker<ConvBiasForward, OprWeightPreprocessProxy<ConvBiasForward>> checker(
            handle());
    const std::vector<TestArg> test_cases = get_winograd_mk_packed_args();
    check_winograd("4:2:32", checker, test_cases, param::MatrixMul::Format::MK4);
}
// Winograd F23 / MK4 on NCHW44, via the weight-preprocess proxy.
TEST_F(FALLBACK_MULTI_THREADS, CONVBIAS_GI_WINOGRAD_F23_4_NCHW44_WEIGHT_PREPROCESS) {
    using namespace conv_bias;
    Checker<ConvBiasForward, OprWeightPreprocessProxy<ConvBiasForward>> checker(
            handle());
    const std::vector<TestArg> test_cases =
            get_nchw44_conv_bias_args({3}, QUAN_NLMODE, BR_AND_NO_BIASMODE, 1);
    check_winograd(
            "4:2:32", checker, test_cases, param::MatrixMul::Format::MK4,
            param::ConvBias::Format::NCHW44);
}
// Winograd F63 with MK4 packing on the single-threaded fallback handle.
TEST_F(FALLBACK, CONVBIAS_GI_WINOGRAD_F63_4) {
    using namespace conv_bias;
    Checker<ConvBiasForward> checker(handle());
    const std::vector<TestArg> test_cases = get_winograd_mk_packed_args();
    check_winograd("4:6:16", checker, test_cases, param::MatrixMul::Format::MK4);
}
// Winograd F63 / MK4, single-threaded, via the weight-preprocess proxy.
TEST_F(FALLBACK, CONVBIAS_GI_WINOGRAD_F63_4_WEIGHT_PREPROCESS) {
    using namespace conv_bias;
    Checker<ConvBiasForward, OprWeightPreprocessProxy<ConvBiasForward>> checker(
            handle());
    const std::vector<TestArg> test_cases = get_winograd_mk_packed_args();
    check_winograd("4:6:16", checker, test_cases, param::MatrixMul::Format::MK4);
}
// Winograd F63 with the default (non-packed) matmul format.
TEST_F(FALLBACK_MULTI_THREADS, CONVBIAS_GI_WINOGRAD_F63) {
    using namespace conv_bias;
    Checker<ConvBiasForward> checker(handle());
    const std::vector<TestArg> test_cases = get_winograd_args(3);
    check_winograd("1:6:32", checker, test_cases);
}
// Winograd F63 with MK4 packing, multi-threaded.
TEST_F(FALLBACK_MULTI_THREADS, CONVBIAS_GI_WINOGRAD_F63_4) {
    using namespace conv_bias;
    Checker<ConvBiasForward> checker(handle());
    const std::vector<TestArg> test_cases = get_winograd_mk_packed_args();
    check_winograd("4:6:16", checker, test_cases, param::MatrixMul::Format::MK4);
}
// Winograd F63 on NCHW44 layout with MK4 packing, multi-threaded.
TEST_F(FALLBACK_MULTI_THREADS, CONVBIAS_GI_WINOGRAD_F63_4_NCHW44) {
    using namespace conv_bias;
    Checker<ConvBiasForward> checker(handle());
    const std::vector<TestArg> test_cases =
            get_nchw44_conv_bias_args({3}, QUAN_NLMODE, BR_AND_NO_BIASMODE, 1);
    check_winograd(
            "4:6:16", checker, test_cases, param::MatrixMul::Format::MK4,
            param::ConvBias::Format::NCHW44);
}
// Winograd F54 (4x4 kernels) with the default matmul format.
TEST_F(FALLBACK_MULTI_THREADS, CONVBIAS_GI_WINOGRAD_F54) {
    using namespace conv_bias;
    Checker<ConvBiasForward> checker(handle());
    const std::vector<TestArg> test_cases = get_winograd_args(4);
    check_winograd("1:5:32", checker, test_cases);
}
// Winograd F45 (5x5 kernels) with the default matmul format.
TEST_F(FALLBACK_MULTI_THREADS, CONVBIAS_GI_WINOGRAD_F45) {
    using namespace conv_bias;
    Checker<ConvBiasForward> checker(handle());
    const std::vector<TestArg> test_cases = get_winograd_args(5);
    check_winograd("1:4:32", checker, test_cases);
}
// Winograd F63, default format, via the weight-preprocess proxy.
TEST_F(FALLBACK_MULTI_THREADS, CONVBIAS_GI_WINOGRAD_F63_WEIGHT_PREPROCESS) {
    using namespace conv_bias;
    Checker<ConvBiasForward, OprWeightPreprocessProxy<ConvBiasForward>> checker(
            handle());
    const std::vector<TestArg> test_cases = get_winograd_args(3);
    check_winograd("1:6:32", checker, test_cases);
}
// Winograd F63 / MK4, multi-threaded, via the weight-preprocess proxy.
TEST_F(FALLBACK_MULTI_THREADS, CONVBIAS_GI_WINOGRAD_F63_4_WEIGHT_PREPROCESS) {
    using namespace conv_bias;
    Checker<ConvBiasForward, OprWeightPreprocessProxy<ConvBiasForward>> checker(
            handle());
    const std::vector<TestArg> test_cases = get_winograd_mk_packed_args();
    check_winograd("4:6:16", checker, test_cases, param::MatrixMul::Format::MK4);
}
// Winograd F63 / MK4 on NCHW44, via the weight-preprocess proxy.
TEST_F(FALLBACK_MULTI_THREADS, CONVBIAS_GI_WINOGRAD_F63_4_NCHW44_WEIGHT_PREPROCESS) {
    using namespace conv_bias;
    Checker<ConvBiasForward, OprWeightPreprocessProxy<ConvBiasForward>> checker(
            handle());
    const std::vector<TestArg> test_cases =
            get_nchw44_conv_bias_args({3}, QUAN_NLMODE, BR_AND_NO_BIASMODE, 1);
    check_winograd(
            "4:6:16", checker, test_cases, param::MatrixMul::Format::MK4,
            param::ConvBias::Format::NCHW44);
}
// Winograd F54, default format, via the weight-preprocess proxy.
TEST_F(FALLBACK_MULTI_THREADS, CONVBIAS_GI_WINOGRAD_F54_WEIGHT_PREPROCESS) {
    using namespace conv_bias;
    Checker<ConvBiasForward, OprWeightPreprocessProxy<ConvBiasForward>> checker(
            handle());
    const std::vector<TestArg> test_cases = get_winograd_args(4);
    check_winograd("1:5:32", checker, test_cases);
}
// Winograd F45, default format, via the weight-preprocess proxy.
TEST_F(FALLBACK_MULTI_THREADS, CONVBIAS_GI_WINOGRAD_F45_WEIGHT_PREPROCESS) {
    using namespace conv_bias;
    Checker<ConvBiasForward, OprWeightPreprocessProxy<ConvBiasForward>> checker(
            handle());
    const std::vector<TestArg> test_cases = get_winograd_args(5);
    check_winograd("1:4:32", checker, test_cases);
}
TEST_F(FALLBACK_MULTI_THREADS, CONVBIAS_GI_WINOGRAD_PREPROCESS_NCHW44) {
using namespace conv_bias;
std::vector<TestArg> nchw44_args = conv_bias::get_nchw44_conv_bias_args(
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
To comment, please register.