Commit 3117bfb7 authored by Megvii Engine Team, committed by Xu Xinran

fix(dnn/arm): nchw44 direct int8 support 8832

Here "8832" denotes the qint8x8x32 configuration (int8 src and filter, int32 bias and dst), exercised by the new *_8832 tests below.

GitOrigin-RevId: 696fa05d943b28fcec3a236bb8518fb255eae9db
Parent 4e0c9ad3
......@@ -38,23 +38,6 @@ public:
const NCBKernSizeParam& param) const override;
};
class ConvBiasImpl::AlgoS8DirectStride1NCHW44 final : public AlgoBase {
public:
AlgoS8DirectStride1NCHW44() {}
bool is_reproducible() const override { return true; }
const char* name() const override { return "S8_NCHW44_DIRECT_STRD1"; }
bool usable(fallback::ConvBiasImpl* opr, const NCBKernSizeParam& param,
AlgoSelectionStrategy algo_selection_strategy) const override;
size_t get_workspace(fallback::ConvBiasImpl*,
const NCBKernSizeParam& param) const override;
virtual SmallVector<NCBKern> dispatch_kerns(
fallback::ConvBiasImpl* opr,
const NCBKernSizeParam& param) const override;
bool is_preferred(megdnn::fallback::ConvBiasImpl*,
const NCBKernSizeParam& param) const override;
};
class ConvBiasImpl::AlgoS8DirectStride2 final : public AlgoBase {
bool m_large_group;
......@@ -74,11 +57,11 @@ public:
const NCBKernSizeParam& param) const override;
};
class ConvBiasImpl::AlgoS8DirectStride2NCHW44 final : public AlgoBase {
class ConvBiasImpl::AlgoS8DirectNCHW44 final : public AlgoBase {
public:
AlgoS8DirectStride2NCHW44() {}
AlgoS8DirectNCHW44() {}
bool is_reproducible() const override { return true; }
const char* name() const override { return "S8_NCHW44_DIRECT_STRD2"; }
const char* name() const override { return "S8_NCHW44_DIRECT"; }
bool usable(fallback::ConvBiasImpl* opr, const NCBKernSizeParam& param,
AlgoSelectionStrategy algo_selection_strategy) const override;
size_t get_workspace(fallback::ConvBiasImpl*,
......@@ -245,8 +228,8 @@ private:
//=======================input int8 compute fp32 output int8============
class ConvBiasImpl::AlgoS8CF32WinogradF23_4x4_NCHW44 final : public AlgoBase {
public:
AlgoS8CF32WinogradF23_4x4_NCHW44(fallback::MatrixMulImpl::AlgoBase* matmul_algo,
uint32_t tile_size)
AlgoS8CF32WinogradF23_4x4_NCHW44(
fallback::MatrixMulImpl::AlgoBase* matmul_algo, uint32_t tile_size)
: m_matmul_algo{matmul_algo}, m_tile_size{tile_size} {}
bool is_reproducible() const override { return true; }
const char* name() const override {
......@@ -277,7 +260,7 @@ private:
class ConvBiasImpl::AlgoS8WinogradF23_8x8_NCHW44 final : public AlgoBase {
public:
AlgoS8WinogradF23_8x8_NCHW44(fallback::MatrixMulImpl::AlgoBase* matmul_algo,
uint32_t tile_size)
uint32_t tile_size)
: m_matmul_algo{matmul_algo}, m_tile_size{tile_size} {}
bool is_reproducible() const override { return true; }
const char* name() const override {
......
......@@ -36,26 +36,6 @@ KERN(stride2, 7, nchw)
#undef KERN
#define KERN(stride, i, layout) \
template <BiasMode bias_mode, typename Op, int remain_w> \
void conv_direct_##stride##_##i##x##i##_int8_##layout( \
const int8_t* src, const int8_t* filter, const int32_t* bias, \
int32_t* temp, int8_t* dst, const size_t OC, const size_t IC, \
const size_t IH, const size_t IW, const size_t OH, \
const size_t OW, const Op& op);
KERN(stride1, 2, nchw44)
KERN(stride1, 3, nchw44)
KERN(stride1, 5, nchw44)
KERN(stride1, 7, nchw44)
KERN(stride2, 2, nchw44)
KERN(stride2, 3, nchw44)
KERN(stride2, 5, nchw44)
KERN(stride2, 7, nchw44)
#undef KERN
void nchw44_pack_filter(const int8_t* src, int8_t* dst, int filter);
void nchw44_pack_src(const int8_t* src, int8_t* dst, int length);
} // namespace conv_bias
} // namespace arm_common
} // namespace megdnn
......
This diff is collapsed.
/**
* \file dnn/src/arm_common/conv_bias/int8/direct_stride1_nchw44_algo.cpp
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied.
*/
#include "megdnn/oprs.h"
#include "src/arm_common/conv_bias/int8/algos.h"
#include "src/arm_common/conv_bias/int8/direct.h"
#include "src/arm_common/conv_bias/int8/strategy.h"
#include "src/arm_common/elemwise_op.h"
#include "src/common/opr_delegate.h"
#include "midout.h"
using namespace megdnn;
using namespace arm_common;
using conv_fun = std::function<void(
WorkspaceBundle bundle, const ConvBiasImpl::NCBKernParam& kern_param,
const ConvBiasImpl::NCBKernIndex& ncb_index,
const CpuNDRange& workspace_ids, const CpuNDRange& ncb_range)>;
MIDOUT_DECL(megdnn_arm_common_conv_bias_int8_nchw44_stride1)
static void get_rectified_size(
const megdnn::fallback::ConvBiasImpl::NCBKernSizeParam& param,
size_t& IH2, size_t& IW2, size_t& OH2, size_t& OW2) {
auto&& fm = param.filter_meta;
auto SW = fm.stride[1];
auto OH = param.osz[0];
auto OW = param.osz[1];
auto FH = fm.spatial[0];
auto FW = fm.spatial[1];
OH2 = OH;
OW2 = (OW + 7) & ~7;
IH2 = SW * OH + FH - SW;
IW2 = SW * OW2 + FW - SW;
}
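A minimal standalone sketch of the rectification above (hypothetical driver, not MegDNN code): OW is rounded up to a multiple of 8 so the vectorized kernel never handles a partial output row, and IH2/IW2 are the input extents that the stride and filter then require.
#include <cstddef>
#include <cstdio>
int main() {
    // assume a stride-1, 3x3 filter producing a 30x30 output
    const std::size_t SW = 1, FH = 3, FW = 3, OH = 30, OW = 30;
    const std::size_t OW2 = (OW + 7) & ~7;       // 32: OW rounded up to 8
    const std::size_t IH2 = SW * OH + FH - SW;   // 32 input rows required
    const std::size_t IW2 = SW * OW2 + FW - SW;  // 34 input cols, for OW2
    std::printf("OW2=%zu IH2=%zu IW2=%zu\n", OW2, IH2, IW2);
    return 0;
}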
static WorkspaceBundle get_bundle(const ConvBiasImpl::NCBKernSizeParam& param) {
constexpr size_t src_expand = 4;
auto&& fm = param.filter_meta;
size_t group = fm.group;
size_t batch = param.n;
size_t IC = fm.icpg;
size_t OC = fm.ocpg;
size_t FH = fm.spatial[0];
size_t FW = fm.spatial[1];
size_t IH2, IW2, OH2, OW2;
get_rectified_size(param, IH2, IW2, OH2, OW2);
if (group == 1) {
size_t src_size =
batch * group * IC * IH2 * IW2 * sizeof(int8_t) * src_expand;
size_t weight_size = group * OC * IC * FH * FW * sizeof(int8_t);
return {nullptr, {src_size, weight_size}};
} else {
size_t src_size =
param.nr_threads * IC * IH2 * IW2 * sizeof(int8_t) * src_expand;
size_t weight_size = group * OC * IC * FH * FW * sizeof(int8_t);
return {nullptr, {src_size, weight_size}};
}
}
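A hedged reading of the two branches: with group == 1 the padded, 4x-expanded source for the whole batch is staged at once, while for grouped convolution each worker thread only ever needs one group's padded source. Rough numbers under assumed shapes:
// Assuming N = 2, group = 1, IC = 8, IH2 = IW2 = 32, src_expand = 4:
//   src_size    = 2 * 1 * 8 * 32 * 32 * 1 * 4 = 65536 bytes
//   weight_size = group * OC * IC * FH * FW bytes (packed filters)
// With group > 1 the source slot is per-thread instead:
//   src_size    = nr_threads * IC * IH2 * IW2 * 4 bytes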
static void copy_padding_kern(WorkspaceBundle bundle,
const ConvBiasImpl::NCBKernParam& kern_param,
const ConvBiasImpl::NCBKernIndex& ncb_index,
const CpuNDRange& workspace_ids) {
size_t IH = kern_param.isz[0];
size_t IW = kern_param.isz[1];
size_t IC = kern_param.filter_meta.icpg;
size_t PH = kern_param.filter_meta.padding[0];
size_t PW = kern_param.filter_meta.padding[1];
size_t GROUP = kern_param.filter_meta.group;
size_t IH2, IW2, OH2, OW2;
get_rectified_size(kern_param, IH2, IW2, OH2, OW2);
size_t padding_group_size = IH2 * IW2 * IC;
bundle.set(kern_param.workspace_ptr);
//! Used to compute the workspace offset
constexpr int pack_ic = 4;
constexpr int expend_element = 4;
// TODO: the block dim should be taken from the arg
size_t workspace_ic_block = 4;
size_t workspace_batch_id = workspace_ids[0];
size_t workspace_group_id = workspace_ids[1];
size_t workspace_ic_id = workspace_ids[2];
size_t workspace_ic = workspace_ic_id * workspace_ic_block;
size_t batch_id = ncb_index.ndrange_id[0];
size_t group_id = ncb_index.ndrange_id[1];
size_t group_pack_size = 1;
int nr_pad_h = PH * IW2 * pack_ic * expend_element;
int nr_pad_w = PW * pack_ic * expend_element;
int over_pad = std::max(0_z, IW2 - IW - 2 * PW) * pack_ic * expend_element;
//! copy to sptr_base to eliminate padding effect
const int8_t* sptr = static_cast<const int8_t*>(kern_param.src<int8_t>(
batch_id, group_id, workspace_ic_id, group_pack_size, pack_ic));
int8_t* sptr_base = static_cast<int8_t*>(bundle.get(0)) +
(workspace_batch_id * GROUP * padding_group_size +
workspace_group_id * padding_group_size +
workspace_ic * IH2 * IW2) *
expend_element;
size_t nr_ic = workspace_ic_block;
if (GROUP > 1) {
nr_ic = IC;
}
rep_step(ic_idx, nr_ic, pack_ic) {
std::memset(sptr_base, 0, nr_pad_h * sizeof(int8_t));
sptr_base += nr_pad_h;
rep(ih_idx, IH) {
std::memset(sptr_base, 0, nr_pad_w * sizeof(int8_t));
sptr_base += nr_pad_w;
conv_bias::nchw44_pack_src(sptr, sptr_base, IW);
sptr_base += IW * pack_ic * expend_element;
sptr += IW * pack_ic;
std::memset(sptr_base, 0, (nr_pad_w + over_pad) * sizeof(int8_t));
sptr_base += nr_pad_w + over_pad;
}
std::memset(sptr_base, 0, nr_pad_h * sizeof(int8_t));
sptr_base += nr_pad_h;
}
}
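Schematically, the loop above produces, for each 4-channel block, a zero-framed copy of the source with every byte expanded 4x (my reading of the code; widths are in expanded bytes):
//   top    : PH rows of zeros                               (nr_pad_h)
//   row i  : | PW zeros | packed IW columns | PW + over_pad zeros |
//   bottom : PH rows of zeros                               (nr_pad_h)
//
// over_pad absorbs the extra columns that appear when OW is rounded up
// to a multiple of 8, so every padded row is exactly IW2 columns wide.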
template <size_t filter, BiasMode bias_mode, typename Op, int ow_remain>
static void do_conv_kern(WorkspaceBundle bundle,
const ConvBiasImpl::NCBKernParam& kern_param,
const ConvBiasImpl::NCBKernIndex& ncb_index,
const CpuNDRange& workspace_ids,
const CpuNDRange& ncb_range) {
size_t OH = kern_param.osz[0];
size_t OW = kern_param.osz[1];
size_t FH = kern_param.filter_meta.spatial[0];
size_t FW = kern_param.filter_meta.spatial[1];
size_t IC = kern_param.filter_meta.icpg;
size_t OC = kern_param.filter_meta.ocpg;
size_t GROUP = kern_param.filter_meta.group;
size_t IH2, IW2, OH2, OW2;
get_rectified_size(kern_param, IH2, IW2, OH2, OW2);
bool need_post_process =
kern_param.dst_type.enumv() == DTypeEnum::QuantizedS8;
//! if dst_type is qint32, the op is unused; just fill it with (1.0f, 4.0f)
Op op = Op(1.0f, 4.0f);
if (need_post_process) {
float scale_bias =
kern_param.bias_type.param<dtype::QuantizedS32>().scale;
float scale_dst = kern_param.dst_type.param<dtype::QuantizedS8>().scale;
op = Op(scale_bias, scale_dst);
}
size_t padding_group_size = IH2 * IW2 * IC;
bundle.set(kern_param.workspace_ptr);
constexpr size_t pack_c = 4;
constexpr size_t src_expand_size = 4;
const size_t workspace_batch_id = workspace_ids[0];
const size_t workspace_group_id = workspace_ids[1];
const size_t batch_id = ncb_index.ndrange_id[0];
const size_t group_id = ncb_index.ndrange_id[1];
const size_t oc_id = ncb_index.ndrange_id[2];
const size_t oc_block_num = ncb_range[2];
size_t nr_pack_per_step = div_ceil(div_ceil(OC, pack_c), oc_block_num);
size_t oc_block = nr_pack_per_step * pack_c;
const size_t oc_idx = oc_id * oc_block;
if (oc_id == (oc_block_num - 1)) {
oc_block = OC - oc_id * nr_pack_per_step * pack_c;
}
megdnn_assert(oc_block % pack_c == 0,
"oc must be divisible by 4, but oc = %zu", oc_block);
const int8_t* sptr =
static_cast<int8_t*>(bundle.get(0)) +
workspace_batch_id * GROUP * padding_group_size * src_expand_size +
workspace_group_id * padding_group_size * src_expand_size;
const int8_t* fptr =
kern_param.filter<dt_int8>(group_id) + oc_idx * FH * FW * IC;
void* dst = reinterpret_cast<void*>(
reinterpret_cast<ptrdiff_t>(
kern_param.dst<void>(batch_id, group_id)) +
oc_idx * OH * OW);
const int32_t* bptr =
kern_param.bias<dt_int32>(batch_id, group_id) + oc_idx;
auto packed_weight = reinterpret_cast<int8_t*>(bundle.get(1)) +
group_id * OC * IC * FH * FW + oc_idx * IC * FH * FW;
conv_bias::nchw44_pack_filter(fptr, packed_weight,
oc_block / 4 * IC / 4 * FH * FW);
#define KERN1_NCHW44_CONV(filter) \
conv_bias::conv_direct_stride1_##filter##x##filter##_int8_nchw44< \
bias_mode, Op, ow_remain>(sptr, packed_weight, bptr, nullptr, \
static_cast<int8_t*>(dst), oc_block, IC, \
IH2, IW2, OH, OW, op)
DISPATCH_FILTER(filter, KERN1_NCHW44_CONV)
#undef KERN1_NCHW44_CONV
}
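The output-channel blocking splits OC into oc_block_num chunks of whole 4-channel packs, with the last chunk taking the remainder. A worked example, assuming OC = 20 and oc_block_num = 3:
// nr_pack_per_step = div_ceil(div_ceil(20, 4), 3) = div_ceil(5, 3) = 2
// oc_block         = 2 * 4 = 8 output channels per step
//   oc_id 0 -> oc_idx  0, oc_block 8
//   oc_id 1 -> oc_idx  8, oc_block 8
//   oc_id 2 -> oc_idx 16, oc_block = 20 - 2 * 2 * 4 = 4  (remainder)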
/* ===================== stride1 algo ===================== */
bool ConvBiasImpl::AlgoS8DirectStride1NCHW44::usable(
fallback::ConvBiasImpl*, const NCBKernSizeParam& param,
AlgoSelectionStrategy algo_selection_strategy) const {
MEGDNN_MARK_USED_VAR(algo_selection_strategy);
auto&& fm = param.filter_meta;
auto FH = fm.spatial[0];
auto OC = fm.ocpg;
auto IC = fm.icpg;
bool available = //! src and filter are qint8, dst is qint8 or qint32
((param.src_type.enumv() == DTypeEnum::QuantizedS8 &&
param.filter_type.enumv() == DTypeEnum::QuantizedS8 &&
(param.dst_type.enumv() == DTypeEnum::QuantizedS8 ||
param.dst_type.enumv() == DTypeEnum::QuantizedS32))) &&
(fm.format == param::Convolution::Format::NCHW44) &&
(OC % 4 == 0 && IC % 4 == 0 && OC >= 4) && !fm.should_flip &&
fm.spatial_ndim == 2 && fm.dilation[0] == 1 &&
fm.dilation[1] == 1 && fm.stride[0] == 1 && fm.stride[1] == 1 &&
FH == fm.spatial[1] && (FH == 2 || FH == 3 || FH == 5 || FH == 7) &&
param.bias_mode != BiasMode::BIAS;
return available;
}
bool ConvBiasImpl::AlgoS8DirectStride1NCHW44::is_preferred(
megdnn::fallback::ConvBiasImpl* conv_bias_impl_ptr,
const NCBKernSizeParam& param) const {
// TODO: benchmark and fix
MEGDNN_MARK_USED_VAR(conv_bias_impl_ptr);
MEGDNN_MARK_USED_VAR(param);
return false;
}
size_t ConvBiasImpl::AlgoS8DirectStride1NCHW44::get_workspace(
fallback::ConvBiasImpl*, const NCBKernSizeParam& param) const {
return get_bundle(param).total_size_in_bytes();
}
SmallVector<ConvBiasImpl::NCBKern>
ConvBiasImpl::AlgoS8DirectStride1NCHW44::dispatch_kerns(
fallback::ConvBiasImpl*, const NCBKernSizeParam& param) const {
auto fm = param.filter_meta;
size_t N = param.n;
size_t IC = fm.icpg;
size_t OC = fm.ocpg;
size_t OW = param.osz[1];
size_t group = fm.group;
size_t fh = fm.spatial[0];
size_t fw = fm.spatial[1];
WorkspaceBundle wbundle = get_bundle(param);
conv_fun do_conv_fun = nullptr;
int ow_remain = OW % 8;
// NOTE: remain_w is not used to generate the midout hash, so the hash stays
// compatible when the shape changes at runtime
#define DO_CONV_KERN_FUN(filter, bias_mode, remain_w, op) \
MIDOUT_BEGIN(megdnn_arm_common_conv_bias_int8_nchw44_stride1, \
midout_iv(#filter #bias_mode #op##_hash)) { \
do_conv_fun = do_conv_kern<filter, bias_mode, op, remain_w>; \
} \
MIDOUT_END();
#define GET_OP_PARAM(filter, bias_mode, remain_w) \
switch (param.nonlineMode) { \
case param::ConvBias::NonlineMode::IDENTITY: \
DO_CONV_KERN_FUN(filter, bias_mode, remain_w, \
TypeCvtOp<dt_qint32 MEGDNN_COMMA dt_qint8>) \
break; \
case param::ConvBias::NonlineMode::RELU: \
DO_CONV_KERN_FUN(filter, bias_mode, remain_w, \
ReluOp<dt_qint32 MEGDNN_COMMA dt_qint8>) \
break; \
case param::ConvBias::NonlineMode::H_SWISH: \
DO_CONV_KERN_FUN(filter, bias_mode, remain_w, \
HSwishOp<dt_qint32 MEGDNN_COMMA dt_qint8>) \
break; \
default: \
megdnn_assert(0); \
break; \
}
#define GET_REMAIN_W_PARAM(filter, bias_mode) \
switch (ow_remain) { \
case 0: \
GET_OP_PARAM(filter, bias_mode, 0); \
break; \
case 1: \
GET_OP_PARAM(filter, bias_mode, 1); \
break; \
case 2: \
GET_OP_PARAM(filter, bias_mode, 2); \
break; \
case 3: \
GET_OP_PARAM(filter, bias_mode, 3); \
break; \
case 4: \
GET_OP_PARAM(filter, bias_mode, 4); \
break; \
case 5: \
GET_OP_PARAM(filter, bias_mode, 5); \
break; \
case 6: \
GET_OP_PARAM(filter, bias_mode, 6); \
break; \
case 7: \
GET_OP_PARAM(filter, bias_mode, 7); \
break; \
default: \
megdnn_assert(0); \
}
#define GET_BIAS_MODE_PARAM(filter) \
switch (param.bias_mode) { \
case BiasMode::NO_BIAS: \
GET_REMAIN_W_PARAM(filter, BiasMode::NO_BIAS) \
break; \
case BiasMode::BROADCAST_CHANNEL_BIAS: \
GET_REMAIN_W_PARAM(filter, BiasMode::BROADCAST_CHANNEL_BIAS) \
break; \
default: \
megdnn_assert(0); \
break; \
}
#define DISPATCH_CONV_KERN() \
switch (param.filter_meta.spatial[0]) { \
case 2: \
GET_BIAS_MODE_PARAM(2) \
break; \
case 3: \
GET_BIAS_MODE_PARAM(3) \
break; \
case 5: \
GET_BIAS_MODE_PARAM(5) \
break; \
case 7: \
GET_BIAS_MODE_PARAM(7) \
break; \
default: \
megdnn_assert(0); \
break; \
}
DISPATCH_CONV_KERN();
#undef DO_CONV_KERN_FUN
#undef GET_REMAIN_W_PARAM
#undef GET_OP_PARAM
#undef GET_BIAS_MODE_PARAM
#undef DISPATCH_CONV_KERN
megdnn_assert(do_conv_fun);
SmallVector<ConvBiasImpl::NCBKern> ret_kerns;
WorkspaceBundle bundle = wbundle;
constexpr size_t pack_oc = 4;
size_t oc_step = pack_oc;
if (fh == 2 && fw == 2 && OC >= 8) {
oc_step = 8;
}
if (group == 1) {
CpuNDRange ncb_range = {N, group, div_ceil(OC, oc_step)};
auto copy_padding = [bundle](const NCBKernParam& kern_param,
const NCBKernIndex& ncb_index) {
copy_padding_kern(bundle, kern_param, ncb_index,
ncb_index.ndrange_id);
};
constexpr size_t pack_ic = 4;
ret_kerns.push_back({copy_padding, {N, group, div_ceil(IC, pack_ic)}});
auto do_conv = [bundle, do_conv_fun, ncb_range](
const NCBKernParam& kern_param,
const NCBKernIndex& ncb_index) {
do_conv_fun(bundle, kern_param, ncb_index, ncb_index.ndrange_id,
ncb_range);
};
ret_kerns.push_back({do_conv, ncb_range});
} else {
CpuNDRange ncb_range = {N, group, 1};
auto do_conv = [bundle, do_conv_fun, ncb_range](
const NCBKernParam& kern_param,
const NCBKernIndex& ncb_index) {
copy_padding_kern(bundle, kern_param, ncb_index,
{0, ncb_index.thread_id, 0});
do_conv_fun(bundle, kern_param, ncb_index,
{0, ncb_index.thread_id, 0}, ncb_range);
};
ret_kerns.push_back({do_conv, ncb_range});
}
return ret_kerns;
}
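The two schedules built above differ in how padding overlaps with compute; a sketch of my reading, not additional MegDNN API:
// group == 1: two kernel waves, each parallel over its own NDRange:
//   wave 1: copy_padding over {N, 1, div_ceil(IC, 4)}  -- pad everything
//   wave 2: do_conv      over {N, 1, div_ceil(OC, oc_step)}
// group > 1: one fused wave over {N, group, 1}; each task pads its own
//   group into a per-thread slot and convolves it immediately.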
// vim: syntax=cpp.doxygen
......@@ -46,11 +46,10 @@ class ConvBiasImpl::AlgoPack : NonCopyableObj {
AlgoQU8DirectStride1 qu8_direct_stride1_small_group{false};
AlgoS8DirectStride2 s8_direct_stride2_large_group{true};
AlgoS8DirectStride2 s8_direct_stride2_small_group{false};
AlgoS8DirectStride2NCHW44 s8_direct_stride2_nchw44;
AlgoS8DirectNCHW44 s8_direct_nchw44;
AlgoS8DirectNCHWNCHW44 s8_direct_nchw_nchw44;
AlgoS8DirectStride1 s8_direct_stride1_large_group{true};
AlgoS8DirectStride1 s8_direct_stride1_small_group{false};
AlgoS8DirectStride1NCHW44 s8_direct_stride1_nchw44;
AlgoS8ChanWiseStride1NCHW44 s8_channel_wise_stride1_nchw44;
AlgoS8ChanWiseStride2NCHW44 s8_channel_wise_stride2_nchw44;
......@@ -114,11 +113,10 @@ public:
direct_algos.emplace_back(&qu8_direct_stride1_small_group);
direct_algos.emplace_back(&s8_direct_stride2_large_group);
direct_algos.emplace_back(&s8_direct_stride2_small_group);
direct_algos.emplace_back(&s8_direct_stride2_nchw44);
direct_algos.emplace_back(&s8_direct_nchw44);
direct_algos.emplace_back(&s8_direct_nchw_nchw44);
direct_algos.emplace_back(&s8_direct_stride1_large_group);
direct_algos.emplace_back(&s8_direct_stride1_small_group);
direct_algos.emplace_back(&s8_direct_stride1_nchw44);
direct_algos.emplace_back(&s8_channel_wise_stride1_nchw44);
direct_algos.emplace_back(&s8_channel_wise_stride2_nchw44);
......
......@@ -37,9 +37,8 @@ protected:
private:
class AlgoS8DirectStride1;
class AlgoS8DirectStride1NCHW44;
class AlgoS8DirectStride2;
class AlgoS8DirectStride2NCHW44;
class AlgoS8DirectNCHW44;
class AlgoS8DirectNCHWNCHW44;
class AlgoQU8DirectStride1;
class AlgoQU8DirectStride2;
......
......@@ -27,6 +27,8 @@ struct NoneOp;
#define OP(_ctype, _neon_type, _neon_type2, _func_suffix, _simd_width) \
template <> \
struct NoneOp<_ctype> : NoneOpBase<_ctype> { \
NoneOp(){}; \
NoneOp(float, float){}; \
using NoneOpBase::NoneOpBase; \
using NoneOpBase::operator(); \
constexpr static size_t SIMD_WIDTH = _simd_width; \
......
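The added constructors let NoneOp be built through the same Op(scale_bias, scale_dst) pattern as the real post-process ops, which is presumably what the 8832 path relies on: when dst is qint32 there is no re-quantization, so a do-nothing Op is instantiated. A minimal illustration of the pattern (hypothetical types, not MegDNN code):
struct MyNoneOp {
    MyNoneOp() {}
    MyNoneOp(float, float) {}  // accepted and ignored, like NoneOp above
};

template <typename Op>
void run_kernel(float scale_bias, float scale_dst) {
    Op op(scale_bias, scale_dst);  // compiles for no-op and real ops alike
    (void)op;                      // a real kernel would apply op per pixel
}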
......@@ -226,7 +226,15 @@ static void benchmark_convbias(Handle* handle, std::string int_name,
run(1, 3, 32, 224, 224, 5, 1, true);
run(1, 3, 64, 224, 224, 7, 1, true);
for (size_t stride : {1, 2}) {
run(1, 64, 128, 56, 56, 3, 2, false);
run(1, 128, 256, 28, 28, 3, 2, false);
run(1, 256, 512, 14, 14, 3, 2, false);
run(1, 128, 128, 28, 28, 3, 1, false);
run(1, 256, 256, 14, 14, 3, 1, false);
run(1, 512, 512, 7, 7, 3, 1, false);
for (size_t stride : {1}) {
printf("stride %zu\n", stride);
for (size_t filter_size : {2, 3, 5, 7}) {
for (size_t img_size : {32}) {
......
......@@ -527,12 +527,22 @@ TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_INT8_STRIDE2_SMALL_GROUP) {
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_INT8_STRIDE1_NCHW44) {
checker_conv_bias_qint8x8x8(
get_nchw44_conv_bias_args({2, 3, 5, 7}, 1, false, false, false),
handle(), "S8_NCHW44_DIRECT_STRD1");
handle(), "S8_NCHW44_DIRECT");
}
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_INT8_STRIDE1_NCHW44_8832) {
checker_conv_bias_qint8x8x32(
get_nchw44_conv_bias_args({2, 3, 5, 7}, 1, false, false, true),
handle(), "S8_NCHW44_DIRECT");
}
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_INT8_STRIDE2_NCHW44_8832) {
checker_conv_bias_qint8x8x32(
get_nchw44_conv_bias_args({2, 3, 5, 7}, 2, false, false, true),
handle(), "S8_NCHW44_DIRECT");
}
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_INT8_STRIDE2_NCHW44) {
checker_conv_bias_qint8x8x8(
get_nchw44_conv_bias_args({2, 3, 5, 7}, 2, false, false, false),
handle(), "S8_NCHW44_DIRECT_STRD2");
handle(), "S8_NCHW44_DIRECT");
}
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_QS8_CHANNEL_WISE_DIRECT1_NCHW44) {
checker_conv_bias_qint8x8x8(
......@@ -1085,7 +1095,6 @@ TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_WINOGRAD_MK_PACKED_INT8) {
dtype::QuantizedS8(60.25f), param::MatrixMul::Format::MK8, 1e-3);
}
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_WINOGRAD_NCHW44_MK_PACKED_INT8) {
using namespace conv_bias;
......@@ -1096,17 +1105,17 @@ TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_WINOGRAD_NCHW44_MK_PACKED_INT8) {
param::MatrixMul::Format format, float eps) {
for (auto&& arg : args) {
for (uint32_t m : out_size) {
checker.set_extra_opr_impl(std::bind(
winograd_algo_extra_impl, std::placeholders::_1, m,
arg.param, handle, format));
checker.set_dtype(0, A_dtype)
.set_dtype(1, B_dtype)
.set_dtype(2, C_dtype)
.set_dtype(4, D_dtype)
.set_epsilon(eps)
.set_param(arg.param)
.execs({arg.src, arg.filter, arg.bias, {}, {}});
}
checker.set_extra_opr_impl(std::bind(
winograd_algo_extra_impl, std::placeholders::_1, m,
arg.param, handle, format));
checker.set_dtype(0, A_dtype)
.set_dtype(1, B_dtype)
.set_dtype(2, C_dtype)
.set_dtype(4, D_dtype)
.set_epsilon(eps)
.set_param(arg.param)
.execs({arg.src, arg.filter, arg.bias, {}, {}});
}
}
};
......@@ -1118,7 +1127,7 @@ TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_WINOGRAD_NCHW44_MK_PACKED_INT8) {
checker.set_before_exec_callback(conv_bias::ConvBiasAlgoChecker<ConvBias>(
ssprintf("WINOGRAD_NCHW44:%s:8:2:32", matmul_name).c_str()));
std::vector<TestArg> quantized_args = get_int8_nchw44_args (3,4);
std::vector<TestArg> quantized_args = get_int8_nchw44_args(3, 4);
UniformIntRNG int_rng{-50, 50};
checker.set_rng(0, &int_rng).set_rng(1, &int_rng).set_rng(2, &int_rng);
run(handle(), quantized_args, {2}, dtype::QuantizedS8(2.5f),
......@@ -1126,8 +1135,8 @@ TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_WINOGRAD_NCHW44_MK_PACKED_INT8) {
dtype::QuantizedS8(60.25f), param::MatrixMul::Format::MK8, 1e-3);
}
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_WINOGRAD_NCHW44_MK_PACKED_INT8_GROUPMODE) {
TEST_F(ARM_COMMON_MULTI_THREADS,
CONV_BIAS_WINOGRAD_NCHW44_MK_PACKED_INT8_GROUPMODE) {
using namespace conv_bias;
Checker<ConvBiasForward> checker(handle());
......@@ -1137,17 +1146,17 @@ TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_WINOGRAD_NCHW44_MK_PACKED_INT8_GROUPM
param::MatrixMul::Format format, float eps) {
for (auto&& arg : args) {
for (uint32_t m : out_size) {
checker.set_extra_opr_impl(std::bind(
winograd_algo_extra_impl, std::placeholders::_1, m,
arg.param, handle, format));
checker.set_dtype(0, A_dtype)
.set_dtype(1, B_dtype)
.set_dtype(2, C_dtype)
.set_dtype(4, D_dtype)
.set_epsilon(eps)
.set_param(arg.param)
.execs({arg.src, arg.filter, arg.bias, {}, {}});
}
checker.set_extra_opr_impl(std::bind(
winograd_algo_extra_impl, std::placeholders::_1, m,
arg.param, handle, format));
checker.set_dtype(0, A_dtype)
.set_dtype(1, B_dtype)
.set_dtype(2, C_dtype)
.set_dtype(4, D_dtype)
.set_epsilon(eps)
.set_param(arg.param)
.execs({arg.src, arg.filter, arg.bias, {}, {}});
}
}
};
......@@ -1168,7 +1177,8 @@ TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_WINOGRAD_NCHW44_MK_PACKED_INT8_GROUPM
dtype::QuantizedS8(60.25f), param::MatrixMul::Format::MK8, 1e-3);
}
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_WINOGRAD_NCHW44_MK_PACKED_INT8_COMP_F32) {
TEST_F(ARM_COMMON_MULTI_THREADS,
CONV_BIAS_WINOGRAD_NCHW44_MK_PACKED_INT8_COMP_F32) {
using namespace conv_bias;
Checker<ConvBiasForward> checker(handle());
......@@ -1196,21 +1206,22 @@ TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_WINOGRAD_NCHW44_MK_PACKED_INT8_COMP_F
#if MEGDNN_AARCH64
const char* matmul_name = "AARCH64_F32_MK4_4x16";
#else
const char* matmul_name = "ARMV7_F32_MK4_4x8";
const char* matmul_name = "ARMV7_F32_MK4_4x8";
#endif
checker.set_before_exec_callback(conv_bias::ConvBiasAlgoChecker<ConvBias>(
ssprintf("WINOGRAD_NCHW44:%s:4:2:32", matmul_name).c_str()));
std::vector<TestArg> quantized_args =
get_int8_nchw44_args(3, 4, true);
std::vector<TestArg> quantized_args = get_int8_nchw44_args(3, 4, true);
UniformIntRNG int_rng{-50, 50};
checker.set_rng(0, &int_rng).set_rng(1, &int_rng).set_rng(2, &int_rng);
run(handle(), quantized_args, {2}, dtype::QuantizedS8(0.41113496f),
dtype::QuantizedS8(0.01887994f),
dtype::QuantizedS32(0.41113496f * 0.01887994f),
dtype::QuantizedS8(0.49550694f), param::MatrixMul::Format::MK4, epsilon);
dtype::QuantizedS8(0.49550694f), param::MatrixMul::Format::MK4,
epsilon);
}
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_WINOGRAD_NCHW44_MK_PACKED_INT8_COMP_F32_GROUPMODE) {
TEST_F(ARM_COMMON_MULTI_THREADS,
CONV_BIAS_WINOGRAD_NCHW44_MK_PACKED_INT8_COMP_F32_GROUPMODE) {
using namespace conv_bias;
Checker<ConvBiasForward> checker(handle());
......@@ -1238,7 +1249,7 @@ TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_WINOGRAD_NCHW44_MK_PACKED_INT8_COMP_F
#if MEGDNN_AARCH64
const char* matmul_name = "AARCH64_F32_MK4_4x16";
#else
const char* matmul_name = "ARMV7_F32_MK4_4x8";
const char* matmul_name = "ARMV7_F32_MK4_4x8";
#endif
checker.set_before_exec_callback(conv_bias::ConvBiasAlgoChecker<ConvBias>(
ssprintf("WINOGRAD_NCHW44:%s:4:2:32", matmul_name).c_str()));
......@@ -1249,10 +1260,10 @@ TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_WINOGRAD_NCHW44_MK_PACKED_INT8_COMP_F
run(handle(), quantized_args, {2}, dtype::QuantizedS8(0.41113496f),
dtype::QuantizedS8(0.01887994f),
dtype::QuantizedS32(0.41113496f * 0.01887994f),
dtype::QuantizedS8(0.49550694f), param::MatrixMul::Format::MK4, epsilon);
dtype::QuantizedS8(0.49550694f), param::MatrixMul::Format::MK4,
epsilon);
}
#if __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_WINOGRAD_F16_F23) {
using namespace conv_bias;
......