Commit be205727 authored by Megvii Engine Team, committed by Xu Xinran

fix(mge): fix some warnings

GitOrigin-RevId: 38b285f991fd92db70b5ed0c320823b7aef09862
Parent 5b87e8a8
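Note: almost every hunk in this commit is the same compiler-warning fix applied at different sites — parameters that a given specialization never reads (IH, pad_right, param, ld_dst_oc, algo_selection_strategy) are explicitly marked as used — plus two dropped runtime-warning log calls and one brace-initialization fix. As a minimal sketch of the idiom (the macro's real definition lives in MegDNN's headers and may differ in spelling), MEGDNN_MARK_USED_VAR is typically just a void cast:

    // Assumed definition; the exact form in MegDNN may differ.
    #define MEGDNN_MARK_USED_VAR(var) static_cast<void>(var)

Casting to void consumes the value without generating any code, so -Wunused-parameter and -Wunused-variable stay quiet with zero behavioral change — which is why each hunk below shows one added line and an otherwise untouched body.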
@@ -90,12 +90,11 @@ inline int8x16_t vqtbl1q_s8_v7(int8x16_t a, uint8x16_t index) {
     _sum1##_c_idx = vdotq_s32(_sum1##_c_idx, _k##_k2_idx, _elem);

 template <bool first_ic, bool last_ic, BiasMode bias_mode, typename Op>
-void conv_bias::conv_direct_stride1_2x2_int8_dot(const int8_t* src,
-                                                 const int8_t* filter,
-                                                 const int32_t* bias, int32_t* temp,
-                                                 int8_t* dst, const size_t IH,
-                                                 const size_t IW, const size_t OH,
-                                                 const size_t OW, const Op& op) {
+void conv_bias::conv_direct_stride1_2x2_int8_dot(
+        const int8_t* src, const int8_t* filter, const int32_t* bias,
+        int32_t* temp, int8_t* dst, const size_t IH, const size_t IW,
+        const size_t OH, const size_t OW, const Op& op) {
+    MEGDNN_MARK_USED_VAR(IH);
     const size_t tail_step = IW - OW;
     const uint8x16_t _idx0 = {0, 1, 16, 16, 1, 2, 16, 16,
                               2, 3, 16, 16, 3, 4, 16, 16};
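Context for the kernels in this file: vdotq_s32 is the ARMv8.2-A dot-product intrinsic (SDOT), and vqtbl1q_s8_v7 in the hunk header is evidently a shim emulating the AArch64 vqtbl1q_s8 table lookup on ARMv7. The _idx vectors drive those table lookups to gather each convolution window; index 16 is out of range for the 16-byte table, so those lanes are written as zero padding. A scalar reference for what vdotq_s32(acc, a, b) computes:

    #include <cstdint>

    // Scalar reference for vdotq_s32 (SDOT): each of the four int32 lanes of
    // acc accumulates the dot product of four consecutive int8 lane pairs.
    void vdotq_s32_ref(int32_t acc[4], const int8_t a[16], const int8_t b[16]) {
        for (int lane = 0; lane < 4; ++lane)
            for (int k = 0; k < 4; ++k)
                acc[lane] += int32_t(a[4 * lane + k]) * int32_t(b[4 * lane + k]);
    }

With the zero-padded lanes contributing nothing, each int32 lane ends up holding one row-slice of the convolution sum.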
@@ -326,12 +325,11 @@ void conv_bias::conv_direct_stride1_2x2_int8_dot(const int8_t* src,
 }

 template <bool first_ic, bool last_ic, BiasMode bias_mode, typename Op>
-void conv_bias::conv_direct_stride1_3x3_int8_dot(const int8_t* src,
-                                                 const int8_t* filter,
-                                                 const int32_t* bias, int32_t* temp,
-                                                 int8_t* dst, const size_t IH,
-                                                 const size_t IW, const size_t OH,
-                                                 const size_t OW, const Op& op) {
+void conv_bias::conv_direct_stride1_3x3_int8_dot(
+        const int8_t* src, const int8_t* filter, const int32_t* bias,
+        int32_t* temp, int8_t* dst, const size_t IH, const size_t IW,
+        const size_t OH, const size_t OW, const Op& op) {
+    MEGDNN_MARK_USED_VAR(IH);
     const size_t tail_step = IW - OW;
     const uint8x16_t _idx0 = {0, 1, 2, 16, 1, 2, 3, 16,
@@ -562,12 +560,11 @@ void conv_bias::conv_direct_stride1_3x3_int8_dot(const int8_t* src,
 }

 template <bool first_ic, bool last_ic, BiasMode bias_mode, typename Op>
-void conv_bias::conv_direct_stride2_2x2_int8_dot(const int8_t* src,
-                                                 const int8_t* filter,
-                                                 const int32_t* bias, int32_t* temp,
-                                                 int8_t* dst, const size_t IH,
-                                                 const size_t IW, const size_t OH,
-                                                 const size_t OW, const Op& op) {
+void conv_bias::conv_direct_stride2_2x2_int8_dot(
+        const int8_t* src, const int8_t* filter, const int32_t* bias,
+        int32_t* temp, int8_t* dst, const size_t IH, const size_t IW,
+        const size_t OH, const size_t OW, const Op& op) {
+    MEGDNN_MARK_USED_VAR(IH);
     const size_t tail_step = IW - 2 * OW + IW;
     const uint8x16_t _idx0 = {0, 1, 16, 16, 2, 3, 16, 16,
@@ -658,12 +655,11 @@ void conv_bias::conv_direct_stride2_2x2_int8_dot(const int8_t* src,
 }

 template <bool first_ic, bool last_ic, BiasMode bias_mode, typename Op>
-void conv_bias::conv_direct_stride2_3x3_int8_dot(const int8_t* src,
-                                                 const int8_t* filter,
-                                                 const int32_t* bias, int32_t* temp,
-                                                 int8_t* dst, const size_t IH,
-                                                 const size_t IW, const size_t OH,
-                                                 const size_t OW, const Op& op) {
+void conv_bias::conv_direct_stride2_3x3_int8_dot(
+        const int8_t* src, const int8_t* filter, const int32_t* bias,
+        int32_t* temp, int8_t* dst, const size_t IH, const size_t IW,
+        const size_t OH, const size_t OW, const Op& op) {
+    MEGDNN_MARK_USED_VAR(IH);
     const size_t tail_step = IW - 2 * OW + IW;
     const uint8x16_t _idx0 = {0, 1, 2, 16, 2, 3, 4, 16,
@@ -814,12 +810,11 @@ void conv_bias::conv_direct_stride2_3x3_int8_dot(const int8_t* src,
     _sum1##_c_idx = vdotq_s32(_sum1##_c_idx, _k##_k11_idx, _elem);

 template <bool first_ic, bool last_ic, BiasMode bias_mode, typename Op>
-void conv_bias::conv_direct_stride2_5x5_int8_dot(const int8_t* src,
-                                                 const int8_t* filter,
-                                                 const int32_t* bias, int32_t* temp,
-                                                 int8_t* dst, const size_t IH,
-                                                 const size_t IW, const size_t OH,
-                                                 const size_t OW, const Op& op) {
+void conv_bias::conv_direct_stride2_5x5_int8_dot(
+        const int8_t* src, const int8_t* filter, const int32_t* bias,
+        int32_t* temp, int8_t* dst, const size_t IH, const size_t IW,
+        const size_t OH, const size_t OW, const Op& op) {
+    MEGDNN_MARK_USED_VAR(IH);
     const size_t tail_step = IW - 2 * OW + IW;
     const uint8x16_t _idx00 = {0, 1, 2, 3, 2, 3, 4, 5, 4, 5, 6, 7, 6, 7, 8, 9};
@@ -1113,12 +1108,11 @@ void conv_bias::conv_direct_stride2_5x5_int8_dot(const int8_t* src,
 }

 template <bool first_ic, bool last_ic, BiasMode bias_mode, typename Op>
-void conv_bias::conv_direct_stride2_7x7_int8_dot(const int8_t* src,
-                                                 const int8_t* filter,
-                                                 const int32_t* bias, int32_t* temp,
-                                                 int8_t* dst, const size_t IH,
-                                                 const size_t IW, const size_t OH,
-                                                 const size_t OW, const Op& op) {
+void conv_bias::conv_direct_stride2_7x7_int8_dot(
+        const int8_t* src, const int8_t* filter, const int32_t* bias,
+        int32_t* temp, int8_t* dst, const size_t IH, const size_t IW,
+        const size_t OH, const size_t OW, const Op& op) {
+    MEGDNN_MARK_USED_VAR(IH);
     const size_t tail_step = IW - 2 * OW + IW;
     const uint8x16_t _idx00 = {0, 1, 2, 3, 2, 3, 4, 5, 4, 5, 6, 7, 6, 7, 8, 9};
@@ -1476,12 +1470,11 @@ void conv_bias::conv_direct_stride2_7x7_int8_dot(const int8_t* src,
 }

 template <bool first_ic, bool last_ic, BiasMode bias_mode, typename Op>
-void conv_bias::conv_direct_stride1_5x5_int8_dot(const int8_t* src,
-                                                 const int8_t* filter,
-                                                 const int32_t* bias, int32_t* temp,
-                                                 int8_t* dst, const size_t IH,
-                                                 const size_t IW, const size_t OH,
-                                                 const size_t OW, const Op& op) {
+void conv_bias::conv_direct_stride1_5x5_int8_dot(
+        const int8_t* src, const int8_t* filter, const int32_t* bias,
+        int32_t* temp, int8_t* dst, const size_t IH, const size_t IW,
+        const size_t OH, const size_t OW, const Op& op) {
+    MEGDNN_MARK_USED_VAR(IH);
     const size_t tail_step = IW - OW;
     const uint8x16_t _idx00 = {0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6};
@@ -1777,12 +1770,11 @@ void conv_bias::conv_direct_stride1_5x5_int8_dot(const int8_t* src,
 }

 template <bool first_ic, bool last_ic, BiasMode bias_mode, typename Op>
-void conv_bias::conv_direct_stride1_7x7_int8_dot(const int8_t* src,
-                                                 const int8_t* filter,
-                                                 const int32_t* bias, int32_t* temp,
-                                                 int8_t* dst, const size_t IH,
-                                                 const size_t IW, const size_t OH,
-                                                 const size_t OW, const Op& op) {
+void conv_bias::conv_direct_stride1_7x7_int8_dot(
+        const int8_t* src, const int8_t* filter, const int32_t* bias,
+        int32_t* temp, int8_t* dst, const size_t IH, const size_t IW,
+        const size_t OH, const size_t OW, const Op& op) {
+    MEGDNN_MARK_USED_VAR(IH);
     const size_t tail_step = IW - OW;
     const uint8x16_t _idx00 = {0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6};
......
@@ -29,6 +29,7 @@ void copy_packed_src_int8_nchw44<1>(int8_t* dst, const int dst_step,
                                     const int ih, const int pad_left,
                                     const int pad_right, const int pad_top,
                                     const int pad_bottom) {
+    MEGDNN_MARK_USED_VAR(pad_right);
     constexpr int IC_PACK_SIZE = 4;
     rep_step(ic_idx, ic, IC_PACK_SIZE) {
         const int8_t* i_src = src + ic_idx * ic_step;
@@ -66,6 +67,7 @@ void copy_packed_src_int8_nchw44<2>(int8_t* dst, const int dst_step,
                                     const int ih, const int pad_left,
                                     const int pad_right, const int pad_top,
                                     const int pad_bottom) {
+    MEGDNN_MARK_USED_VAR(pad_right);
     constexpr int IC_PACK_SIZE = 4;
     int odd_start = megdnn::div_ceil(dst_step, 2);
     bool nochange = pad_left % 2 == 0;
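This stride-2 packing appears to split the packed row into an even-column half and an odd-column half, with odd_start marking where the odd columns begin; rounding the division up gives the even half the extra slot when dst_step is odd, and nochange records whether pad_left is even, i.e. presumably whether padding swaps columns between the two halves. A sketch, assuming the usual definition of the helper:

    // Assumed definition of megdnn::div_ceil (positive integers).
    template <typename T>
    constexpr T div_ceil(T a, T b) {
        return (a + b - 1) / b;
    }
    static_assert(div_ceil(7, 2) == 4, "7 columns -> 4 even, 3 odd");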
......
@@ -163,6 +163,7 @@ static void conv_kern(WorkspaceBundle bundle,
 bool ConvBiasImpl::AlgoDotS8Direct_NCHW44::usable(
         FallbackConvBiasImpl*, const NCBKernSizeParam& param,
         AlgoSelectionStrategy algo_selection_strategy) const {
+    MEGDNN_MARK_USED_VAR(algo_selection_strategy);
     auto&& fm = param.filter_meta;
     auto FH = fm.spatial[0];
     auto FW = fm.spatial[1];
@@ -199,6 +200,7 @@ bool ConvBiasImpl::AlgoDotS8Direct_NCHW44::usable(
 bool ConvBiasImpl::AlgoDotS8Direct_NCHW44::is_preferred(
         megdnn::fallback::ConvBiasImpl*, const NCBKernSizeParam& param) const {
+    MEGDNN_MARK_USED_VAR(param);
     return true;
 }
......
@@ -98,6 +98,7 @@ template <int ow_remain, typename Op, typename T>
 struct StoreOCxOWx<1, ow_remain, Op, T> {
     static void impl(int32x4_t res[][8], const Op& op, T* dst_ptr,
                      const int ld_dst_oc) {
+        MEGDNN_MARK_USED_VAR(ld_dst_oc);
         switch (ow_remain) {
             case 8:
                 UNROLL_CALL_RAW(4, cb12);
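ld_dst_oc is evidently the destination stride between output-channel blocks; this StoreOCxOWx<1, ...> specialization stores a single OC block, so the stride is never dereferenced and only needs to be marked as used to keep the shared template interface warning-free.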
......
@@ -337,14 +337,11 @@ ConvBias::WinogradParam ConvBias::parse_winograd_name(
                 &(ret.channel_block_size), &(ret.output_block_size),
                 &(ret.tile_size));
     if (strcmp(name, pre.c_str())) {
-        megdnn_log_warn("algo %s is not %s algo", name, pre.c_str());
         ret = INVALID_WINOGRAD_PARAM;
         return false;
     }
     if (ret.tile_size == 0 || ret.output_block_size == 0 ||
         ret.channel_block_size == 0) {
-        megdnn_log_warn("the algo name %s is not suitable for %s",
-                        algo_name.c_str(), pre.c_str());
         ret = INVALID_WINOGRAD_PARAM;
         return false;
     }
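This is the one behavioral tweak in the commit: parse_winograd_name is evidently also called while probing algorithm names that are not winograd algorithms at all, so a failed parse is an expected outcome rather than an error. Returning INVALID_WINOGRAD_PARAM silently, instead of logging through megdnn_log_warn on every mismatch, removes the spurious warning spam; callers can still detect the failure through the returned sentinel value.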
......
@@ -795,7 +795,7 @@ bool CpuCompNode::CompNodeImpl::check_global_finalized(const char* reason) {
 /* ======================== CompNode methods ======================== */
 CompNode CompNode::default_cpu() {
-    static Locator locator{DeviceType::CPU, Locator::DEVICE_CPU_DEFAULT, -1};
+    static Locator locator{DeviceType::CPU, Locator::DEVICE_CPU_DEFAULT, {-1}};
     static auto empty_queue =
             std::make_shared<CpuCompNode::WorkerQueue>(locator);
     static CpuCompNodeImpl impl{locator, locator, empty_queue};
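The added braces suggest Locator's last member is itself an aggregate (or otherwise list-initialized), in which case initializing it through brace elision is legal C++ but trips Clang's -Wmissing-braces. A minimal illustration with hypothetical stand-in types:

    // Hypothetical types; the real Locator is defined in MegBrain's CompNode.
    struct Stream { int id; };
    struct Loc { int type; int device; Stream stream; };

    Loc a{0, 1, -1};   // brace elision: valid, but Clang warns (-Wmissing-braces)
    Loc b{0, 1, {-1}}; // explicit inner braces: same meaning, warning-free

Wrapping -1 as {-1} is therefore a pure warning fix with no change in the initialized value.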
......