提交 819a4f1c 编写于 作者: M Megvii Engine Team 提交者: Xinran Xu

feat(cross_build): add cmake cross build ci

GitOrigin-RevId: ada9554bc22f68fcdce34485850f030c7bee2848
上级 d23fec06
......@@ -172,6 +172,7 @@ static void do_conv_kern(WorkspaceBundle bundle,
bool ConvBiasImpl::AlgoS8DirectStride2NCHWNCHW44::usable(
fallback::ConvBiasImpl*, const NCBKernSizeParam& param,
AlgoSelectionStrategy algo_selection_strategy) const {
MEGDNN_MARK_USED_VAR(algo_selection_strategy);
auto&& fm = param.filter_meta;
auto FH = fm.spatial[0];
auto OC = fm.ocpg;
......@@ -193,6 +194,8 @@ bool ConvBiasImpl::AlgoS8DirectStride2NCHWNCHW44::is_preferred(
megdnn::fallback::ConvBiasImpl* conv_bias_impl_ptr,
const NCBKernSizeParam& param) const {
// TODO: benchmark and fix
MEGDNN_MARK_USED_VAR(conv_bias_impl_ptr);
MEGDNN_MARK_USED_VAR(param);
return false;
}
......
......@@ -617,6 +617,7 @@ static void conv_direct_stride2_int8_nchw_nchw44(
int32_t* temp, int8_t* dst, const size_t oc, const size_t ic,
const size_t ih, const size_t iw, const size_t oh, const size_t ow,
const Op& op) {
MEGDNN_MARK_USED_VAR(temp);
constexpr size_t fh = filter_size;
constexpr size_t fw = filter_size;
constexpr size_t ic_step = 1;
......@@ -740,6 +741,18 @@ void conv_bias::conv_direct_stride2_2x2_int8_nchw_nchw44(
int32_t* temp, int8_t* dst, const size_t oc, const size_t ic,
const size_t ih, const size_t iw, const size_t oh, const size_t ow,
const Op& op) {
MEGDNN_MARK_USED_VAR(src);
MEGDNN_MARK_USED_VAR(filter);
MEGDNN_MARK_USED_VAR(bias);
MEGDNN_MARK_USED_VAR(temp);
MEGDNN_MARK_USED_VAR(dst);
MEGDNN_MARK_USED_VAR(oc);
MEGDNN_MARK_USED_VAR(ic);
MEGDNN_MARK_USED_VAR(ih);
MEGDNN_MARK_USED_VAR(iw);
MEGDNN_MARK_USED_VAR(oh);
MEGDNN_MARK_USED_VAR(ow);
MEGDNN_MARK_USED_VAR(op);
megdnn_assert(0, "not imple nchw_nchw44 2x2s2 conv");
}
......
......@@ -141,6 +141,7 @@ template <typename Op, typename T>
struct StoreOcxOw4Remain<1, 0, Op, T> {
static void impl(int32x4_t c[2][4], const Op& op, int8_t* dst_ptr,
int ld_dst_oc) {
MEGDNN_MARK_USED_VAR(ld_dst_oc);
op({{c[0][0], c[0][1]}}, reinterpret_cast<dt_qint8*>(dst_ptr));
op({{c[0][2], c[0][3]}}, reinterpret_cast<dt_qint8*>(dst_ptr + 8));
}
......@@ -149,6 +150,7 @@ struct StoreOcxOw4Remain<1, 0, Op, T> {
template <typename Op, typename T>
struct StoreOcxOw4Remain<1, 3, Op, T> {
static void impl(T& c, const Op& op, int8_t* dst_ptr, int ld_dst_oc) {
MEGDNN_MARK_USED_VAR(ld_dst_oc);
op({{c[0][0], c[0][1]}}, reinterpret_cast<dt_qint8*>(dst_ptr));
op(c[0][2], reinterpret_cast<dt_qint8*>(dst_ptr + 8));
}
......@@ -156,12 +158,14 @@ struct StoreOcxOw4Remain<1, 3, Op, T> {
template <typename Op, typename T>
struct StoreOcxOw4Remain<1, 2, Op, T> {
static void impl(T& c, const Op& op, int8_t* dst_ptr, int ld_dst_oc) {
MEGDNN_MARK_USED_VAR(ld_dst_oc);
op({{c[0][0], c[0][1]}}, reinterpret_cast<dt_qint8*>(dst_ptr));
}
};
template <typename Op, typename T>
struct StoreOcxOw4Remain<1, 1, Op, T> {
static void impl(T& c, const Op& op, int8_t* dst_ptr, int ld_dst_oc) {
MEGDNN_MARK_USED_VAR(ld_dst_oc);
op(c[0][0], reinterpret_cast<dt_qint8*>(dst_ptr));
}
};
......@@ -360,6 +364,7 @@ struct InitOcxOw4<2, bias_mode, T> {
template <BiasMode bias_mode, typename T>
struct InitOcxOw4<1, bias_mode, T> {
static void impl(T& c, const int32_t* bias_ptr, int oc_step) {
MEGDNN_MARK_USED_VAR(oc_step);
if (bias_mode == BiasMode::BROADCAST_CHANNEL_BIAS) {
#define BAIS_INIT(step) c[0][step] = vld1q_s32(bias_ptr);
UNROLL_CALL_RAW(4, BAIS_INIT);
......@@ -412,6 +417,7 @@ template <int base_offset, int ptr_step, typename Func, typename T,
typename... XT>
struct LoadHelper<4, base_offset, ptr_step, 0, Func, T, XT...> {
static void impl(T& src, const int8_t* ptr, int oc_offset, XT... args) {
MEGDNN_MARK_USED_VAR(oc_offset);
UNROLL_CALL_RAW(4, WEIGHT_CB);
}
};
......@@ -419,6 +425,7 @@ template <int base_offset, int ptr_step, typename Func, typename T,
typename... XT>
struct LoadHelper<5, base_offset, ptr_step, 0, Func, T, XT...> {
static void impl(T& src, const int8_t* ptr, int oc_offset, XT... args) {
MEGDNN_MARK_USED_VAR(oc_offset);
UNROLL_CALL_RAW(5, WEIGHT_CB);
}
};
......@@ -426,6 +433,7 @@ template <int base_offset, int ptr_step, typename Func, typename T,
typename... XT>
struct LoadHelper<6, base_offset, ptr_step, 0, Func, T, XT...> {
static void impl(T& src, const int8_t* ptr, int oc_offset, XT... args) {
MEGDNN_MARK_USED_VAR(oc_offset);
UNROLL_CALL_RAW(6, WEIGHT_CB);
}
};
......@@ -436,12 +444,14 @@ struct LoadHelper<6, base_offset, ptr_step, 0, Func, T, XT...> {
// Partial specialization: load count == 1 (last template flag == 1 —
// presumably selects the per-OC-offset weight layout; confirm against the
// WEIGHT_CB definition at the expansion site). Expands WEIGHT_CB exactly once
// via UNROLL_CALL_RAW to fill `src` from `ptr`.
template <int base_offset, int ptr_step, typename Func, typename T>
struct LoadHelper<1, base_offset, ptr_step, 1, Func, T> {
static void impl(T& src, const int8_t* ptr, int oc_offset) {
// oc_offset belongs to the shared LoadHelper interface but is not used
// by this specialization; suppress the unused-parameter warning.
MEGDNN_MARK_USED_VAR(oc_offset);
UNROLL_CALL_RAW(1, WEIGHT_CB);
}
};
// Partial specialization: load count == 2. Expands WEIGHT_CB twice via
// UNROLL_CALL_RAW to fill `src` from `ptr` (WEIGHT_CB is a macro defined
// where this helper is instantiated, out of view here).
template <int base_offset, int ptr_step, typename Func, typename T>
struct LoadHelper<2, base_offset, ptr_step, 1, Func, T> {
static void impl(T& src, const int8_t* ptr, int oc_offset) {
// oc_offset is unused in this specialization; mark it used to keep the
// uniform interface warning-free.
MEGDNN_MARK_USED_VAR(oc_offset);
UNROLL_CALL_RAW(2, WEIGHT_CB);
}
};
......@@ -449,6 +459,7 @@ struct LoadHelper<2, base_offset, ptr_step, 1, Func, T> {
// Partial specialization: load count == 3. Expands WEIGHT_CB three times via
// UNROLL_CALL_RAW to fill `src` from `ptr` (WEIGHT_CB is a macro defined at
// the expansion site, not visible in this fragment).
template <int base_offset, int ptr_step, typename Func, typename T>
struct LoadHelper<3, base_offset, ptr_step, 1, Func, T> {
static void impl(T& src, const int8_t* ptr, int oc_offset) {
// oc_offset is part of the common LoadHelper signature but unused here;
// suppress the unused-parameter warning.
MEGDNN_MARK_USED_VAR(oc_offset);
UNROLL_CALL_RAW(3, WEIGHT_CB);
}
};
......@@ -497,4 +508,4 @@ inline void load_helper_x(T& weight, const int8_t* ptr, int oc_offset,
}
} // namespace
} // namespace megdnn
\ No newline at end of file
} // namespace megdnn
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册