Commit 1af48631 authored by: Megvii Engine Team

fix(dnn/fallback): fix conv1x1's is_preferred method

GitOrigin-RevId: 6d34080d2557614cea97443d7a8153299714be31
Parent 206521cd
@@ -64,8 +64,8 @@ public:
all_algos.emplace_back(&int8x8x32_k8x12x4_dotprod);
#else
all_algos.emplace_back(&int8x8x32_gemv);
all_algos.emplace_back(&int8x8x32_k8x8x8);
all_algos.emplace_back(&int8x8x32_k4x4x16);
all_algos.emplace_back(&int8x8x32_k8x8x8);
all_algos.emplace_back(&int8x8x32_mk4_4x4x16);
#endif
all_algos.emplace_back(&int8x8x16_k4x4x16);
......
@@ -61,11 +61,11 @@ public:
all_algos.emplace_back(&int8x8x32_gemv);
#endif
all_algos.emplace_back(&int8x8x32_mk4_4x2x16);
all_algos.emplace_back(&int8x8x32_k4x8x8);
all_algos.emplace_back(&int8x8x32_k4x2x16);
all_algos.emplace_back(&int8x8x32_k4x8x8);
all_algos.emplace_back(&quint8_k4x8x8);
all_algos.emplace_back(&int8x8x16_k4x8x8);
all_algos.emplace_back(&int8x8x16_k4x2x16);
all_algos.emplace_back(&int8x8x16_k4x8x8);
all_algos.emplace_back(&int16x16x32_k12x4x1);
all_algos.emplace_back(&int16x16x32_mk8_4x8);
}
......
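For context, the two hunks above only reorder emplace_back calls inside each backend's AlgoPack constructor. The sketch below uses hypothetical Algo/AlgoPack types (not MegEngine's MatrixMulImpl internals) to illustrate the registration pattern: kernels live as members of the pack, pointers are pushed into all_algos, and the order of the emplace_back calls is the order in which the selection heuristic later walks the candidates.

```cpp
// Minimal sketch of the "algo pack" registration pattern seen above.
// Hypothetical types only; not MegEngine's actual MatrixMulImpl classes.
#include <cstdio>
#include <vector>

struct Algo {
    explicit Algo(const char* n) : name(n) {}
    const char* name;
};

class AlgoPack {
    // one member per kernel implementation
    Algo int8x8x32_gemv{"int8x8x32_gemv"};
    Algo int8x8x32_k4x4x16{"int8x8x32_k4x4x16"};
    Algo int8x8x32_k8x8x8{"int8x8x32_k8x8x8"};

public:
    std::vector<Algo*> all_algos;

    AlgoPack() {
        // Reordering these lines changes which kernel the heuristic
        // considers first, which is what the hunks above adjust.
        all_algos.emplace_back(&int8x8x32_gemv);
        all_algos.emplace_back(&int8x8x32_k4x4x16);
        all_algos.emplace_back(&int8x8x32_k8x8x8);
    }
};

int main() {
    AlgoPack pack;
    for (Algo* a : pack.all_algos)
        std::printf("%s\n", a->name);  // printed in registration order
    return 0;
}
```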
@@ -41,6 +41,10 @@ public:
SmallVector<NCBKern> dispatch_kerns(
ConvBiasImpl* opr, const NCBKernSizeParam& param) const override;
bool is_preferred(ConvBiasImpl*, const NCBKernSizeParam&) const override{
return true;
}
protected:
size_t get_oc_tile_size_heuristic(const NCBKernSizeParam& param) const;
......
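The hunk above adds an is_preferred override that makes the conv1x1 algorithm report itself as preferred. As a rough illustration only, with hypothetical ConvAlgo/ConvParam types and a choose() helper (not the real ConvBiasImpl/NCBKernSizeParam API), a dispatcher consuming such a flag might first look for an algorithm that is both usable and preferred, then fall back to any usable one:

```cpp
// Hedged sketch: how an is_preferred() hook can steer algorithm selection.
// All names here are illustrative, not MegEngine's actual interfaces.
#include <cstdio>
#include <vector>

struct ConvParam {                 // hypothetical convolution descriptor
    int filter_h = 1, filter_w = 1;
};

struct ConvAlgo {                  // hypothetical algorithm interface
    virtual ~ConvAlgo() = default;
    virtual const char* name() const = 0;
    virtual bool usable(const ConvParam&) const = 0;
    virtual bool is_preferred(const ConvParam&) const { return false; }
};

struct AlgoConv1x1 : ConvAlgo {
    const char* name() const override { return "CONV1x1"; }
    bool usable(const ConvParam& p) const override {
        return p.filter_h == 1 && p.filter_w == 1;   // only 1x1 filters
    }
    // Like the hunk above, report this algorithm as preferred whenever it
    // is applicable; the real heuristic may be more involved.
    bool is_preferred(const ConvParam&) const override { return true; }
};

struct AlgoIm2col : ConvAlgo {
    const char* name() const override { return "IM2COL"; }
    bool usable(const ConvParam&) const override { return true; }
};

const ConvAlgo* choose(const std::vector<const ConvAlgo*>& algos,
                       const ConvParam& p) {
    // First pass: usable AND preferred; second pass: any usable algorithm.
    for (const ConvAlgo* a : algos)
        if (a->usable(p) && a->is_preferred(p)) return a;
    for (const ConvAlgo* a : algos)
        if (a->usable(p)) return a;
    return nullptr;
}

int main() {
    AlgoConv1x1 conv1x1;
    AlgoIm2col im2col;
    std::vector<const ConvAlgo*> algos{&im2col, &conv1x1};
    ConvParam p{1, 1};
    std::printf("picked: %s\n", choose(algos, p)->name());  // picked: CONV1x1
    return 0;
}
```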
@@ -58,7 +58,9 @@ class CpuCompNode::WorkerQueue final
void on_async_queue_worker_thread_start() override {
mgb_assert(m_locator.device >= 0);
if (enable_affinity) {
#if !defined(ANDROID) && !defined(__ANDROID__)
sys::set_cpu_affinity({m_locator.device});
#endif
}
sys::set_thread_name(m_locator.to_string());
if(m_thread_pool)
......
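The last hunk wraps sys::set_cpu_affinity in an #if so that Android builds skip thread pinning. Below is a minimal sketch of the same guard pattern, assuming Linux's sched_setaffinity and an illustrative set_worker_affinity helper (not the sys:: wrapper used in the codebase):

```cpp
// Hedged sketch: pin the calling thread to a core only on platforms where
// it is supported, skipping Android builds as the diff above does.
#ifndef _GNU_SOURCE
#define _GNU_SOURCE 1   // needed for CPU_SET/sched_setaffinity on glibc
#endif
#include <cstdio>

#if defined(__linux__) && !defined(ANDROID) && !defined(__ANDROID__)
#include <sched.h>
#endif

// Illustrative helper; returns false where affinity is unsupported or skipped.
static bool set_worker_affinity(int cpu) {
#if defined(__linux__) && !defined(ANDROID) && !defined(__ANDROID__)
    cpu_set_t mask;
    CPU_ZERO(&mask);
    CPU_SET(cpu, &mask);
    // pid 0 means the calling thread for sched_setaffinity
    return sched_setaffinity(0, sizeof(mask), &mask) == 0;
#else
    (void)cpu;          // Android / non-Linux: silently skip, mirroring the
    return false;       // #if guard added in the hunk above
#endif
}

int main() {
    if (!set_worker_affinity(0))
        std::printf("affinity not set (unsupported platform or error)\n");
    return 0;
}
```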