Commit 03d68058 authored by Megvii Engine Team, committed by Xinran Xu

fix(windows/dnn/ci): fix cpu x86_windows_xp build flags issue

* MKL releases newer than 11.1.4 (build date 20140806) crash on 32-bit
Windows XP, so we cannot upgrade intel_mkl_windows_xp_archive to
MKL 2019.0.1 (build date 20180928). Instead, we now differentiate the XP
build from linux/macos/windows_no_xp at compile time, as sketched below.

GitOrigin-RevId: 97b049d7893fec217aaa837f0a6909991b788cf0
Parent d34e385d
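
For context, the sketch below (not part of the commit) shows the gate this diff
introduces, wrapped around the MKL packed-GEMM API (cblas_sgemm_alloc /
cblas_sgemm_pack / cblas_sgemm_compute / cblas_sgemm_free) that the gated
AlgoF32MKLPackA path uses. Only the MKL calls and the 20190001 version check
come from the source; the helper name sgemm_maybe_packed, the build command,
and the problem sizes are illustrative assumptions.

// Standalone illustration, not MegDNN code. Link against Intel MKL,
// e.g. g++ demo.cpp -lmkl_rt (file name and flags are assumptions).
#include <cstdio>
#include <vector>

#include <mkl.h>

// Same rule as the commit: only MKL >= 2019.0.1 enables the packed path;
// older MKL (the Windows XP archive) compiles the fallback branch instead.
#if INTEL_MKL_VERSION >= 20190001
#define SUPPORT_MKL_PACKED_GEMM 1
#else
#define SUPPORT_MKL_PACKED_GEMM 0
#endif

// Computes C = A(MxK) * B(KxN), row major, alpha = 1, beta = 0.
void sgemm_maybe_packed(MKL_INT M, MKL_INT N, MKL_INT K,
                        const float* A, const float* B, float* C) {
#if SUPPORT_MKL_PACKED_GEMM
    // Pack A once so it can be reused against many B matrices; this is the
    // scenario the packed-A algorithm in this diff targets.
    float* a_packed = cblas_sgemm_alloc(CblasAMatrix, M, N, K);
    cblas_sgemm_pack(CblasRowMajor, CblasAMatrix, CblasNoTrans, M, N, K,
                     1.0f, A, K, a_packed);
    cblas_sgemm_compute(CblasRowMajor, CblasPacked, CblasNoTrans, M, N, K,
                        a_packed, K, B, N, 0.0f, C, N);
    cblas_sgemm_free(a_packed);
#else
    // Old MKL (e.g. 11.1.4 from intel_mkl_windows_xp_archive): plain GEMM only.
    cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, M, N, K,
                1.0f, A, K, B, N, 0.0f, C, N);
#endif
}

int main() {
    const MKL_INT M = 2, N = 3, K = 4;
    std::vector<float> A(M * K, 1.0f), B(K * N, 1.0f), C(M * N, 0.0f);
    sgemm_maybe_packed(M, N, K, A.data(), B.data(), C.data());
    std::printf("C[0] = %g (expected 4)\n", C[0]);
    return 0;
}

Keying the guard on INTEL_MKL_VERSION rather than on the target OS means the XP
toolchain, pinned to MKL 11.1.4, simply loses the packed-A algorithm, while any
build whose MKL is new enough keeps it; no OS-specific flag is required.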
@@ -14,21 +14,9 @@
 #include "src/common/utils.h"
 #include "src/fallback/matrix_mul/gemm_impl.h"
 #include "src/x86/matrix_mul/int8/strategy.h"
-#include "src/x86/utils.h"
 #include "src/x86/matrix_mul/f32/strategy.h"
-#if MEGDNN_X86_WITH_MKL
-#include <mkl.h>
-#include <mkl_cblas.h>
-#elif MEGDNN_X86_WITH_OPENBLAS
-#include <cblas.h>
-#else
-#endif
-#if MEGDNN_X86_WITH_MKL_DNN
-#include <mkldnn.h>
-#endif
 MIDOUT_DECL(megdnn_x86_matmul_kern)
 MIDOUT_DECL(megdnn_x86_matmul_kern_mk8_8x8)
@@ -55,7 +43,7 @@ void f32_blas_kern(const MatrixMulImpl::KernParam& kern_param) {
 #endif
 }
-#if MEGDNN_X86_WITH_MKL
+#if MEGDNN_X86_WITH_MKL && SUPPORT_MKL_PACKED_GEMM
 void f32_blas_kern_only_packA(const MatrixMulImpl::KernParam& kern_param,
 const void* a_panel, const void* b_panel) {
 MEGDNN_MARK_USED_VAR(b_panel);
@@ -93,7 +81,7 @@ MatrixMulImpl::kern_t MatrixMulImpl::AlgoF32Blas::get_kern(
 }
 /* ===================== AlgoF32BlasPackA====================== */
-#if MEGDNN_X86_WITH_MKL
+#if MEGDNN_X86_WITH_MKL && SUPPORT_MKL_PACKED_GEMM
 bool MatrixMulImpl::AlgoF32MKLPackA::usable(
 const KernSizeParam& kern_size_param) const {
 return kern_size_param.compute_mode == Param::ComputeMode::DEFAULT &&
...
@@ -28,7 +28,7 @@ public:
 PackMode packmode() const override { return PackMode::NO_PACK; }
 };
-#if MEGDNN_X86_WITH_MKL
+#if MEGDNN_X86_WITH_MKL && SUPPORT_MKL_PACKED_GEMM
 class MatrixMulImpl::AlgoF32MKLPackA : public AlgoBase {
 public:
 bool is_reproducible() const override { return true; }
...
@@ -12,7 +12,6 @@
 #include "src/x86/matrix_mul/opr_impl.h"
 #include "src/common/metahelper.h"
 #include "src/x86/matrix_mul/algos.h"
-#include "src/x86/utils.h"
 using namespace megdnn;
 using namespace x86;
@@ -25,7 +24,7 @@ void* const MatrixMulImpl::sm_x86_algo_type = &x86_algo_type_storage;
 class MatrixMulImpl::AlgoPack : NonCopyableObj {
 AlgoF32Blas f32blas;
-#if MEGDNN_X86_WITH_MKL
+#if MEGDNN_X86_WITH_MKL && SUPPORT_MKL_PACKED_GEMM
 AlgoF32MKLPackA f32mkl_packa;
 #endif
 #if MEGDNN_X86_WITH_VNNI
@@ -57,7 +56,7 @@ public:
 all_algos.emplace_back(&algoint8x8x32mkldnn);
 #endif
 all_algos.emplace_back(&f32blas);
-#if MEGDNN_X86_WITH_MKL
+#if MEGDNN_X86_WITH_MKL && SUPPORT_MKL_PACKED_GEMM
 all_algos.emplace_back(&f32mkl_packa);
 #endif
 }
...
@@ -9,8 +9,22 @@
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */
 #pragma once
 #include "src/common/utils.h"
 #include "src/fallback/matrix_mul/opr_impl.h"
+#include "src/x86/utils.h"
+#if MEGDNN_X86_WITH_MKL
+#include <mkl.h>
+#include <mkl_cblas.h>
+#elif MEGDNN_X86_WITH_OPENBLAS
+#include <cblas.h>
+#else
+#endif
+#if MEGDNN_X86_WITH_MKL_DNN
+#include <mkldnn.h>
+#endif
 namespace megdnn {
 namespace x86 {
@@ -26,7 +40,7 @@ public:
 protected:
 static void* const sm_x86_algo_type;
 class AlgoF32Blas;
-#if MEGDNN_X86_WITH_MKL
+#if MEGDNN_X86_WITH_MKL && SUPPORT_MKL_PACKED_GEMM
 class AlgoF32MKLPackA;
 #endif
 #if MEGDNN_X86_WITH_VNNI
...
@@ -14,6 +14,17 @@
 #include <vector>
 #include "src/common/utils.h"
+#if MEGDNN_X86_WITH_MKL
+#include <mkl.h>
+//! packed GEMM is only supported with INTEL_MKL_VERSION >= 20190001, so gate SUPPORT_MKL_PACKED_GEMM on it
+#if INTEL_MKL_VERSION >= 20190001
+#define SUPPORT_MKL_PACKED_GEMM 1
+#else
+#define SUPPORT_MKL_PACKED_GEMM 0
+#endif
+#endif
 namespace megdnn {
 namespace x86 {
...
@@ -853,7 +853,7 @@ TEST_F(X86_MULTI_THREADS, CONV_BIAS_IM2COLMATMUL_FP32) {
 #undef cb
 }
-#if MEGDNN_X86_WITH_MKL
+#if MEGDNN_X86_WITH_MKL && SUPPORT_MKL_PACKED_GEMM
 TEST_F(X86_MULTI_THREADS, CONV_BIAS_IM2COLMATMUL_FP32_PACKA) {
 using namespace conv_bias;
 std::vector<TestArg> args;
...
@@ -52,7 +52,7 @@ TEST_F(X86, MATRIX_MUL_SSE_8X8X32) {
 handle(), "X86_INT8X8X32_SSE_4X8X2");
 }
-#if MEGDNN_X86_WITH_MKL
+#if MEGDNN_X86_WITH_MKL && SUPPORT_MKL_PACKED_GEMM
 TEST_F(X86, MATRIX_MUL_MKL_PACKA) {
 matrix_mul::check_matrix_mul(dtype::Float32{}, dtype::Float32{},
 dtype::Float32{}, handle(),
...