提交 410dcb6c 编写于 作者: M Megvii Engine Team

feat(fallback): add more gi api for conv, and add gi API test

GitOrigin-RevId: 24eb2375029d1f589f40aaee7421924a3d7ee07b
上级 a0e53118
......@@ -38,8 +38,10 @@
#ifdef _WIN32
//! GI stand for general intrinsic
#define _GI_ALIGN_16 __declspec(align(16))
#define GI_DECLSPEC_ALIGN(variable, alignment) DECLSPEC_ALIGN(alignment) variable
#else
#define _GI_ALIGN_16 __attribute__((aligned(16)))
#define GI_DECLSPEC_ALIGN(variable, alignment) \
variable __attribute__((aligned(alignment)))
#endif
......@@ -82,8 +84,50 @@
#endif
#endif
#if defined(GI_TEST_NAIVE)
#undef GI_NEON_INTRINSICS
#undef GI_NEON64_INTRINSICS
#undef GI_NEON32_INTRINSICS
#undef GI_FMA_INTRINSICS
#undef GI_AVX2_INTRINSICS
#undef GI_AVX_INTRINSICS
#undef GI_SSE42_INTRINSICS
#undef GI_SSE2_INTRINSICS
#endif
//! general intrinsic support dynamic length simd, if avx or avx2 the simd
//! length is 256
#if defined(GI_AVX_INTRINSICS) || defined(GI_AVX2_INTRINSICS) || \
defined(GI_FMA_INTRINSICS)
//! if neon and sse the simd length is 128
#define GI_SIMD_LEN 256
#define GI_SIMD_LEN_BYTE 32
#elif defined(GI_NEON_INTRINSICS) || defined(GI_SSE2_INTRINSICS) || \
defined(GI_SSE42_INTRINSICS)
#define GI_SIMD_LEN 128
#define GI_SIMD_LEN_BYTE 16
#else
//! if no simd hardware support, the simd is implemented by C, default set to
//! 128
#define GI_SIMD_LEN 128
#define GI_SIMD_LEN_BYTE 16
#endif
#define gi_trap() __builtin_trap()
//! for ci test now
//! Identifies which SIMD backend this translation unit was compiled for.
//! Returned by GiGetSimdType(); used by CI to verify that the intended
//! code path (naive / SSE2 / SSE4.2 / AVX / NEON) is actually exercised.
//! Enumerator values are fixed explicitly so the ABI is stable.
enum GiSimdType {
    GI_UNKNOWN = 0,  //! no backend could be determined (should not happen)
    GI_NAIVE = 1,    //! plain-C fallback via GCC vector extensions
    GI_AVX = 2,      //! AVX/AVX2/FMA, 256-bit registers
    GI_SSE42 = 3,    //! SSE4.2, 128-bit registers
    GI_SSE2 = 4,     //! SSE2 only, 128-bit registers
    GI_NEON = 5,     //! ARM NEON, 128-bit registers
};
#if defined(GI_AVX_INTRINSICS) || defined(GI_AVX2_INTRINSICS) || \
defined(GI_FMA_INTRINSICS)
#define __gi_simd_type GI_AVX
typedef __m256 GI_FLOAT32_t;
typedef __m256i GI_UINT8_t;
typedef __m256i GI_INT8_t;
......@@ -91,46 +135,177 @@ typedef __m256i GI_INT16_t;
typedef __m256i GI_INT32_t;
typedef __m256i GI_UINT32_t;
#elif defined(GI_NEON_INTRINSICS)
#define __gi_simd_type GI_NEON
typedef float32x4_t GI_FLOAT32_t;
typedef uint8x16_t GI_UINT8_t;
typedef int8x16_t GI_INT8_t;
typedef int16x8_t GI_INT16_t;
typedef int32x4_t GI_INT32_t;
typedef uint32x4_t GI_UINT32_t;
typedef float32x4x2_t GI_FLOAT32_V2_t;
typedef float32x4x4_t GI_FLOAT32_V4_t;
typedef int32x4x2_t GI_INT32_V2_t;
typedef int32x4x4_t GI_INT32_V4_t;
typedef int16x8x2_t GI_INT16_V2_t;
typedef int8x16x2_t GI_INT8_V2_t;
typedef int64x2_t GI_INT64_t;
#elif defined(GI_SSE2_INTRINSICS) || defined(GI_SSE42_INTRINSICS)
#if defined(GI_SSE42_INTRINSICS)
#define __gi_simd_type GI_SSE42
#elif defined(GI_SSE2_INTRINSICS)
#define __gi_simd_type GI_SSE2
#else
#define __gi_simd_type GI_UNKNOWN
#error "code issue happened!!"
#endif
typedef __m128 GI_FLOAT32_t;
typedef __m128i GI_UINT8_t;
typedef __m128i GI_INT8_t;
typedef __m128i GI_INT16_t;
typedef __m128i GI_INT32_t;
typedef __m128i GI_UINT32_t;
typedef __m128i GI_INT64_t;
#define _INSERTPS_NDX(srcField, dstField) (((srcField) << 6) | ((dstField) << 4))
#define _M64(out, inp) _mm_storel_epi64((__m128i*)&(out), inp)
#define _pM128i(a) _mm_loadl_epi64((__m128i*)&(a))
#define _pM128(a) _mm_castsi128_ps(_pM128i(a))
#define _M128i(a) _mm_castps_si128(a)
#define _M128(a) _mm_castsi128_ps(a)
#if defined(__x86_64__)
#define _M64f(out, inp) out.m64_i64[0] = _mm_cvtsi128_si64(_M128i(inp));
#else
#define _M64f(out, inp) _mm_storel_epi64((__m128i*)&(out), _M128i(inp))
#endif
//! _SSE_SWITCH16: turns a runtime LANE value (0..15) into a compile-time
//! immediate by switch dispatch, as required by SSE intrinsics whose lane /
//! shift argument must be an immediate constant. NAME is the intrinsic (or
//! wrapper) to invoke; the argument list is spliced as "a b", where b
//! typically starts with _SSE_COMMA so a two-argument call can pass through
//! a single macro parameter. An out-of-range LANE traps via gi_trap(); the
//! trailing "return NAME(a b, 0)" after the trap only exists to keep the
//! compiler's control-flow analysis happy.
#define _SSE_SWITCH16(NAME, a, b, LANE) \
switch (LANE) { \
case 0: \
return NAME(a b, 0); \
case 1: \
return NAME(a b, 1); \
case 2: \
return NAME(a b, 2); \
case 3: \
return NAME(a b, 3); \
case 4: \
return NAME(a b, 4); \
case 5: \
return NAME(a b, 5); \
case 6: \
return NAME(a b, 6); \
case 7: \
return NAME(a b, 7); \
case 8: \
return NAME(a b, 8); \
case 9: \
return NAME(a b, 9); \
case 10: \
return NAME(a b, 10); \
case 11: \
return NAME(a b, 11); \
case 12: \
return NAME(a b, 12); \
case 13: \
return NAME(a b, 13); \
case 14: \
return NAME(a b, 14); \
case 15: \
return NAME(a b, 15); \
default: \
gi_trap(); \
return NAME(a b, 0); \
}
#if !defined(__SSE3__)
//! SSE2 fallback for the SSSE3 _mm_alignr_epi8: view b:a as a 32-byte value
//! and extract the 16 bytes starting at byte offset imm8 — i.e. the high
//! (16 - imm8) bytes of a followed by the low imm8 bytes of b.
//! NOTE(review): _mm_srli_si128/_mm_slli_si128 formally take an immediate;
//! this relies on constant propagation through forced inlining (callers go
//! through _SSE_SWITCH16, which supplies literal counts) — same contract as
//! the original.
GI_FORCEINLINE __m128i _sse2_mm_alignr_epi8(__m128i b, __m128i a, int imm8) {
    int left_shift = sizeof(__m128i) - imm8;
    __m128i low_part = _mm_srli_si128(a, imm8);
    __m128i high_part = _mm_slli_si128(b, left_shift);
    return _mm_or_si128(high_part, low_part);
}
#endif
#define _SSE_COMMA ,
//! Runtime-lane wrapper for the alignr operation: dispatches through
//! _SSE_SWITCH16 so the byte offset becomes an immediate at each call site.
//! Uses the native intrinsic when available, otherwise the SSE2 fallback;
//! both see the arguments in the same order, so results are identical.
//! NOTE(review): _mm_alignr_epi8 is an SSSE3 intrinsic but the guard tests
//! __SSE3__ — with -msse3 but not -mssse3 this would select the native
//! branch and fail to compile; confirm the intended feature macro.
GI_FORCEINLINE __m128i _MM_ALIGNR_EPI8(__m128i a, __m128i b, int LANE) {
#if !defined(__SSE3__)
    //! pre-SSSE3 path: emulated via shift + or
    _SSE_SWITCH16(_sse2_mm_alignr_epi8, a, _SSE_COMMA b, LANE)
#else
    //! native SSSE3 instruction
    _SSE_SWITCH16(_mm_alignr_epi8, a, _SSE_COMMA b, LANE)
#endif
}
//! NEON scalar type aliases for the SSE backend
typedef float float32_t;
typedef double float64_t;
//! Emulation of a 64-bit NEON "d" register on x86: a union exposing every
//! element view NEON provides (u64/i64/f64, u32/i32/f32, i16/u16, i8/u8).
//! All members alias the same 8 bytes.
typedef union __m64_128 {
    uint64_t m64_u64[1];
    int64_t m64_i64[1];
    float64_t m64_d64[1];
    uint32_t m64_u32[2];
    int32_t m64_i32[2];
    float32_t m64_f32[2];
    int16_t m64_i16[4];
    uint16_t m64_u16[4];
    int8_t m64_i8[8];
    uint8_t m64_u8[8];
} __m64_128;
//! NEON float32x2_t is modelled by the 64-bit union above
typedef __m64_128 float32x2_t;
//! Store the low 64 bits of the 128-bit vector `a` into a local named
//! `res64` and return it. Callers must have declared `__m64_128 res64;`
//! in the enclosing function — the macros reference it by name.
#define return64(a) \
_M64(res64, a); \
return res64;
//! Same as return64 but for a float vector (goes through _M64f / _M128i)
#define return64f(a) \
_M64f(res64, a); \
return res64;
//! NEON vextq_s32 emulation: the high (4 - c) int32 lanes of a followed by
//! the low c lanes of b. The lane count c is scaled to a byte offset.
//! FIX: parenthesize the macro parameter — with the original `c * 4`,
//! passing an expression such as `1 + 1` expanded to offset 1 + 1*4 == 5
//! instead of the intended (1 + 1)*4 == 8.
#define _sse_vextq_s32(a, b, c) _MM_ALIGNR_EPI8(b, a, (c)*4)
//! NEON vget_lane_f32 emulation over the __m64_128 union; parameter
//! parenthesized so `vec` may be any expression yielding the union.
#define _sse_vget_lane_f32(vec, lane) ((vec).m64_f32[lane])
#else
#define __gi_simd_type GI_NAIVE
typedef float GI_FLOAT32_t __attribute__((vector_size(16)));
typedef uint8_t GI_UINT8_t __attribute__((vector_size(16)));
typedef int8_t GI_INT8_t __attribute__((vector_size(16)));
typedef int16_t GI_INT16_t __attribute__((vector_size(16)));
typedef int32_t GI_INT32_t __attribute__((vector_size(16)));
typedef uint32_t GI_UINT32_t __attribute__((vector_size(16)));
typedef int64_t GI_INT64_t __attribute__((vector_size(16)));
#if !defined(__arm__) && !defined(__aarch64__)
typedef float float32x2_t __attribute__((vector_size(8)));
#endif
//! general intrinsic support dynamic length simd, if avx or avx2 the simd
//! length is 256
#if defined(GI_AVX_INTRINSICS) || defined(GI_AVX2_INTRINSICS) || \
defined(GI_FMA_INTRINSICS)
//! if neon and sse the simd length is 128
#define GI_SIMD_LEN 256
#define GI_SIMD_LEN_BYTE 32
#elif defined(GI_NEON_INTRINSICS) || defined(GI_SSE2_INTRINSICS) || \
defined(GI_SSE42_INTRINSICS)
#define GI_SIMD_LEN 128
#define GI_SIMD_LEN_BYTE 16
#else
//! if no simd hardware support, the simd is implemented by C, default set to
//! 128
#define GI_SIMD_LEN 128
#define GI_SIMD_LEN_BYTE 16
typedef float float32_t;
#endif
//! Some GI APIs are not implemented for every GiSimdType backend (for
//! example GiAbsInt32 has no SSE2 path). On such backends the GI_*_t types
//! alias native registers (__m128i etc.) whose element indexing does not
//! behave like GCC vector extensions, so reference/naive code uses the
//! *_NAIVE_t types below and converts to/from the backend types via memcpy.
typedef float GI_FLOAT32_NAIVE_t __attribute__((vector_size(16)));
typedef uint8_t GI_UINT8_NAIVE_t __attribute__((vector_size(16)));
typedef int8_t GI_INT8_NAIVE_t __attribute__((vector_size(16)));
typedef int16_t GI_INT16_NAIVE_t __attribute__((vector_size(16)));
typedef int32_t GI_INT32_NAIVE_t __attribute__((vector_size(16)));
typedef uint32_t GI_UINT32_NAIVE_t __attribute__((vector_size(16)));
typedef int64_t GI_INT64_NAIVE_t __attribute__((vector_size(16)));
typedef float float32x2_NAIVE_t __attribute__((vector_size(8)));
//! multi-register aggregates mirroring NEON's int32x4x2_t family
typedef struct {
    GI_INT32_NAIVE_t val[2];
} GI_INT32_V2_NAIVE_t;
typedef struct {
    GI_INT32_NAIVE_t val[4];
} GI_INT32_V4_NAIVE_t;
typedef struct {
    GI_FLOAT32_NAIVE_t val[2];
} GI_FLOAT32_V2_NAIVE_t;
typedef struct {
    GI_FLOAT32_NAIVE_t val[4];
} GI_FLOAT32_V4_NAIVE_t;
typedef struct {
    GI_INT16_NAIVE_t val[2];
} GI_INT16_V2_NAIVE_t;
typedef struct {
    GI_INT8_NAIVE_t val[2];
} GI_INT8_V2_NAIVE_t;
//! Scalar max/min helpers.
//! FIX: the whole expansion is now parenthesized — previously
//! `2 * Max(1, 3)` expanded to `2 * (1) > (3) ? (1) : (3)`, which parses as
//! `((2 > 3) ? 1 : 3)` == 3 instead of the intended 6.
//! NOTE: arguments are still evaluated more than once; do not pass
//! expressions with side effects.
#define Max(a, b) ((a) > (b) ? (a) : (b))
#define Min(a, b) ((a) < (b) ? (a) : (b))
......@@ -146,6 +321,7 @@ typedef uint32_t GI_UINT32_t __attribute__((vector_size(16)));
#endif
#endif
#if !defined(GI_NEON_INTRINSICS)
typedef struct {
GI_INT32_t val[2];
} GI_INT32_V2_t;
......@@ -169,6 +345,7 @@ typedef struct {
typedef struct {
GI_INT8_t val[2];
} GI_INT8_V2_t;
#endif
GI_FORCEINLINE
GI_INT32_t GiAndInt32(GI_INT32_t Vector1, GI_INT32_t Vector2) {
......@@ -259,6 +436,34 @@ GI_INT8_t GiBroadcastInt8(int8_t Value) {
#endif
}
//! Returns the SIMD backend this translation unit was compiled for.
GI_FORCEINLINE
GiSimdType GiGetSimdType() {
    //! __gi_simd_type may be overridden by special macros so CI can verify
    //! the naive and SSE2 paths. We do not implement GI_AVX yet; the x64 CI
    //! device tests GI_SSE42 and the ARM CI device tests GI_NEON.
    //! To force-test GI_SSE2 build with:
    //!   --copt -march=core2 --copt -mno-sse4.2
    //!   --copt -mno-sse3 --copt -DGI_TEST_SSE2
    //! To force-test GI_NAIVE build with:
    //!   --copt -DGI_TEST_SSE2
    //! (the DNN code needs at least SSE2 on x86, so GI_NAIVE cannot be
    //! tested by disabling SSE2 entirely with -mno-sse2 + -DGI_TEST_NAIVE).
    //! With CMake, these flags can be injected into CMAKE_CXX_FLAGS /
    //! CMAKE_C_FLAGS via EXTRA_CMAKE_ARGS when using scripts/cmake-build/*.sh.
#if defined(GI_TEST_NAIVE)
#undef __gi_simd_type
#define __gi_simd_type GI_NAIVE
#elif defined(GI_TEST_SSE2)
#undef __gi_simd_type
#define __gi_simd_type GI_SSE2
#endif
    return __gi_simd_type;
}

//! shared zero constants; marked unused because not every includer needs them
__attribute__((unused)) const GI_INT8_t vzero_int8 = GiBroadcastInt8(0);
__attribute__((unused)) const GI_INT32_t vzero = GiBroadcastInt32(0);
__attribute__((unused)) const GI_FLOAT32_t vfzero = GiBroadcastFloat32(0.0f);
......
......@@ -214,8 +214,12 @@ GI_UINT32_t GiTestAndSetUint32(GI_UINT32_t Vector1, GI_UINT32_t Vector2) {
#if defined(GI_NEON_INTRINSICS)
return vtstq_u32(Vector1, Vector2);
#elif defined(GI_SSE2_INTRINSICS)
GI_UINT32_t tmp = _mm_and_si128(Vector1, Vector2);
return _mm_cmpeq_epi32(tmp, _mm_setzero_si128());
__m128i zero, one, res;
zero = _mm_setzero_si128();
one = _mm_cmpeq_epi8(zero, zero);
res = _mm_and_si128(Vector1, Vector2);
res = _mm_cmpeq_epi32(res, zero);
return _mm_xor_si128(res, one);
#else
GI_UINT32_t ret;
for (size_t i = 0; i < GI_SIMD_LEN_BYTE / sizeof(int32_t); i++) {
......@@ -451,9 +455,15 @@ GI_INT32_t GiAbsInt32(GI_INT32_t Vector) {
return _mm_abs_epi32(Vector);
#else
GI_INT32_t ret;
GI_INT32_NAIVE_t tmp_ret;
GI_INT32_NAIVE_t s0;
memcpy(&s0, &Vector, sizeof(GI_INT32_t));
for (size_t i = 0; i < GI_SIMD_LEN_BYTE / sizeof(int32_t); i++) {
ret[i] = Vector[i] > 0 ? Vector[i] : -Vector[i];
tmp_ret[i] = s0[i] > 0 ? s0[i] : -s0[i];
}
memcpy(&ret, &tmp_ret, sizeof(GI_INT32_t));
return ret;
#endif
}
......@@ -466,9 +476,14 @@ GI_INT16_t GiAbsInt16(GI_INT16_t Vector) {
return _mm_abs_epi16(Vector);
#else
GI_INT16_t ret;
GI_INT16_NAIVE_t tmp_ret;
GI_INT16_NAIVE_t s0;
memcpy(&s0, &Vector, sizeof(GI_INT16_t));
for (size_t i = 0; i < GI_SIMD_LEN_BYTE / sizeof(int16_t); i++) {
ret[i] = Vector[i] > 0 ? Vector[i] : -Vector[i];
tmp_ret[i] = s0[i] > 0 ? s0[i] : -s0[i];
}
memcpy(&ret, &tmp_ret, sizeof(GI_INT16_t));
return ret;
#endif
}
......@@ -481,9 +496,14 @@ GI_INT8_t GiAbsInt8(GI_INT8_t Vector) {
return _mm_abs_epi8(Vector);
#else
GI_INT8_t ret;
GI_INT8_NAIVE_t tmp_ret;
GI_INT8_NAIVE_t s0;
memcpy(&s0, &Vector, sizeof(GI_INT8_t));
for (size_t i = 0; i < GI_SIMD_LEN_BYTE / sizeof(int8_t); i++) {
ret[i] = Vector[i] > 0 ? Vector[i] : -Vector[i];
tmp_ret[i] = s0[i] > 0 ? s0[i] : -s0[i];
}
memcpy(&ret, &tmp_ret, sizeof(GI_INT8_t));
return ret;
#endif
}
......@@ -497,7 +517,11 @@ GI_INT32_t GiMaximumInt32(GI_INT32_t Vector1, GI_INT32_t Vector2) {
#elif defined(GI_SSE2_INTRINSICS)
return GiBlendInt32(Vector2, Vector1, _mm_cmpgt_epi32(Vector1, Vector2));
#else
return GiBlendInt32(Vector2, Vector1, Vector1 > Vector2);
GI_INT32_t tmp;
for (size_t i = 0; i < GI_SIMD_LEN_BYTE / sizeof(int32_t); i++) {
tmp[i] = Vector1[i] > Vector2[i] ? 0xFFFFFFFF : 0;
}
return GiBlendInt32(Vector2, Vector1, tmp);
#endif
}
......@@ -510,7 +534,11 @@ GI_INT32_t GiMinimumInt32(GI_INT32_t Vector1, GI_INT32_t Vector2) {
#elif defined(GI_SSE2_INTRINSICS)
return GiBlendInt32(Vector2, Vector1, _mm_cmpgt_epi32(Vector2, Vector1));
#else
return GiBlendInt32(Vector2, Vector1, Vector2 > Vector1);
GI_INT32_t tmp;
for (size_t i = 0; i < GI_SIMD_LEN_BYTE / sizeof(int32_t); i++) {
tmp[i] = Vector2[i] > Vector1[i] ? 0xFFFFFFFF : 0;
}
return GiBlendInt32(Vector2, Vector1, tmp);
#endif
}
......@@ -528,7 +556,11 @@ GI_INT8_t GiMaximumInt8(GI_INT8_t Vector1, GI_INT8_t Vector2) {
#elif defined(GI_SSE2_INTRINSICS)
return GiBlendInt8(Vector2, Vector1, _mm_cmpgt_epi8(Vector1, Vector2));
#else
return GiBlendInt8(Vector2, Vector1, Vector1 > Vector2);
GI_INT8_t tmp;
for (size_t i = 0; i < GI_SIMD_LEN_BYTE / sizeof(int8_t); i++) {
tmp[i] = Vector1[i] > Vector2[i] ? 0xFF : 0;
}
return GiBlendInt8(Vector2, Vector1, tmp);
#endif
}
......@@ -541,7 +573,11 @@ GI_INT8_t GiMinimumInt8(GI_INT8_t Vector1, GI_INT8_t Vector2) {
#elif defined(GI_SSE2_INTRINSICS)
return GiBlendInt8(Vector2, Vector1, _mm_cmpgt_epi8(Vector2, Vector1));
#else
return GiBlendInt8(Vector2, Vector1, Vector2 > Vector1);
GI_INT8_t tmp;
for (size_t i = 0; i < GI_SIMD_LEN_BYTE / sizeof(int8_t); i++) {
tmp[i] = Vector2[i] > Vector1[i] ? 0xFF : 0;
}
return GiBlendInt8(Vector2, Vector1, tmp);
#endif
}
......@@ -813,14 +849,18 @@ GI_INT8_t GiCvtFromFloat32ToInt8(GI_FLOAT32_t src) {
return vepi8;
#else
GI_INT8_t ret;
GI_INT8_NAIVE_t tmp_ret;
GI_FLOAT32_NAIVE_t s0;
memcpy(&s0, &src, sizeof(GI_INT32_t));
int length = GI_SIMD_LEN_BYTE / sizeof(float);
for (int i = 0; i < length; i++) {
int8_t data = Saturate(round(src[i]), -128, 127);
ret[i] = data;
ret[length + i] = data;
ret[2 * length + i] = data;
ret[3 * length + i] = data;
int8_t data = Saturate(round(s0[i]), -128, 127);
tmp_ret[i] = data;
tmp_ret[length + i] = data;
tmp_ret[2 * length + i] = data;
tmp_ret[3 * length + i] = data;
}
memcpy(&ret, &tmp_ret, sizeof(GI_INT8_t));
return ret;
#endif
}
......@@ -861,10 +901,16 @@ GI_INT8_t GiCvtFromFloat32V2ToInt8(GI_FLOAT32_V2_t vsrc) {
return vepi8;
#else
GI_INT8_t ret;
GI_INT8_NAIVE_t tmp_ret;
GI_FLOAT32_V2_NAIVE_t s0;
memcpy(&s0, &vsrc, sizeof(GI_FLOAT32_V2_NAIVE_t));
int length = GI_SIMD_LEN_BYTE / sizeof(float);
for (int i = 0; i < 2 * length; i++) {
ret[i] = Saturate(round(vsrc.val[i / length][i % length]), -128, 127);
int8_t data = Saturate(round(s0.val[i / length][i % length]), -128, 127);
tmp_ret[i] = data;
tmp_ret[i + length * 2] = data;
}
memcpy(&ret, &tmp_ret, sizeof(GI_INT8_t));
return ret;
#endif
}
......@@ -875,8 +921,8 @@ GI_INT8_t GiCvtFromFloat32V4ToInt8(GI_FLOAT32_V4_t vsrc) {
#if __ARM_ARCH >= 8
int32x4_t vres0 = vcvtaq_s32_f32(vsrc.val[0]);
int32x4_t vres1 = vcvtaq_s32_f32(vsrc.val[1]);
int32x4_t vres2 = vcvtaq_s32_f32(vsrc.val[1]);
int32x4_t vres3 = vcvtaq_s32_f32(vsrc.val[1]);
int32x4_t vres2 = vcvtaq_s32_f32(vsrc.val[2]);
int32x4_t vres3 = vcvtaq_s32_f32(vsrc.val[3]);
int8x8_t mid1 = vqmovn_s16(vcombine_s16(vqmovn_s32(vres0), vqmovn_s32(vres1)));
int8x8_t mid2 = vqmovn_s16(vcombine_s16(vqmovn_s32(vres2), vqmovn_s32(vres3)));
return vcombine_s8(mid1, mid2);
......@@ -910,7 +956,7 @@ GI_INT8_t GiCvtFromFloat32V4ToInt8(GI_FLOAT32_V4_t vsrc) {
vres0 = _mm_round_ps(vres0, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
vres1 = _mm_round_ps(vres1, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
vres2 = _mm_round_ps(vres2, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
vres3 = _mm_round_ps(vres1, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
vres3 = _mm_round_ps(vres3, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
vres0 = _mm_min_ps(_mm_max_ps(vres0, vfmin_int8), vfmax_int8);
vres1 = _mm_min_ps(_mm_max_ps(vres1, vfmin_int8), vfmax_int8);
......@@ -927,10 +973,14 @@ GI_INT8_t GiCvtFromFloat32V4ToInt8(GI_FLOAT32_V4_t vsrc) {
return vepi8;
#else
GI_INT8_t ret;
GI_INT8_NAIVE_t tmp_ret;
GI_FLOAT32_V4_NAIVE_t s0;
memcpy(&s0, &vsrc, sizeof(GI_FLOAT32_V4_NAIVE_t));
int length = GI_SIMD_LEN_BYTE / sizeof(float);
for (int i = 0; i < 4 * length; i++) {
ret[i] = Saturate(round(vsrc.val[i / length][i % length]), -128, 127);
tmp_ret[i] = Saturate(round(s0.val[i / length][i % length]), -128, 127);
}
memcpy(&ret, &tmp_ret, sizeof(GI_INT8_t));
return ret;
#endif
}
......
此差异已折叠。
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册