diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index c0f6a275f798986171318c68be466d12bc2925ca..9c6ad627ba4fb6830315827426274a5da7649611 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1155,6 +1155,15 @@ config ARCH_WANT_HUGE_PMD_SHARE
 config ARCH_HAS_CACHE_LINE_SIZE
 	def_bool y
 
+config ARCH_LLC_128_LINE_SIZE
+	bool "Force 128-byte alignment to fit the LLC cache line"
+	depends on ARM64
+	default y
+	help
+	  Some machines have a last level cache (LLC) line size of
+	  128 bytes. Aligning hot data structures to 128 bytes can
+	  improve performance on such systems.
+
 config ARCH_HAS_FILTER_PGPROT
 	def_bool y
 
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index cf8e78585865a2f369129a81a4791b0c48e9db63..f7e1d1bb81727f349f1fac2899fc12cc43f1d3b1 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -31,6 +31,12 @@
 #define L1_CACHE_SHIFT		(6)
 #define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
 
+#ifdef CONFIG_ARCH_LLC_128_LINE_SIZE
+#ifndef ____cacheline_aligned_128
+#define ____cacheline_aligned_128 __attribute__((__aligned__(128)))
+#endif
+#endif
+
 #define CLIDR_LOUU_SHIFT	27
 #define CLIDR_LOC_SHIFT		24
 
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index cc4dc2857a8706c292949dc67541c1e6af9536fe..72cad9bf19d70e71fd6345d7a9fc4b2701ff9cef 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -46,9 +46,16 @@ DEFINE_RAW_SPINLOCK(timekeeper_lock);
  * cache line.
  */
 static struct {
+#ifdef CONFIG_ARCH_LLC_128_LINE_SIZE
+	u64			padding[8];
+#endif
 	seqcount_raw_spinlock_t	seq;
 	struct timekeeper	timekeeper;
+#ifdef CONFIG_ARCH_LLC_128_LINE_SIZE
+} tk_core ____cacheline_aligned_128 = {
+#else
 } tk_core ____cacheline_aligned = {
+#endif
 	.seq = SEQCNT_RAW_SPINLOCK_ZERO(tk_core.seq, &timekeeper_lock),
 };
 
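
For reference, below is a minimal userspace sketch of the layout the timekeeping change produces. It is not part of the patch: seqcount_raw_spinlock_t and struct timekeeper are replaced with dummy types of assumed size, and only the ____cacheline_aligned_128 definition is taken verbatim from the cache.h hunk above. Built with GCC or Clang, it shows that tk_core lands on a 128-byte boundary and that the 64-byte u64 padding[8] array pushes seq to offset 64.

/*
 * Illustrative userspace sketch only, not kernel code. The kernel's
 * seqcount_raw_spinlock_t and struct timekeeper are stood in for by
 * dummy types whose sizes are assumptions; the alignment macro body is
 * copied from the patched arch/arm64/include/asm/cache.h.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Same definition the patch adds behind CONFIG_ARCH_LLC_128_LINE_SIZE. */
#define ____cacheline_aligned_128 __attribute__((__aligned__(128)))

/* Stand-ins for the kernel types; the field counts are arbitrary. */
struct dummy_seqcount   { unsigned int sequence; };
struct dummy_timekeeper { uint64_t fields[32]; };

static struct tk_core_layout {
	uint64_t padding[8];		/* 64 bytes, as in the patched tk_core */
	struct dummy_seqcount seq;
	struct dummy_timekeeper timekeeper;
} tk_core ____cacheline_aligned_128;

int main(void)
{
	/* seq sits at offset 64 because of the 8 * 8-byte padding words. */
	printf("offsetof(seq):        %zu\n",
	       offsetof(struct tk_core_layout, seq));
	printf("offsetof(timekeeper): %zu\n",
	       offsetof(struct tk_core_layout, timekeeper));
	/* The aligned(128) attribute puts tk_core on a 128-byte boundary. */
	printf("&tk_core %% 128:       %zu\n",
	       (size_t)((uintptr_t)&tk_core % 128));
	return 0;
}

On a typical LP64 target this prints offsets 64 and 72 and an address remainder of 0, i.e. the object starts on a 128-byte boundary regardless of what the surrounding data section contains.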