diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index e5790fe9e330acf69482fd7e988c47b9075295b5..a8ce13a5476451fdee6296488fd8dd0d0ba14fdc 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -114,6 +114,9 @@ config GENERIC_TIME_VSYSCALL
 config ARCH_HAS_CPU_RELAX
 	def_bool y
 
+config ARCH_HAS_CACHE_LINE_SIZE
+	def_bool y
+
 config HAVE_SETUP_PER_CPU_AREA
 	def_bool X86_64 || (X86_SMP && !X86_VOYAGER)
 
diff --git a/include/linux/cache.h b/include/linux/cache.h
index 4552504c02289239953aab738adede76c8da20f3..97e24881c4c6f477496130c982e1962e770df9eb 100644
--- a/include/linux/cache.h
+++ b/include/linux/cache.h
@@ -60,4 +60,8 @@
 #endif
 #endif
 
+#ifndef CONFIG_ARCH_HAS_CACHE_LINE_SIZE
+#define cache_line_size()	L1_CACHE_BYTES
+#endif
+
 #endif /* __LINUX_CACHE_H */
diff --git a/mm/slab.c b/mm/slab.c
index 7bc4a136846e8960e49653a9816bfa9d9618a07c..39d20f8a07916fc7bf77a8153d58c0cf8161c90e 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -139,10 +139,6 @@
 #define	BYTES_PER_WORD		sizeof(void *)
 #define	REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))
 
-#ifndef cache_line_size
-#define cache_line_size()	L1_CACHE_BYTES
-#endif
-
 #ifndef ARCH_KMALLOC_MINALIGN
 /*
  * Enforce a minimum alignment for the kmalloc caches.
diff --git a/mm/slub.c b/mm/slub.c
index 48fff83a1e9dd822cbf912723c63024723d52df6..38914bc64aca2cd58a4c40e35b1b8ef45b9e55d9 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -207,11 +207,6 @@ static inline void ClearSlabDebug(struct page *page)
 #define __KMALLOC_CACHE		0x20000000	/* objects freed using kfree */
 #define __PAGE_ALLOC_FALLBACK	0x10000000	/* Allow fallback to page alloc */
 
-/* Not all arches define cache_line_size */
-#ifndef cache_line_size
-#define cache_line_size() L1_CACHE_BYTES
-#endif
-
 static int kmem_size = sizeof(struct kmem_cache);
 
 #ifdef CONFIG_SMP
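
Note (not part of the patch): a minimal sketch of how an architecture is expected to use the new symbol. The Kconfig fragment mirrors the x86 hunk above; the header definition is illustrative only (on x86, cache_line_size() is believed to be derived from boot_cpu_data.x86_cache_alignment), so any identifier not shown in the diff is an assumption.

	# arch/<arch>/Kconfig: declare that this arch supplies cache_line_size()
	config ARCH_HAS_CACHE_LINE_SIZE
		def_bool y

	/* arch <asm/...> header: illustrative runtime-detected line size */
	#define cache_line_size()	(boot_cpu_data.x86_cache_alignment)

Architectures that do not select ARCH_HAS_CACHE_LINE_SIZE now pick up the L1_CACHE_BYTES fallback added to <linux/cache.h> above, so slab and slub no longer need their own private copies of the default.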