diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index dfd908630631a30851a8f573e41dbe8b21af5f19..d66f9db3e6db2d737c36156a25dabf7a55e283e1 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -75,6 +75,7 @@ config ARM64
 	select HAVE_ARCH_SECCOMP_FILTER
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE
+	select HAVE_ARCH_VMAP_STACK
 	select HAVE_ARM_SMCCC
 	select HAVE_EBPF_JIT
 	select HAVE_C_RECORDMCOUNT
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 0e8cc3b85bb879d359751cbc4508669b867b2e74..2b1e5def2e49630538f4d97b8f8ead39abfb78da 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -49,7 +49,12 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
  */
 #define EFI_FDT_ALIGN	SZ_2M   /* used by allocate_new_fdt_and_exit_boot() */
 
-#define EFI_KIMG_ALIGN	SEGMENT_ALIGN
+/*
+ * In some configurations (e.g. VMAP_STACK && 64K pages), stacks built into the
+ * kernel need greater alignment than we require the segments to be padded to.
+ */
+#define EFI_KIMG_ALIGN \
+	(SEGMENT_ALIGN > THREAD_ALIGN ? SEGMENT_ALIGN : THREAD_ALIGN)
 
 /* on arm64, the FDT may be located anywhere in system RAM */
 static inline unsigned long efi_get_max_fdt_addr(unsigned long dram_base)
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 7fa6ad48d57470c8f20e39b2134fdaf30e4cbbec..c5cd2c599b24e905e3ff8abcae1edc109960b33a 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -102,7 +102,17 @@
 #define KASAN_SHADOW_SIZE	(0)
 #endif
 
-#define THREAD_SHIFT		14
+#define MIN_THREAD_SHIFT	14
+
+/*
+ * VMAP'd stacks are allocated at page granularity, so we must ensure that such
+ * stacks are a multiple of page size.
+ */
+#if defined(CONFIG_VMAP_STACK) && (MIN_THREAD_SHIFT < PAGE_SHIFT)
+#define THREAD_SHIFT		PAGE_SHIFT
+#else
+#define THREAD_SHIFT		MIN_THREAD_SHIFT
+#endif
 
 #if THREAD_SHIFT >= PAGE_SHIFT
 #define THREAD_SIZE_ORDER	(THREAD_SHIFT - PAGE_SHIFT)
@@ -110,6 +120,17 @@
 
 #define THREAD_SIZE		(UL(1) << THREAD_SHIFT)
 
+/*
+ * By aligning VMAP'd stacks to 2 * THREAD_SIZE, we can detect overflow by
+ * checking sp & (1 << THREAD_SHIFT), which we can do cheaply in the entry
+ * assembly.
+ */
+#ifdef CONFIG_VMAP_STACK
+#define THREAD_ALIGN		(2 * THREAD_SIZE)
+#else
+#define THREAD_ALIGN		THREAD_SIZE
+#endif
+
 #define IRQ_STACK_SIZE		THREAD_SIZE
 
 /*
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 5141282e47d5821a46cdee8d0f14b9cf93a9ff1d..713561e5bcabc5467e2d86c9d88ec75be2c34a44 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -23,15 +23,15 @@
 
 #include <linux/kernel_stat.h>
 #include <linux/irq.h>
+#include <linux/memory.h>
 #include <linux/smp.h>
 #include <linux/init.h>
 #include <linux/irqchip.h>
 #include <linux/seq_file.h>
+#include <linux/vmalloc.h>
 
 unsigned long irq_err_count;
 
-/* irq stack only needs to be 16 byte aligned - not IRQ_STACK_SIZE aligned. */
-DEFINE_PER_CPU(unsigned long [IRQ_STACK_SIZE/sizeof(long)], irq_stack) __aligned(16);
 DEFINE_PER_CPU(unsigned long *, irq_stack_ptr);
 
 int arch_show_interrupts(struct seq_file *p, int prec)
@@ -51,6 +51,31 @@ void __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
 	handle_arch_irq = handle_irq;
 }
 
+#ifdef CONFIG_VMAP_STACK
+static void init_irq_stacks(void)
+{
+	int cpu;
+	unsigned long *p;
+
+	for_each_possible_cpu(cpu) {
+		/*
+		 * To ensure that VMAP'd stack overflow detection works
+		 * correctly, the IRQ stacks need to have the same
+		 * alignment as other stacks.
+		 */
+		p = __vmalloc_node_range(IRQ_STACK_SIZE, THREAD_ALIGN,
+					 VMALLOC_START, VMALLOC_END,
+					 THREADINFO_GFP, PAGE_KERNEL,
+					 0, cpu_to_node(cpu),
+					 __builtin_return_address(0));
+
+		per_cpu(irq_stack_ptr, cpu) = p;
+	}
+}
+#else
+/* irq stack only needs to be 16 byte aligned - not IRQ_STACK_SIZE aligned. */
+DEFINE_PER_CPU_ALIGNED(unsigned long [IRQ_STACK_SIZE/sizeof(long)], irq_stack);
+
 static void init_irq_stacks(void)
 {
 	int cpu;
@@ -58,6 +83,7 @@ static void init_irq_stacks(void)
 	for_each_possible_cpu(cpu)
 		per_cpu(irq_stack_ptr, cpu) = per_cpu(irq_stack, cpu);
 }
+#endif
 
 void __init init_IRQ(void)
 {
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 71565386d0631548385ad9dee01cd535277a7021..fe56c268a7d9c37374f8126cae0af243adace762 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -176,7 +176,7 @@ SECTIONS
 
 	_data = .;
 	_sdata = .;
-	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
+	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_ALIGN)
 
 	/*
	 * Data written with the MMU off but read with the MMU on requires
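
Note (not part of the patch): the memory.h comment above relies on a simple bit-pattern property of 2 * THREAD_SIZE alignment. Below is a minimal stand-alone C sketch of that property; THREAD_SHIFT, the base address, and the sp_overflowed() helper are illustrative values and names, not taken from the kernel sources:

/*
 * Stand-alone illustration (not kernel code) of the check enabled by
 * THREAD_ALIGN == 2 * THREAD_SIZE: with the stack occupying
 * [base, base + THREAD_SIZE) and base aligned to THREAD_ALIGN, bit
 * THREAD_SHIFT of sp is clear for any in-range sp and set for any sp
 * up to THREAD_SIZE bytes below the base.
 */
#include <stdint.h>
#include <stdio.h>

#define THREAD_SHIFT	14			/* example value */
#define THREAD_SIZE	(UINT64_C(1) << THREAD_SHIFT)
#define THREAD_ALIGN	(2 * THREAD_SIZE)

static int sp_overflowed(uint64_t sp)
{
	return (sp & (UINT64_C(1) << THREAD_SHIFT)) != 0;
}

int main(void)
{
	/* hypothetical THREAD_ALIGN-aligned stack base */
	uint64_t base = UINT64_C(0xffff000010000000);

	printf("near top of stack: %d\n", sp_overflowed(base + THREAD_SIZE - 0x10));	/* 0 */
	printf("at stack base:     %d\n", sp_overflowed(base));				/* 0 */
	printf("just overflowed:   %d\n", sp_overflowed(base - 0x10));			/* 1 */
	return 0;
}

Because the base is THREAD_ALIGN-aligned but the stack itself is only THREAD_SIZE bytes, bit THREAD_SHIFT of sp stays clear while sp is on the stack and becomes set as soon as sp drops below the base, which is the condition the memory.h comment says can be tested cheaply in the entry assembly; that entry-code check itself is outside the scope of this patch.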