diff --git a/arch/arm64/include/asm/kfence.h b/arch/arm64/include/asm/kfence.h
index d061176d57ea178b8bcd91f673d86142735a7524..64d7cbfe067baf84ebf9b8b403ee03587338bda1 100644
--- a/arch/arm64/include/asm/kfence.h
+++ b/arch/arm64/include/asm/kfence.h
@@ -8,9 +8,15 @@
 #ifndef __ASM_KFENCE_H
 #define __ASM_KFENCE_H
 
+#include <linux/kfence.h>
 #include <asm/set_memory.h>
 
-static inline bool arch_kfence_init_pool(void) { return true; }
+static inline bool arch_kfence_init_pool(void)
+{
+	memset(__kfence_pool, 0, KFENCE_POOL_SIZE);
+
+	return true;
+}
 
 static inline bool kfence_protect_page(unsigned long addr, bool protect)
 {
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index f6e56847a4e9d246de9d77960fe402204697204b..ddca8d27fca6f58d56265ff7dfe36a0f1a9e5a55 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -18,6 +18,7 @@
 #include
 #include
 #include
+#include <linux/kfence.h>
 #include
 #include
 #include
@@ -374,6 +375,8 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
 
 	arm64_memblock_init();
 
+	kfence_early_alloc_pool();
+
 	efi_fake_memmap();
 	efi_find_mirror();
 
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 7314a7a3613fb422e304db1cbe1a22b887d687c4..56bfd692ebd997378ddc16631da3682dba4fc9e6 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -13,6 +13,7 @@
 #include
 #include
 #include
+#include <linux/kfence.h>
 #include
 #include
 #include
@@ -477,10 +478,19 @@ static void __init map_mem(pgd_t *pgdp)
 	int flags = 0, eflags = 0;
 	u64 i;
 
-	if (rodata_full || debug_pagealloc_enabled() ||
-	    IS_ENABLED(CONFIG_KFENCE))
+	if (rodata_full || debug_pagealloc_enabled())
 		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
 
+#ifdef CONFIG_KFENCE
+	/*
+	 * KFENCE requires the linear map to be mapped at page granularity,
+	 * so temporarily mark __kfence_pool NOMAP so that the loop below
+	 * skips it; it is mapped separately afterwards.
+	 */
+	if (__kfence_pool)
+		memblock_mark_nomap(__pa(__kfence_pool), KFENCE_POOL_SIZE);
+#endif
+
 	/*
 	 * Take care not to create a writable alias for the
 	 * read-only text and rodata sections of the kernel image.
@@ -553,6 +563,18 @@
 				     resource_size(&crashk_res));
 	}
 #endif
+#ifdef CONFIG_KFENCE
+	/*
+	 * Map __kfence_pool at page granularity now.
+	 */
+	if (__kfence_pool) {
+		__map_memblock(pgdp, __pa(__kfence_pool),
+			       __pa(__kfence_pool + KFENCE_POOL_SIZE),
+			       pgprot_tagged(PAGE_KERNEL),
+			       NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
+		memblock_clear_nomap(__pa(__kfence_pool), KFENCE_POOL_SIZE);
+	}
+#endif
 }
 
 void mark_rodata_ro(void)
@@ -1483,12 +1505,7 @@ int arch_add_memory(int nid, u64 start, u64 size,
 
 	}
 
-	/*
-	 * KFENCE requires linear map to be mapped at page granularity, so that
-	 * it is possible to protect/unprotect single pages in the KFENCE pool.
-	 */
-	if (rodata_full || debug_pagealloc_enabled() ||
-	    IS_ENABLED(CONFIG_KFENCE))
+	if (rodata_full || debug_pagealloc_enabled())
 		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
 
 	__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
diff --git a/include/linux/kfence.h b/include/linux/kfence.h
index 4b5e3679a72c78caf5a6aa2c185658328403fa2f..00dee7459cd9461d57c5270b223e3c54864e3b22 100644
--- a/include/linux/kfence.h
+++ b/include/linux/kfence.h
@@ -56,6 +56,7 @@ static __always_inline bool is_kfence_address(const void *addr)
 	return unlikely((unsigned long)((char *)addr - __kfence_pool) < KFENCE_POOL_SIZE && __kfence_pool);
 }
 
+void __init kfence_early_alloc_pool(void);
 /**
  * kfence_alloc_pool() - allocate the KFENCE pool via memblock
  */
@@ -205,6 +206,7 @@ bool __must_check kfence_handle_page_fault(unsigned long addr, bool is_write, st
 #else /* CONFIG_KFENCE */
 
 static inline bool is_kfence_address(const void *addr) { return false; }
+static inline void kfence_early_alloc_pool(void) { }
 static inline void kfence_alloc_pool(void) { }
 static inline void kfence_init(void) { }
 static inline void kfence_shutdown_cache(struct kmem_cache *s) { }
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index a19154a8d1964bd72391df9e344d26a23e1bd656..370721509958a7a38fefe09d9f0bdcd62e71a4f5 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -752,14 +752,26 @@ static void toggle_allocation_gate(struct work_struct *work)
 static DECLARE_DELAYED_WORK(kfence_timer, toggle_allocation_gate);
 
 /* === Public interface ===================================================== */
 
+void __init kfence_early_alloc_pool(void)
+{
+	if (!kfence_sample_interval)
+		return;
+
+	__kfence_pool = memblock_alloc_raw(KFENCE_POOL_SIZE, PAGE_SIZE);
+
+	if (!__kfence_pool) {
+		kfence_sample_interval = 0;
+		pr_err("failed to allocate pool early, disabling KFENCE\n");
+	}
+}
 void __init kfence_alloc_pool(void)
 {
 	if (!kfence_sample_interval)
 		return;
 
-	__kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
-
+	if (!__kfence_pool)
+		__kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
 	if (!__kfence_pool)
 		pr_err("failed to allocate pool\n");
 }
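
---

A note on the ordering above: memblock_alloc_raw() reserves memory without zeroing it, which is why the patch adds the memset() to arch_kfence_init_pool(); the pool is cleared only when KFENCE actually initializes. The standalone userspace sketch below illustrates the three-phase handshake the patch sets up. early_alloc(), late_alloc(), pool_init(), POOL_SIZE, pool and sample_interval are hypothetical stand-ins for kfence_early_alloc_pool(), kfence_alloc_pool(), arch_kfence_init_pool(), KFENCE_POOL_SIZE, __kfence_pool and kfence_sample_interval; malloc() and calloc() stand in for memblock_alloc_raw() and memblock_alloc(). This is not kernel code.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define POOL_SIZE (2 * 1024 * 1024)		/* stand-in for KFENCE_POOL_SIZE */

static char *pool;				/* stand-in for __kfence_pool */
static unsigned long sample_interval = 100;	/* stand-in for kfence_sample_interval */

/* Phase 1: early allocation, cf. kfence_early_alloc_pool() from setup_arch(). */
static void early_alloc(void)
{
	if (!sample_interval)
		return;

	pool = malloc(POOL_SIZE);	/* "raw": contents are NOT zeroed */
	if (!pool) {
		sample_interval = 0;	/* early failure disables the feature */
		fprintf(stderr, "failed to allocate pool early\n");
	}
}

/* Phase 2: the original allocation site, now a fallback, cf. kfence_alloc_pool(). */
static void late_alloc(void)
{
	if (!sample_interval)
		return;

	if (!pool)			/* keep the early allocation if it succeeded */
		pool = calloc(1, POOL_SIZE);
	if (!pool)
		fprintf(stderr, "failed to allocate pool\n");
}

/* Phase 3: zero the raw pool only at init time, cf. arch_kfence_init_pool(). */
static int pool_init(void)
{
	if (!pool)
		return -1;
	memset(pool, 0, POOL_SIZE);
	return 0;
}

int main(void)
{
	early_alloc();			/* cf. setup_arch(), before map_mem() */
	late_alloc();			/* cf. mm_init() in init/main.c */
	return pool_init() ? 1 : 0;	/* cf. kfence_init() */
}

If the early allocation fails, sample_interval (kfence_sample_interval in the patch) is zeroed so every later stage, including the fallback allocator, treats the feature as disabled; if it succeeds, the fallback in late_alloc() is a no-op and the pool keeps the page-granular linear mapping set up by map_mem().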