diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index c2fb8a87dccb2990a794bb8960bfdad85eb9a390..4d06e1c8294a3d0d260ca1198354e1cb4cf3bdfa 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1299,14 +1299,22 @@ config ARCH_DMA_ADDR_T_64BIT
 	def_bool y
 	depends on X86_64 || HIGHMEM64G
 
+config ENABLE_DIRECT_GBPAGES
+	def_bool y
+	depends on X86_64 && !DEBUG_PAGEALLOC && !KMEMCHECK
+
 config DIRECT_GBPAGES
 	bool "Enable 1GB pages for kernel pagetables" if EXPERT
 	default y
-	depends on X86_64
-	---help---
-	  Allow the kernel linear mapping to use 1GB pages on CPUs that
-	  support it. This can improve the kernel's performance a tiny bit by
-	  reducing TLB pressure. If in doubt, say "Y".
+	depends on ENABLE_DIRECT_GBPAGES
+	---help---
+	  Enable by default the kernel linear mapping to use 1GB pages on CPUs
+	  that support it. This can improve the kernel's performance a tiny bit
+	  by reducing TLB pressure. If in doubt, say "Y". If you've disabled
+	  this option but your platform is capable of using 1GB pages, you can
+	  still enable them at boot with the gbpages kernel parameter. Likewise,
+	  if you've enabled this option but want to force it off, you can use
+	  the nogbpages kernel parameter.
 
 # Common NUMA Features
 config NUMA
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 74f2b37fd07386c39d4c81e284f4f08cd06f8e8e..2ce2c8e8c99c44e66862db3c51cd53ee53a17eae 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -131,16 +131,21 @@ void __init early_alloc_pgt_buf(void)
 
 int after_bootmem;
 
+static int page_size_mask;
+
 int direct_gbpages = IS_ENABLED(CONFIG_DIRECT_GBPAGES);
 
 static void __init init_gbpages(void)
 {
-#ifdef CONFIG_X86_64
-	if (direct_gbpages && cpu_has_gbpages)
+	if (!IS_ENABLED(CONFIG_ENABLE_DIRECT_GBPAGES)) {
+		direct_gbpages = 0;
+		return;
+	}
+	if (direct_gbpages && cpu_has_gbpages) {
 		printk(KERN_INFO "Using GB pages for direct mapping\n");
-	else
+		page_size_mask |= 1 << PG_LEVEL_1G;
+	} else
 		direct_gbpages = 0;
-#endif
 }
 
 struct map_range {
@@ -149,8 +154,6 @@ struct map_range {
 	unsigned page_size_mask;
 };
 
-static int page_size_mask;
-
 static void __init probe_page_size_mask(void)
 {
 	init_gbpages();
@@ -161,8 +164,6 @@ static void __init probe_page_size_mask(void)
 	 * This will simplify cpa(), which otherwise needs to support splitting
 	 * large pages into small in interrupt context, etc.
 	 */
-	if (direct_gbpages)
-		page_size_mask |= 1 << PG_LEVEL_1G;
 	if (cpu_has_pse)
 		page_size_mask |= 1 << PG_LEVEL_2M;
 #endif
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 81e8282d8c2f2b415e991b7b130e4f70059b2deb..89af288ec6740cfd793a4c4804e939bb023e9453 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -81,11 +81,9 @@ void arch_report_meminfo(struct seq_file *m)
 	seq_printf(m, "DirectMap4M:    %8lu kB\n",
 			direct_pages_count[PG_LEVEL_2M] << 12);
 #endif
-#ifdef CONFIG_X86_64
 	if (direct_gbpages)
 		seq_printf(m, "DirectMap1G:    %8lu kB\n",
 			direct_pages_count[PG_LEVEL_1G] << 20);
-#endif
 }
 #else
 static inline void split_page_count(int level) { }
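
Note (not part of the patch): the gbpages/nogbpages parameters mentioned in the new Kconfig
help text are handled by pre-existing early_param() hooks. A minimal sketch of what those
handlers look like, assuming they still live in arch/x86/mm/init_64.c at this point in the
tree (treat the file location and names as my reading of the tree, not part of this change):

	/*
	 * Boot-time overrides for 1GB direct-map pages. early_param()
	 * callbacks run from parse_early_param() during setup_arch(),
	 * i.e. before init_mem_mapping() -> probe_page_size_mask() ->
	 * init_gbpages(), so direct_gbpages already reflects the user's
	 * choice by the time the page size mask is computed.
	 */
	static int __init parse_direct_gbpages_off(char *arg)
	{
		direct_gbpages = 0;
		return 0;
	}
	early_param("nogbpages", parse_direct_gbpages_off);

	static int __init parse_direct_gbpages_on(char *arg)
	{
		direct_gbpages = 1;
		return 0;
	}
	early_param("gbpages", parse_direct_gbpages_on);

With CONFIG_ENABLE_DIRECT_GBPAGES unset, the new init_gbpages() above forces direct_gbpages
back to 0 regardless of such an override, which is the intended behaviour for the
DEBUG_PAGEALLOC and KMEMCHECK cases.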