Commit 0b8fdcbc authored by Suresh Siddha, committed by Ingo Molnar

x86, cpa: dont use large pages for kernel identity mapping with DEBUG_PAGEALLOC

Don't use large pages for the kernel identity mapping when DEBUG_PAGEALLOC
is enabled. This removes the need to split a large page for the allocated
kernel page from interrupt context.

This simplifies the cpa code, as we no longer do the split from interrupt
context; the cpa code is simplified further in subsequent patches.
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: arjan@linux.intel.com
Cc: venkatesh.pallipadi@intel.com
Cc: jeremy@goop.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent a2699e47
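
For reference, below is a minimal user-space sketch of the page-size selection this patch introduces. It is not kernel code: pick_page_size_mask() and the hard-coded PG_LEVEL_* values are illustrative stand-ins, while CONFIG_DEBUG_PAGEALLOC, cpu_has_pse and direct_gbpages mirror the names used in the patch (in the kernel the #ifdef sits inline in init_memory_mapping()).

#include <stdio.h>

/* illustrative stand-ins for the kernel's pg_level bit positions */
#define PG_LEVEL_2M 2
#define PG_LEVEL_1G 3

static unsigned long pick_page_size_mask(int cpu_has_pse, int direct_gbpages)
{
        unsigned long page_size_mask = 0;
        int use_pse, use_gbpages;

#ifdef CONFIG_DEBUG_PAGEALLOC
        /* identity mapping uses small pages only, so cpa() never has to
         * split a large page from interrupt context */
        use_pse = use_gbpages = 0;
#else
        use_pse = cpu_has_pse;
        use_gbpages = direct_gbpages;
#endif

        if (use_gbpages)
                page_size_mask |= 1UL << PG_LEVEL_1G;
        if (use_pse)
                page_size_mask |= 1UL << PG_LEVEL_2M;
        return page_size_mask;
}

int main(void)
{
        /* pretend the CPU advertises both PSE (2M) and 1G pages */
        printf("page_size_mask = %#lx\n", pick_page_size_mask(1, 1));
        return 0;
}

Building this with -DCONFIG_DEBUG_PAGEALLOC collapses the mask to 0, i.e. a 4k-only identity mapping, which is what lets the follow-up patches drop large-page splitting from interrupt context in cpa().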
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -777,7 +777,7 @@ void __init setup_bootmem_allocator(void)
 	after_init_bootmem = 1;
 }
 
-static void __init find_early_table_space(unsigned long end)
+static void __init find_early_table_space(unsigned long end, int use_pse)
 {
 	unsigned long puds, pmds, ptes, tables, start;
 
@@ -787,7 +787,7 @@ static void __init find_early_table_space(unsigned long end)
 	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
 	tables += PAGE_ALIGN(pmds * sizeof(pmd_t));
 
-	if (cpu_has_pse) {
+	if (use_pse) {
 		unsigned long extra;
 
 		extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
@@ -827,12 +827,22 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 	pgd_t *pgd_base = swapper_pg_dir;
 	unsigned long start_pfn, end_pfn;
 	unsigned long big_page_start;
+#ifdef CONFIG_DEBUG_PAGEALLOC
+	/*
+	 * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
+	 * This will simplify cpa(), which otherwise needs to support splitting
+	 * large pages into small in interrupt context, etc.
+	 */
+	int use_pse = 0;
+#else
+	int use_pse = cpu_has_pse;
+#endif
 
 	/*
 	 * Find space for the kernel direct mapping tables.
 	 */
 	if (!after_init_bootmem)
-		find_early_table_space(end);
+		find_early_table_space(end, use_pse);
 
 #ifdef CONFIG_X86_PAE
 	set_nx();
@@ -878,7 +888,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 		end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
 	if (start_pfn < end_pfn)
 		kernel_physical_mapping_init(pgd_base, start_pfn, end_pfn,
-					     cpu_has_pse);
+					     use_pse);
 
 	/* tail is not big page alignment ? */
 	start_pfn = end_pfn;
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -456,13 +456,14 @@ phys_pud_update(pgd_t *pgd, unsigned long addr, unsigned long end,
 	return phys_pud_init(pud, addr, end, page_size_mask);
 }
 
-static void __init find_early_table_space(unsigned long end)
+static void __init find_early_table_space(unsigned long end, int use_pse,
+					  int use_gbpages)
 {
 	unsigned long puds, pmds, ptes, tables, start;
 
 	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
 	tables = round_up(puds * sizeof(pud_t), PAGE_SIZE);
-	if (direct_gbpages) {
+	if (use_gbpages) {
 		unsigned long extra;
 		extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
 		pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
@@ -470,7 +471,7 @@ static void __init find_early_table_space(unsigned long end)
 		pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
 	tables += round_up(pmds * sizeof(pmd_t), PAGE_SIZE);
 
-	if (cpu_has_pse) {
+	if (use_pse) {
 		unsigned long extra;
 		extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
 		ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -640,6 +641,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 
 	struct map_range mr[NR_RANGE_MR];
 	int nr_range, i;
+	int use_pse, use_gbpages;
 
 	printk(KERN_INFO "init_memory_mapping\n");
 
@@ -653,9 +655,21 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 	if (!after_bootmem)
 		init_gbpages();
 
-	if (direct_gbpages)
+#ifdef CONFIG_DEBUG_PAGEALLOC
+	/*
+	 * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
+	 * This will simplify cpa(), which otherwise needs to support splitting
+	 * large pages into small in interrupt context, etc.
+	 */
+	use_pse = use_gbpages = 0;
+#else
+	use_pse = cpu_has_pse;
+	use_gbpages = direct_gbpages;
+#endif
+
+	if (use_gbpages)
 		page_size_mask |= 1 << PG_LEVEL_1G;
-	if (cpu_has_pse)
+	if (use_pse)
 		page_size_mask |= 1 << PG_LEVEL_2M;
 
 	memset(mr, 0, sizeof(mr));
@@ -716,7 +730,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 			 (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));
 
 	if (!after_bootmem)
-		find_early_table_space(end);
+		find_early_table_space(end, use_pse, use_gbpages);
 
 	for (i = 0; i < nr_range; i++)
 		last_map_addr = kernel_physical_mapping_init(
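
Both copies of find_early_table_space() now take the page-size flags so that the early page-table reservation matches the mapping that will actually be built. Below is a rough user-space sketch of why the pte estimate depends on use_pse; pte_table_bytes() and its constants are assumptions for illustration only and omit the kernel's PUD/PMD bookkeeping and alignment slack.

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PMD_SHIFT  21			/* 2 MB large pages */

static unsigned long pte_table_bytes(unsigned long end, int use_pse)
{
        unsigned long ptes;

        if (use_pse) {
                /* only the tail that 2M pages cannot cover needs pte entries */
                unsigned long extra = end - ((end >> PMD_SHIFT) << PMD_SHIFT);
                ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
        } else {
                /* every 4k page in [0, end) needs its own pte entry */
                ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
        }
        /* 8 bytes per pte (PAE / 64-bit), rounded up to whole table pages */
        return (ptes * 8 + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
}

int main(void)
{
        unsigned long end = (1UL << 30) + (1UL << 20);	/* 1 GB + 1 MB identity map */

        printf("pte tables, 2M pages + 4k tail: %lu KB\n", pte_table_bytes(end, 1) >> 10);
        printf("pte tables, 4k pages only     : %lu KB\n", pte_table_bytes(end, 0) >> 10);
        return 0;
}

For roughly 1 GB of identity map, a 4k-only mapping needs about 2 MB of pte tables reserved up front, versus a few KB for just the unaligned tail when 2M pages are used, which is why the DEBUG_PAGEALLOC decision has to be made before the early reservation.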